1313require "digest/sha1"
1414require "forwardable"
1515require "logger"
16+ require "securerandom"
1617require "benchmark"
1718require "json"
1819require "openssl"
@@ -56,25 +57,57 @@ def initialize(sdk_key, config = Config.default, wait_for_sec = 5)
5657 end
5758
5859 @sdk_key = sdk_key
59- @hooks = Concurrent ::Array . new ( config . hooks )
60+ config . instance_id = SecureRandom . uuid
61+ @config = config
62+
63+ start_up ( wait_for_sec )
64+ end
65+
66+ #
67+ # Re-initializes an existing client after a process fork.
68+ #
69+ # The SDK relies on multiple background threads to operate correctly. When a process forks, `these threads are not
70+ # available to the child <https://apidock.com/ruby/Process/fork/class>`.
71+ #
72+ # As a result, the SDK will not function correctly in the child process until it is re-initialized.
73+ #
74+ # This method is effectively equivalent to instantiating a new client. Future iterations of the SDK will provide
75+ # increasingly efficient re-initializing improvements.
76+ #
77+ # Note that any configuration provided to the SDK will need to survive the forking process independently. For this
78+ # reason, it is recommended that any listener or hook integrations be added post-fork unless you are certain it can
79+ # survive the forking process.
80+ #
81+ # @param wait_for_sec [Float] maximum time (in seconds) to wait for initialization
82+ #
def postfork(wait_for_sec = 5)
  # The components backed by background threads cannot survive a fork; drop
  # them so start_up rebuilds each one from the stored configuration.
  %i[@data_source @event_processor @big_segment_store_manager].each do |component|
    instance_variable_set(component, nil)
  end

  start_up(wait_for_sec)
end
90+
91+ private def start_up ( wait_for_sec )
92+ @hooks = Concurrent ::Array . new ( @config . hooks )
6093
6194 @shared_executor = Concurrent ::SingleThreadExecutor . new
6295
63- data_store_broadcaster = LaunchDarkly ::Impl ::Broadcaster . new ( @shared_executor , config . logger )
96+ data_store_broadcaster = LaunchDarkly ::Impl ::Broadcaster . new ( @shared_executor , @ config. logger )
6497 store_sink = LaunchDarkly ::Impl ::DataStore ::UpdateSink . new ( data_store_broadcaster )
6598
6699 # We need to wrap the feature store object with a FeatureStoreClientWrapper in order to add
67100 # some necessary logic around updates. Unfortunately, we have code elsewhere that accesses
68101 # the feature store through the Config object, so we need to make a new Config that uses
69102 # the wrapped store.
70- @store = Impl ::FeatureStoreClientWrapper . new ( config . feature_store , store_sink , config . logger )
71- updated_config = config . clone
103+ @store = Impl ::FeatureStoreClientWrapper . new ( @ config. feature_store , store_sink , @ config. logger )
104+ updated_config = @ config. clone
72105 updated_config . instance_variable_set ( :@feature_store , @store )
73106 @config = updated_config
74107
75108 @data_store_status_provider = LaunchDarkly ::Impl ::DataStore ::StatusProvider . new ( @store , store_sink )
76109
77- @big_segment_store_manager = Impl ::BigSegmentStoreManager . new ( config . big_segments , @config . logger )
110+ @big_segment_store_manager = Impl ::BigSegmentStoreManager . new ( @ config. big_segments , @config . logger )
78111 @big_segment_store_status_provider = @big_segment_store_manager . status_provider
79112
80113 get_flag = lambda { |key | @store . get ( FEATURES , key ) }
@@ -83,15 +116,15 @@ def initialize(sdk_key, config = Config.default, wait_for_sec = 5)
83116 @evaluator = LaunchDarkly ::Impl ::Evaluator . new ( get_flag , get_segment , get_big_segments_membership , @config . logger )
84117
85118 if !@config . offline? && @config . send_events && !@config . diagnostic_opt_out?
86- diagnostic_accumulator = Impl ::DiagnosticAccumulator . new ( Impl ::DiagnosticAccumulator . create_diagnostic_id ( sdk_key ) )
119+ diagnostic_accumulator = Impl ::DiagnosticAccumulator . new ( Impl ::DiagnosticAccumulator . create_diagnostic_id ( @ sdk_key) )
87120 else
88121 diagnostic_accumulator = nil
89122 end
90123
91124 if @config . offline? || !@config . send_events
92125 @event_processor = NullEventProcessor . new
93126 else
94- @event_processor = EventProcessor . new ( sdk_key , config , nil , diagnostic_accumulator )
127+ @event_processor = EventProcessor . new ( @ sdk_key, @ config, nil , diagnostic_accumulator )
95128 end
96129
97130 if @config . use_ldd?
@@ -115,9 +148,9 @@ def initialize(sdk_key, config = Config.default, wait_for_sec = 5)
115148 # Currently, data source factories take two parameters unless they need to be aware of diagnostic_accumulator, in
116149 # which case they take three parameters. This will be changed in the future to use a less awkward mechanism.
117150 if data_source_or_factory . arity == 3
118- @data_source = data_source_or_factory . call ( sdk_key , @config , diagnostic_accumulator )
151+ @data_source = data_source_or_factory . call ( @ sdk_key, @config , diagnostic_accumulator )
119152 else
120- @data_source = data_source_or_factory . call ( sdk_key , @config )
153+ @data_source = data_source_or_factory . call ( @ sdk_key, @config )
121154 end
122155 else
123156 @data_source = data_source_or_factory
0 commit comments