@@ -1,20 +1,29 @@
 use hstreamdb::client::Client;
 use hstreamdb::producer::FlushSettings;
 use hstreamdb::{CompressionType, Record, Stream};
+use rustler::types::atom::ok;
 use rustler::{resource, Atom, Env, ResourceArc, Term};
 use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
 
 mod runtime;
 
 rustler::atoms! {
-    none, gzip, zstd
+    none, gzip, zstd,
+    len, size
 }
 
+rustler::init!(
+    "hstreamdb",
+    [create_stream, start_producer, append],
+    load = load
+);
+
 #[derive(Clone)]
 pub struct NifAppender(UnboundedSender<Record>);
 
 fn load(env: Env, _: Term) -> bool {
     resource!(NifAppender, env);
+    env_logger::init();
     true
 }
 
@@ -25,7 +34,7 @@ pub fn create_stream(
     replication_factor: u32,
     backlog_duration: u32,
     shard_count: u32,
-) {
+) -> Atom {
     let future = async move {
         let mut client = Client::new(url).await.unwrap();
         client
@@ -38,20 +47,21 @@ pub fn create_stream(
             .await
             .unwrap()
     };
-    _ = runtime::spawn(future)
+    _ = runtime::spawn(future);
+    ok()
 }
 
 #[rustler::nif]
 pub fn start_producer(
     url: String,
     stream_name: String,
     compression_type: Atom,
+    flush_settings: Term,
 ) -> ResourceArc<NifAppender> {
     let (request_sender, request_receiver) = unbounded_channel::<Record>();
+    let compression_type = atom_to_compression_type(compression_type);
+    let flush_settings = new_flush_settings(flush_settings);
     let future = async move {
-        let compression_type = atom_to_compression_type(compression_type);
-        let flush_settings = FlushSettings { len: 0, size: 0 };
-
         let mut client = Client::new(url).await.unwrap();
         let (appender, mut producer) = client
             .new_producer(stream_name, compression_type, flush_settings)
@@ -72,16 +82,17 @@ pub fn start_producer(
 }
 
 #[rustler::nif]
-fn append(producer: ResourceArc<NifAppender>, partition_key: String, raw_payload: String) {
+fn append(producer: ResourceArc<NifAppender>, partition_key: String, raw_payload: String) -> Atom {
     let record = Record {
         partition_key,
         payload: hstreamdb::Payload::RawRecord(raw_payload.into_bytes()),
     };
     let producer = &producer.0;
     producer.send(record).unwrap();
+    ok()
 }
 
-pub fn atom_to_compression_type(compression_type: Atom) -> CompressionType {
+fn atom_to_compression_type(compression_type: Atom) -> CompressionType {
     if compression_type == none() {
         CompressionType::None
     } else if compression_type == gzip() {
@@ -93,8 +104,28 @@ pub fn atom_to_compression_type(compression_type: Atom) -> CompressionType {
     }
 }
 
-rustler::init!(
-    "hstreamdb",
-    [create_stream, start_producer, append],
-    load = load
-);
+fn new_flush_settings(proplists: Term) -> FlushSettings {
+    let proplists = proplists.into_list_iterator().unwrap();
+    let mut len_v = usize::MAX;
+    let mut size_v = usize::MAX;
+
+    for x in proplists {
+        if x.is_tuple() {
+            let (k, v): (Atom, usize) = x.decode().unwrap();
+            if k == len() {
+                len_v = v;
+            } else if k == size() {
+                size_v = v;
+            }
+        }
+    }
+
+    if len_v == usize::MAX && size_v == usize::MAX {
+        len_v = 0;
+        size_v = 0;
+    }
+    FlushSettings {
+        len: len_v,
+        size: size_v,
+    }
+}
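
Note (not part of the commit): new_flush_settings expects the Erlang caller to pass flush settings as a proplist, presumably something like [{len, 10}, {size, 4096}] as the new fourth argument of start_producer; with the code as committed, a malformed argument makes the unwrap() calls panic. Below is a minimal sketch of a more lenient variant that uses only the rustler calls already seen above (into_list_iterator, is_tuple, decode) and the module's len/size atoms, and keeps the same len: 0, size: 0 fallback. The function name new_flush_settings_lenient is hypothetical.

// Sketch only: same proplist parsing as the commit's helper, but it skips
// malformed entries instead of panicking. Assumes this module's imports and
// the `len`/`size` atoms defined in `rustler::atoms!` above.
fn new_flush_settings_lenient(proplists: Term) -> FlushSettings {
    let mut len_v = usize::MAX;
    let mut size_v = usize::MAX;

    // Treat anything that is not a list as an empty proplist.
    if let Ok(iter) = proplists.into_list_iterator() {
        for x in iter {
            if !x.is_tuple() {
                continue;
            }
            // Ignore entries that do not decode as `{atom(), non_neg_integer()}`.
            if let Ok((k, v)) = x.decode::<(Atom, usize)>() {
                if k == len() {
                    len_v = v;
                } else if k == size() {
                    size_v = v;
                }
            }
        }
    }

    // Same fallback as the commit: if neither option was given, use 0 for both.
    if len_v == usize::MAX && size_v == usize::MAX {
        len_v = 0;
        size_v = 0;
    }
    FlushSettings { len: len_v, size: size_v }
}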