diff --git a/qa/1290 b/qa/1290 index d515438fb4f..db5e7477bbe 100755 --- a/qa/1290 +++ b/qa/1290 @@ -265,7 +265,7 @@ then else echo "Warning: no extra suppressions found for valgrind version $__version" >>$seq_full fi -# skip valgrind errors in upstream non-PCP code, like hiredis ... +# skip valgrind errors in upstream non-PCP code, like libvalkey ... # (seen on vm11 Debian 10.13) # ... same suppressions as for qa/1662 # diff --git a/qa/1543.out b/qa/1543.out index ba2935537c9..71c03e31657 100644 --- a/qa/1543.out +++ b/qa/1543.out @@ -507,8 +507,8 @@ Series checked (indom) "text-help": "Universally 9 instances numbered 100 .. 900 in steps of 100, and named\u000A\u0022bin-100\u0022 .. \u0022bin-900\u0022", "instances": [ { - "instance": 900, - "name": "bin-900", + "instance": 300, + "name": "bin-300", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -516,8 +516,17 @@ Series checked (indom) } }, { - "instance": 100, - "name": "bin-100", + "instance": 700, + "name": "bin-700", + "labels": { + "domainname": "DOMAINNAME" + "hostname": "HOSTNAME" + "machineid": "MACHINEID" + } + }, + { + "instance": 400, + "name": "bin-400", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -534,8 +543,8 @@ Series checked (indom) } }, { - "instance": 400, - "name": "bin-400", + "instance": 100, + "name": "bin-100", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -552,8 +561,8 @@ Series checked (indom) } }, { - "instance": 300, - "name": "bin-300", + "instance": 900, + "name": "bin-900", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -577,15 +586,6 @@ Series checked (indom) "hostname": "HOSTNAME" "machineid": "MACHINEID" } - }, - { - "instance": 700, - "name": "bin-700", - "labels": { - "domainname": "DOMAINNAME" - "hostname": "HOSTNAME" - "machineid": "MACHINEID" - } } ] } @@ -602,8 +602,8 @@ Series checked (indom) "text-help": "Universally 9 instances numbered 100 .. 
900 in steps of 100, and named\u000A\u0022bin-100\u0022 .. \u0022bin-900\u0022", "instances": [ { - "instance": 900, - "name": "bin-900", + "instance": 300, + "name": "bin-300", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -611,8 +611,17 @@ Series checked (indom) } }, { - "instance": 100, - "name": "bin-100", + "instance": 700, + "name": "bin-700", + "labels": { + "domainname": "DOMAINNAME" + "hostname": "HOSTNAME" + "machineid": "MACHINEID" + } + }, + { + "instance": 400, + "name": "bin-400", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -629,8 +638,8 @@ Series checked (indom) } }, { - "instance": 400, - "name": "bin-400", + "instance": 100, + "name": "bin-100", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -647,8 +656,8 @@ Series checked (indom) } }, { - "instance": 300, - "name": "bin-300", + "instance": 900, + "name": "bin-900", "labels": { "domainname": "DOMAINNAME" "hostname": "HOSTNAME" @@ -672,15 +681,6 @@ Series checked (indom) "hostname": "HOSTNAME" "machineid": "MACHINEID" } - }, - { - "instance": 700, - "name": "bin-700", - "labels": { - "domainname": "DOMAINNAME" - "hostname": "HOSTNAME" - "machineid": "MACHINEID" - } } ] } @@ -971,16 +971,16 @@ Series checked (indom) # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge -sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 +sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 == scrape two metrics == # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge -sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 
+sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 # PCP5 sample.long.ten 29.0.11 32 PM_INDOM_NULL instant none # HELP sample_long_ten 10 as a 32-bit integer # TYPE sample_long_ten gauge -sample_long_ten{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 10 +sample_long_ten{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 10 == scrape bad metric name == { "context": "CONTEXT" @@ -991,82 +991,82 @@ sample_long_ten{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge -sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 +sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 # PCP5 sample.long.ten 29.0.11 32 PM_INDOM_NULL instant none # HELP sample_long_ten 10 as a 32-bit integer # TYPE sample_long_ten gauge -sample_long_ten{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 10 +sample_long_ten{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 10 # PCP5 sample.long.hundred 29.0.12 32 PM_INDOM_NULL instant none # HELP sample_long_hundred 100 as a 32-bit integer # TYPE sample_long_hundred gauge -sample_long_hundred{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 100 +sample_long_hundred{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 100 # PCP5 sample.long.million 29.0.13 32 PM_INDOM_NULL instant none # HELP sample_long_million 
1000000 as a 32-bit integer # TYPE sample_long_million gauge -sample_long_million{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1000000 +sample_long_million{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1000000 # PCP5 sample.long.write_me 29.0.14 32 PM_INDOM_NULL instant none # HELP sample_long_write_me a 32-bit integer that can be modified # TYPE sample_long_write_me gauge -sample_long_write_me{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID",changed="true"} 24 +sample_long_write_me{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",changed="true",cluster="zero",role="testing",machineid="MACHINEID"} 24 # PCP5 sample.dupnames.five.long_bin 29.0.103 32 29.2 instant none # HELP sample_dupnames_five_long_bin like sample.bin but type 32 # TYPE sample_dupnames_five_long_bin gauge -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAINNAME",machineid="MACHINEID",bin="100"} 100 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAINNAME",machineid="MACHINEID",bin="200"} 200 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAINNAME",machineid="MACHINEID",bin="300"} 300 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAINNAME",machineid="MACHINEID",bin="400"} 400 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAINNAME",machineid="MACHINEID",bin="500"} 500 
-sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAINNAME",machineid="MACHINEID",bin="600"} 600 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAINNAME",machineid="MACHINEID",bin="700"} 700 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAINNAME",machineid="MACHINEID",bin="800"} 800 -sample_dupnames_five_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAINNAME",machineid="MACHINEID",bin="900"} 900 +sample_dupnames_five_long_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="100"} 100 +sample_dupnames_five_long_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="200"} 200 +sample_dupnames_five_long_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="300"} 300 +sample_dupnames_five_long_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="400"} 400 +sample_dupnames_five_long_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="500"} 500 +sample_dupnames_five_long_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="600"} 600 
+sample_dupnames_five_long_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="700"} 700 +sample_dupnames_five_long_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="800"} 800 +sample_dupnames_five_long_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="900"} 900 # PCP5 sample.long.bin 29.0.103 32 29.2 instant none # HELP sample_long_bin like sample.bin but type 32 # TYPE sample_long_bin gauge -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAINNAME",machineid="MACHINEID",bin="100"} 100 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAINNAME",machineid="MACHINEID",bin="200"} 200 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAINNAME",machineid="MACHINEID",bin="300"} 300 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAINNAME",machineid="MACHINEID",bin="400"} 400 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAINNAME",machineid="MACHINEID",bin="500"} 500 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAINNAME",machineid="MACHINEID",bin="600"} 600 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAINNAME",machineid="MACHINEID",bin="700"} 700 
-sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAINNAME",machineid="MACHINEID",bin="800"} 800 -sample_long_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAINNAME",machineid="MACHINEID",bin="900"} 900 +sample_long_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="100"} 100 +sample_long_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="200"} 200 +sample_long_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="300"} 300 +sample_long_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="400"} 400 +sample_long_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="500"} 500 +sample_long_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="600"} 600 +sample_long_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="700"} 700 +sample_long_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="800"} 800 +sample_long_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="900"} 900 # PCP5 sample.long.bin_ctr 29.0.104 32 29.2 counter Kbyte # HELP 
sample_long_bin_ctr like sample.bin but type 32, SEM_COUNTER and SPACE_KBYTE # TYPE sample_long_bin_ctr counter -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAINNAME",machineid="MACHINEID",bin="100"} 100 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAINNAME",machineid="MACHINEID",bin="200"} 200 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAINNAME",machineid="MACHINEID",bin="300"} 300 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAINNAME",machineid="MACHINEID",bin="400"} 400 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAINNAME",machineid="MACHINEID",bin="500"} 500 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAINNAME",machineid="MACHINEID",bin="600"} 600 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAINNAME",machineid="MACHINEID",bin="700"} 700 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAINNAME",machineid="MACHINEID",bin="800"} 800 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAINNAME",machineid="MACHINEID",bin="900"} 900 +sample_long_bin_ctr{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="100"} 100 
+sample_long_bin_ctr{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="200"} 200 +sample_long_bin_ctr{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="300"} 300 +sample_long_bin_ctr{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="400"} 400 +sample_long_bin_ctr{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="500"} 500 +sample_long_bin_ctr{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="600"} 600 +sample_long_bin_ctr{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="700"} 700 +sample_long_bin_ctr{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="800"} 800 +sample_long_bin_ctr{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME",bin="900"} 900 == scrape metric instances == # PCP5 sample.dupnames.four.colour 29.0.5 32 29.1 instant none # HELP sample_dupnames_four_colour Metrics with a "saw-tooth" trend over time # TYPE sample_dupnames_four_colour gauge -sample_dupnames_four_colour{role="testing",agent="sample",model="RGB",cluster="zero",hostname="HOSTNAME",instname="red",instid="0",domainname="DOMAINNAME",machineid="MACHINEID"} 143 
-sample_dupnames_four_colour{role="testing",agent="sample",model="RGB",cluster="zero",hostname="HOSTNAME",instname="green",instid="1",domainname="DOMAINNAME",machineid="MACHINEID"} 244 -sample_dupnames_four_colour{role="testing",agent="sample",model="RGB",cluster="zero",hostname="HOSTNAME",instname="blue",instid="2",domainname="DOMAINNAME",machineid="MACHINEID"} 345 +sample_dupnames_four_colour{instid="0",agent="sample",hostname="HOSTNAME",instname="red",model="RGB",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME"} 143 +sample_dupnames_four_colour{instid="1",agent="sample",hostname="HOSTNAME",instname="green",model="RGB",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME"} 244 +sample_dupnames_four_colour{instid="2",agent="sample",hostname="HOSTNAME",instname="blue",model="RGB",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME"} 345 # PCP5 sample.colour 29.0.5 32 29.1 instant none # HELP sample_colour Metrics with a "saw-tooth" trend over time # TYPE sample_colour gauge -sample_colour{role="testing",agent="sample",model="RGB",cluster="zero",hostname="HOSTNAME",instname="red",instid="0",domainname="DOMAINNAME",machineid="MACHINEID"} 143 -sample_colour{role="testing",agent="sample",model="RGB",cluster="zero",hostname="HOSTNAME",instname="green",instid="1",domainname="DOMAINNAME",machineid="MACHINEID"} 244 -sample_colour{role="testing",agent="sample",model="RGB",cluster="zero",hostname="HOSTNAME",instname="blue",instid="2",domainname="DOMAINNAME",machineid="MACHINEID"} 345 +sample_colour{instid="0",agent="sample",hostname="HOSTNAME",instname="red",model="RGB",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME"} 143 +sample_colour{instid="1",agent="sample",hostname="HOSTNAME",instname="green",model="RGB",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME"} 244 
+sample_colour{instid="2",agent="sample",hostname="HOSTNAME",instname="blue",model="RGB",machineid="MACHINEID",role="testing",cluster="zero",domainname="DOMAINNAME"} 345 == small curl compression command == # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge -sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 +sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 == large curl compression command == # PCP5 sample.dupnames.pid_daemon 29.0.1 u32 PM_INDOM_NULL instant none # HELP sample_dupnames_pid_daemon Process id of PMDA daemon # TYPE sample_dupnames_pid_daemon gauge -sample_dupnames_pid_daemon{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} PID +sample_dupnames_pid_daemon{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} PID # PCP5 sample.dupnames.daemon_pid 29.0.1 u32 PM_INDOM_NULL instant none # HELP sample_dupnames_daemon_pid Process id of PMDA daemon # TYPE sample_dupnames_daemon_pid gauge @@ -1078,21 +1078,21 @@ pmproxy check passed # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge -sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 +sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 == good filter exact match == # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge 
-sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 +sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 # PCP5 sample.long.ten 29.0.11 32 PM_INDOM_NULL instant none # HELP sample_long_ten 10 as a 32-bit integer # TYPE sample_long_ten gauge -sample_long_ten{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 10 +sample_long_ten{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 10 == good filter regex == # PCP5 sample.long.one 29.0.10 32 PM_INDOM_NULL instant none # HELP sample_long_one 1 as a 32-bit integer # TYPE sample_long_one gauge -sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAINNAME",machineid="MACHINEID"} 1 +sample_long_one{hostname="HOSTNAME",agent="sample",domainname="DOMAINNAME",cluster="zero",role="testing",machineid="MACHINEID"} 1 == bad filter match param == {"context":CONTEXT,"message":"dummy - invalid 'match' parameter value","success":false} == scrape one metric == @@ -1108,15 +1108,15 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "machineid", + "key": "domainname", "value": { - "stringValue": "MACHINEID" + "stringValue: "DOMAINNAME" } }, { - "key": "domainname", + "key": "machineid", "value": { - "stringValue: "DOMAINNAME" + "stringValue": "MACHINEID" } } ] @@ -1148,12 +1148,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1165,6 +1159,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": 
"testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1192,15 +1192,15 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "machineid", + "key": "domainname", "value": { - "stringValue": "MACHINEID" + "stringValue: "DOMAINNAME" } }, { - "key": "domainname", + "key": "machineid", "value": { - "stringValue: "DOMAINNAME" + "stringValue": "MACHINEID" } } ] @@ -1232,12 +1232,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1249,6 +1243,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1277,12 +1277,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1294,6 +1288,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1327,15 +1327,15 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "machineid", + "key": "domainname", "value": { - "stringValue": "MACHINEID" + "stringValue: "DOMAINNAME" } }, { - "key": "domainname", + "key": "machineid", "value": { - "stringValue: "DOMAINNAME" + "stringValue": "MACHINEID" } } ] @@ -1367,12 +1367,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1384,6 +1378,12 @@ 
sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1412,12 +1412,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1429,6 +1423,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1457,12 +1457,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1474,6 +1468,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1502,12 +1502,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -1519,6 +1513,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, @@ -1548,15 +1548,15 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "agent", "value": { - "stringValue": "testing" + "stringValue": "sample" } }, { - "key": "agent", + "key": "changed", "value": { - "stringValue": "sample" + "stringValue": "true" } }, { @@ -1566,9 +1566,9 @@ 
sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "changed", + "key": "role", "value": { - "stringValue": "true" + "stringValue": "testing" } } ], @@ -1599,39 +1599,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 100 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-100" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 100 + "stringValue": "100" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-100" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "100" + "stringValue": "zero" } } ], @@ -1653,39 +1653,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 200 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-200" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 200 + "stringValue": "200" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-200" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "200" + "stringValue": "zero" } } ], @@ -1707,39 +1707,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 300 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-300" } }, { - "key": "cluster", 
+ "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 300 + "stringValue": "300" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-300" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "300" + "stringValue": "zero" } } ], @@ -1761,39 +1761,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 400 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-400" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 400 + "stringValue": "400" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-400" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "400" + "stringValue": "zero" } } ], @@ -1815,39 +1815,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 500 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-500" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 500 + "stringValue": "500" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-500" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "500" + "stringValue": "zero" } } ], @@ -1869,39 +1869,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": 
"instid", "value": { - "stringValue": "testing" + "intValue": 600 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-600" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 600 + "stringValue": "600" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-600" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "600" + "stringValue": "zero" } } ], @@ -1923,39 +1923,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 700 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-700" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 700 + "stringValue": "700" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-700" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "700" + "stringValue": "zero" } } ], @@ -1977,39 +1977,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 800 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-800" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 800 + "stringValue": "800" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-800" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", 
"value": { - "stringValue": "800" + "stringValue": "zero" } } ], @@ -2031,39 +2031,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 900 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-900" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 900 + "stringValue": "900" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-900" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "900" + "stringValue": "zero" } } ], @@ -2094,39 +2094,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 100 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-100" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 100 + "stringValue": "100" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-100" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "100" + "stringValue": "zero" } } ], @@ -2148,39 +2148,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 200 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-200" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": 
{ - "intValue": 200 + "stringValue": "200" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-200" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "200" + "stringValue": "zero" } } ], @@ -2202,39 +2202,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 300 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-300" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 300 + "stringValue": "300" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-300" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "300" + "stringValue": "zero" } } ], @@ -2256,39 +2256,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 400 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-400" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 400 + "stringValue": "400" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-400" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "400" + "stringValue": "zero" } } ], @@ -2310,39 +2310,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 500 } }, { - "key": "agent", + "key": "instname", "value": { - 
"stringValue": "sample" + "stringValue": "bin-500" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 500 + "stringValue": "500" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-500" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "500" + "stringValue": "zero" } } ], @@ -2364,39 +2364,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 600 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-600" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 600 + "stringValue": "600" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-600" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "600" + "stringValue": "zero" } } ], @@ -2418,39 +2418,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 700 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-700" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 700 + "stringValue": "700" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-700" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "700" + "stringValue": "zero" } } ], @@ -2472,39 +2472,39 @@ 
sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 800 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-800" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 800 + "stringValue": "800" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-800" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "800" + "stringValue": "zero" } } ], @@ -2526,39 +2526,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 900 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-900" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 900 + "stringValue": "900" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-900" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "900" + "stringValue": "zero" } } ], @@ -2591,39 +2591,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 100 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-100" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 100 + "stringValue": "100" } }, { - "key": "instname", + "key": "role", 
"value": { - "stringValue": "bin-100" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "100" + "stringValue": "zero" } } ], @@ -2645,39 +2645,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 200 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-200" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 200 + "stringValue": "200" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-200" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "200" + "stringValue": "zero" } } ], @@ -2699,39 +2699,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 300 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-300" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 300 + "stringValue": "300" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-300" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "300" + "stringValue": "zero" } } ], @@ -2753,39 +2753,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 400 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-400" } }, { - "key": "cluster", + "key": "agent", 
"value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 400 + "stringValue": "400" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-400" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "400" + "stringValue": "zero" } } ], @@ -2807,39 +2807,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 500 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-500" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 500 + "stringValue": "500" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-500" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "500" + "stringValue": "zero" } } ], @@ -2861,39 +2861,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 600 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-600" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 600 + "stringValue": "600" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-600" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "600" + "stringValue": "zero" } } ], @@ -2915,39 +2915,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - 
"stringValue": "testing" + "intValue": 700 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-700" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 700 + "stringValue": "700" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-700" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "700" + "stringValue": "zero" } } ], @@ -2969,39 +2969,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 800 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-800" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 800 + "stringValue": "800" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-800" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": "800" + "stringValue": "zero" } } ], @@ -3023,39 +3023,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 900 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "bin-900" } }, { - "key": "cluster", + "key": "agent", "value": { - "stringValue": "zero" + "stringValue": "sample" } }, { - "key": "instid", + "key": "bin", "value": { - "intValue": 900 + "stringValue": "900" } }, { - "key": "instname", + "key": "role", "value": { - "stringValue": "bin-900" + "stringValue": "testing" } }, { - "key": "bin", + "key": "cluster", "value": { - "stringValue": 
"900" + "stringValue": "zero" } } ], @@ -3084,15 +3084,15 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "machineid", + "key": "domainname", "value": { - "stringValue": "MACHINEID" + "stringValue: "DOMAINNAME" } }, { - "key": "domainname", + "key": "machineid", "value": { - "stringValue: "DOMAINNAME" + "stringValue": "MACHINEID" } } ] @@ -3125,39 +3125,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 0 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "red" } }, { - "key": "model", + "key": "agent", "value": { - "stringValue": "RGB" + "stringValue": "sample" } }, { - "key": "cluster", + "key": "model", "value": { - "stringValue": "zero" + "stringValue": "RGB" } }, { - "key": "instid", + "key": "role", "value": { - "intValue": 0 + "stringValue": "testing" } }, { - "key": "instname", + "key": "cluster", "value": { - "stringValue": "red" + "stringValue": "zero" } } ], @@ -3179,39 +3179,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 1 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "green" } }, { - "key": "model", + "key": "agent", "value": { - "stringValue": "RGB" + "stringValue": "sample" } }, { - "key": "cluster", + "key": "model", "value": { - "stringValue": "zero" + "stringValue": "RGB" } }, { - "key": "instid", + "key": "role", "value": { - "intValue": 1 + "stringValue": "testing" } }, { - "key": "instname", + "key": "cluster", "value": { - "stringValue": "green" + "stringValue": "zero" } } ], @@ -3233,39 +3233,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": 
{ - "stringValue": "testing" + "intValue": 2 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "blue" } }, { - "key": "model", + "key": "agent", "value": { - "stringValue": "RGB" + "stringValue": "sample" } }, { - "key": "cluster", + "key": "model", "value": { - "stringValue": "zero" + "stringValue": "RGB" } }, { - "key": "instid", + "key": "role", "value": { - "intValue": 2 + "stringValue": "testing" } }, { - "key": "instname", + "key": "cluster", "value": { - "stringValue": "blue" + "stringValue": "zero" } } ], @@ -3296,39 +3296,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 0 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "red" } }, { - "key": "model", + "key": "agent", "value": { - "stringValue": "RGB" + "stringValue": "sample" } }, { - "key": "cluster", + "key": "model", "value": { - "stringValue": "zero" + "stringValue": "RGB" } }, { - "key": "instid", + "key": "role", "value": { - "intValue": 0 + "stringValue": "testing" } }, { - "key": "instname", + "key": "cluster", "value": { - "stringValue": "red" + "stringValue": "zero" } } ], @@ -3350,39 +3350,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 1 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "green" } }, { - "key": "model", + "key": "agent", "value": { - "stringValue": "RGB" + "stringValue": "sample" } }, { - "key": "cluster", + "key": "model", "value": { - "stringValue": "zero" + "stringValue": "RGB" } }, { - "key": "instid", + "key": "role", "value": { - "intValue": 1 + "stringValue": "testing" } }, { - "key": "instname", + "key": "cluster", "value": { - "stringValue": "green" + 
"stringValue": "zero" } } ], @@ -3404,39 +3404,39 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "role", + "key": "instid", "value": { - "stringValue": "testing" + "intValue": 2 } }, { - "key": "agent", + "key": "instname", "value": { - "stringValue": "sample" + "stringValue": "blue" } }, { - "key": "model", + "key": "agent", "value": { - "stringValue": "RGB" + "stringValue": "sample" } }, { - "key": "cluster", + "key": "model", "value": { - "stringValue": "zero" + "stringValue": "RGB" } }, { - "key": "instid", + "key": "role", "value": { - "intValue": 2 + "stringValue": "testing" } }, { - "key": "instname", + "key": "cluster", "value": { - "stringValue": "blue" + "stringValue": "zero" } } ], @@ -3465,15 +3465,15 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" } }, { - "key": "machineid", + "key": "domainname", "value": { - "stringValue": "MACHINEID" + "stringValue: "DOMAINNAME" } }, { - "key": "domainname", + "key": "machineid", "value": { - "stringValue: "DOMAINNAME" + "stringValue": "MACHINEID" } } ] @@ -3505,12 +3505,6 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "stringValue": "32" } }, - { - "key": "role", - "value": { - "stringValue": "testing" - } - }, { "key": "agent", "value": { @@ -3522,6 +3516,12 @@ sample_long_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero" "value": { "stringValue": "zero" } + }, + { + "key": "role", + "value": { + "stringValue": "testing" + } } ], "timeUnixNano": NANOSECONDS, diff --git a/qa/1602 b/qa/1602 index 16877f35975..222d513e00e 100755 --- a/qa/1602 +++ b/qa/1602 @@ -59,7 +59,9 @@ _filter_key_server_err() { sed \ -e 's;ERR syntax error;expected error;' \ - -e 's;Parse command error.*;expected error;' + -e 's;Failed to find keys of command.*;expected error;' \ + -e 's;Unknown command.*;expected error;' \ + -e 's;Command parse error;expected error;' } _stop_auto_restart pmproxy diff 
--git a/qa/1662 b/qa/1662 index 9916b304f5e..b05b8db7127 100755 --- a/qa/1662 +++ b/qa/1662 @@ -96,7 +96,7 @@ path = $dir EOF src/time_stamp $tmp.stamp 'pmproxy start' >>$seq_full grind_extra="--track-origins=yes --read-var-info=yes --read-inline-info=yes" -# skip valgrind errors in upstream non-PCP code, like hiredis ... +# skip valgrind errors in upstream non-PCP code, like libvalkey ... # (seen on vm11 Debian 10.13) # ... same suppressions as for qa/1290 # diff --git a/qa/1727 b/qa/1727 index 155e747f0bf..6bae6fd6eca 100755 --- a/qa/1727 +++ b/qa/1727 @@ -96,7 +96,35 @@ fi echo; echo === /metrics webapi listing. The instname label should appear only once. curl -Gs 'http://localhost:44322/metrics?names=openmetrics.duplicate_instname_label.somemetric' \ -| _filter_openmetrics_labels +| _filter_openmetrics_labels \ +| awk '{ + if (match($0, /\{[^}]+\}/)) { + prefix = substr($0, 1, RSTART-1) + labels_str = substr($0, RSTART+1, RLENGTH-2) + suffix = substr($0, RSTART+RLENGTH) + + split("", labels) + n = split(labels_str, pairs, ",") + for (i = 1; i <= n; i++) { + split(pairs[i], kv, "=") + labels[kv[1]] = kv[2] + } + + order = "script agent hostname instid instname domainname machineid source" + split(order, keys, " ") + result = "" + for (i = 1; i <= 8; i++) { + key = keys[i] + if (key in labels) { + if (result != "") result = result "," + result = result key "=" labels[key] + } + } + print prefix "{" result "}" suffix + } else { + print + } +}' echo; echo === verify metric name validity using pminfo pminfo -v openmetrics diff --git a/qa/1890 b/qa/1890 index d12dadec2e6..49e11989e6d 100755 --- a/qa/1890 +++ b/qa/1890 @@ -56,6 +56,45 @@ _filter_opentelemetry_labels() # end } +_reorder_labels() +{ + awk ' + /^[^#].*{.*=.*}/ { + match($0, /^([^{]+){([^}]+)}(.*)$/, a) + metric = a[1] + labels_str = a[2] + rest = a[3] + + delete labels + n = split(labels_str, pairs, ",") + for (i = 1; i <= n; i++) { + match(pairs[i], /^([^=]+)=(.+)$/, kv) + key = kv[1] + value = kv[2] + 
labels[key] = value + } + + order = "url agent hostname domainname machineid source my_sum_attribute" + split(order, order_arr, " ") + + output = metric "{" + first = 1 + for (i = 1; i <= length(order_arr); i++) { + key = order_arr[i] + if (key in labels) { + if (!first) output = output "," + output = output key "=" labels[key] + first = 0 + } + } + output = output "}" rest + print output + next + } + {print} + ' +} + need_restore=true _prepare_pmda opentelemetry trap "_cleanup; exit \$status" 0 1 2 3 15 @@ -83,7 +122,7 @@ fi echo; echo === /metrics webapi listing. The instname label should appear only once. curl -Gs 'http://localhost:44322/metrics?names=opentelemetry.duplicate.somemetric' \ -| _filter_opentelemetry_labels +| _filter_opentelemetry_labels | _reorder_labels echo; echo === verify metric name validity using pminfo pminfo -v opentelemetry diff --git a/qa/1900.out b/qa/1900.out index 431462e8b85..a075f7b6435 100644 --- a/qa/1900.out +++ b/qa/1900.out @@ -11,14 +11,14 @@ Check bpf metrics have appeared ... X metrics and X values == Running pmdabpf with valgrind === std out === -bpf.disk.all.latency PMID: 157.1.0 [Disk latency] - Data Type: 64-bit unsigned int InDom: 157.3 0x27400003 +bpf.disk.all.latency PMID: 157.0.0 [Disk latency] + Data Type: 64-bit unsigned int InDom: 157.2 0x27400002 Semantics: counter Units: microsec Help: Disk latency histogram across all disks, for both reads and writes. 
-bpf.runq.latency PMID: 157.0.0 [Run queue latency (ns)] - Data Type: 64-bit unsigned int InDom: 157.2 0x27400002 +bpf.runq.latency PMID: 157.1.0 [Run queue latency (ns)] + Data Type: 64-bit unsigned int InDom: 157.3 0x27400003 Semantics: counter Units: nanosec Help: Run queue latency from task switches, diff --git a/qa/662.out b/qa/662.out index bcdd35b24e2..53ce32f1e68 100644 --- a/qa/662.out +++ b/qa/662.out @@ -344,115 +344,115 @@ metrics: success #### PCP5 sample.dupnames.five.bin 29.0.6 32 29.2 instant none #### HELP sample_dupnames_five_bin Several constant instances #### TYPE sample_dupnames_five_bin gauge -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 
-sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_dupnames_five_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_dupnames_five_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_dupnames_five_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_dupnames_five_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 +sample_dupnames_five_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_dupnames_five_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_dupnames_five_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_dupnames_five_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_dupnames_five_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_dupnames_five_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 #### PCP5 sample.dupnames.four.bin 29.0.6 
32 29.2 instant none #### HELP sample_dupnames_four_bin Several constant instances #### TYPE sample_dupnames_four_bin gauge -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_dupnames_four_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_dupnames_four_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 
+sample_dupnames_four_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_dupnames_four_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 +sample_dupnames_four_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_dupnames_four_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_dupnames_four_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_dupnames_four_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_dupnames_four_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_dupnames_four_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 #### PCP5 sample.dupnames.three.bin 29.0.6 32 29.2 instant none #### HELP sample_dupnames_three_bin Several constant instances #### TYPE sample_dupnames_three_bin gauge -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 
-sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_dupnames_three_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_dupnames_three_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_dupnames_three_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_dupnames_three_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 +sample_dupnames_three_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 
+sample_dupnames_three_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_dupnames_three_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_dupnames_three_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_dupnames_three_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_dupnames_three_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 #### PCP5 sample.dupnames.two.bin 29.0.6 32 29.2 instant none #### HELP sample_dupnames_two_bin Several constant instances #### TYPE sample_dupnames_two_bin gauge -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 
-sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_dupnames_two_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_dupnames_two_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_dupnames_two_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_dupnames_two_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 +sample_dupnames_two_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_dupnames_two_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_dupnames_two_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_dupnames_two_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 
+sample_dupnames_two_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_dupnames_two_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 #### PCP5 sample.bin 29.0.6 32 29.2 instant none #### HELP sample_bin Several constant instances #### TYPE sample_bin gauge -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 
+sample_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 +sample_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 context #### metrics target=sample.float.bin response code 200 metrics: success #### PCP5 sample.float.bin 29.0.107 float 29.2 instant none #### HELP sample_float_bin like sample.bin but type FLOAT #### TYPE sample_float_bin gauge -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 
-sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_float_bin{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_float_bin{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_float_bin{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_float_bin{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 
+sample_float_bin{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_float_bin{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_float_bin{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_float_bin{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_float_bin{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_float_bin{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 context #### metrics target=sample.double.one response code 200 metrics: success #### PCP5 sample.double.one 29.0.25 double PM_INDOM_NULL instant none #### HELP sample_double_one 1 as a 64-bit floating point value #### TYPE sample_double_one gauge -sample_double_one{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",domainname="DOMAIN",machineid="MACHINE"} 1 +sample_double_one{hostname="HOSTNAME",agent="sample",domainname="DOMAIN",cluster="zero",role="testing",machineid="MACHINE"} 1 context #### metrics target=sample.long.bin_ctr response code 200 metrics: success #### PCP5 sample.long.bin_ctr 29.0.104 32 29.2 counter Kbyte #### HELP sample_long_bin_ctr like sample.bin but type 32, SEM_COUNTER and SPACE_KBYTE #### TYPE sample_long_bin_ctr counter -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 
-sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_long_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_long_bin_ctr{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_long_bin_ctr{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_long_bin_ctr{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 
+sample_long_bin_ctr{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_long_bin_ctr{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 +sample_long_bin_ctr{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_long_bin_ctr{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_long_bin_ctr{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_long_bin_ctr{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 context #### metrics target=sample.ulong.bin_ctr response code 200 metrics: success #### PCP5 sample.ulong.bin_ctr 29.0.106 u32 29.2 counter Kbyte #### HELP sample_ulong_bin_ctr like sample.bin but type U32, SEM_COUNTER and SPACE_KBYTE #### TYPE sample_ulong_bin_ctr counter -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-100",instid="100",domainname="DOMAIN",machineid="MACHINE",bin="100"} 100 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-200",instid="200",domainname="DOMAIN",machineid="MACHINE",bin="200"} 200 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-300",instid="300",domainname="DOMAIN",machineid="MACHINE",bin="300"} 300 
-sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-400",instid="400",domainname="DOMAIN",machineid="MACHINE",bin="400"} 400 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-500",instid="500",domainname="DOMAIN",machineid="MACHINE",bin="500"} 500 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-600",instid="600",domainname="DOMAIN",machineid="MACHINE",bin="600"} 600 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-700",instid="700",domainname="DOMAIN",machineid="MACHINE",bin="700"} 700 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-800",instid="800",domainname="DOMAIN",machineid="MACHINE",bin="800"} 800 -sample_ulong_bin_ctr{role="testing",agent="sample",hostname="HOSTNAME",cluster="zero",instname="bin-900",instid="900",domainname="DOMAIN",machineid="MACHINE",bin="900"} 900 +sample_ulong_bin_ctr{instid="100",agent="sample",hostname="HOSTNAME",instname="bin-100",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="100"} 100 +sample_ulong_bin_ctr{instid="200",agent="sample",hostname="HOSTNAME",instname="bin-200",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="200"} 200 +sample_ulong_bin_ctr{instid="300",agent="sample",hostname="HOSTNAME",instname="bin-300",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="300"} 300 +sample_ulong_bin_ctr{instid="400",agent="sample",hostname="HOSTNAME",instname="bin-400",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="400"} 400 +sample_ulong_bin_ctr{instid="500",agent="sample",hostname="HOSTNAME",instname="bin-500",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="500"} 500 
+sample_ulong_bin_ctr{instid="600",agent="sample",hostname="HOSTNAME",instname="bin-600",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="600"} 600 +sample_ulong_bin_ctr{instid="700",agent="sample",hostname="HOSTNAME",instname="bin-700",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="700"} 700 +sample_ulong_bin_ctr{instid="800",agent="sample",hostname="HOSTNAME",instname="bin-800",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="800"} 800 +sample_ulong_bin_ctr{instid="900",agent="sample",hostname="HOSTNAME",instname="bin-900",machineid="MACHINE",role="testing",cluster="zero",domainname="DOMAIN",bin="900"} 900 sleeping briefly to expire contexts context #### metadata names=kernel.all.nprocs response code 400 diff --git a/qa/src/sha1ext2int.c b/qa/src/sha1ext2int.c index eee14c5cd79..9b5610e1302 100644 --- a/qa/src/sha1ext2int.c +++ b/qa/src/sha1ext2int.c @@ -8,7 +8,7 @@ * $ ./sha1ext2int --raw 09be7733f1b5ed42572d26928a5e56ccf91ea8b8 */ #include -#include "sds.h" +#include /* Input 40-byte SHA1 hash, output 20-byte representation */ static unsigned char * diff --git a/qa/src/sha1int2ext.c b/qa/src/sha1int2ext.c index 2da973b7193..bd36ceab19a 100644 --- a/qa/src/sha1int2ext.c +++ b/qa/src/sha1int2ext.c @@ -7,7 +7,7 @@ * 09be7733f1b5ed42572d26928a5e56ccf91ea8b8 */ #include -#include "sds.h" +#include static char * hash_identity(const unsigned char *hash, char *buffer, int buflen) diff --git a/qa/src/test_encodings.c b/qa/src/test_encodings.c index 28196800d30..cca7f4e931f 100644 --- a/qa/src/test_encodings.c +++ b/qa/src/test_encodings.c @@ -18,7 +18,7 @@ #include #include #include "libpcp.h" -#include "sds.h" +#include static void test_urlencode(char *input, char *expected) { diff --git a/scripts/cull-build-warnings b/scripts/cull-build-warnings index 91df01fcaf2..bd6a464223f 100755 --- a/scripts/cull-build-warnings +++ b/scripts/cull-build-warnings @@ -8,7 +8,7 @@ # The things we're 
culling are # - false warnings, often from older toolchains # - warnings from code the is not part of PCP proper, e.g. the qwt library -# and the vendor code (hiredis, hiredis-cluster, jasonl, libbpf-tools, +# and the vendor code (libvalkey, jasonl, libbpf-tools, # bpftool, htop, etc) ... we assume fixed these is "someone else's # problem" # - things's we triaged and decided are OK, e.g. rand() or random() use @@ -56,10 +56,9 @@ mv $tmp.out $tmp.in sed <$tmp.in >$tmp.out \ -e '/^qwt_/d' \ -e '/^\.\/qwt_/d' \ - -e '/^deps\/hiredis\/async.c[ :].* unused variable /d' \ - -e '/^deps\/hiredis\/sds.h[ :].* declaration of .sh. shadows a previous local/d' \ - -e '/^deps\/hiredis\/sds.c[ :].* declaration of .sh. shadows a previous local/d' \ - -e '/^deps\/hiredis\/sds.c[ :].* declaration shadows a local variable/d' \ + -e '/^deps\/libvalkey\/src\/sds.h[ :].* declaration of .sh. shadows a previous local/d' \ + -e '/^deps\/libvalkey\/src\/sds.c[ :].* declaration of .sh. shadows a previous local/d' \ + -e '/^deps\/libvalkey\/src\/sds.c[ :].* declaration shadows a local variable/d' \ -e '/\/selinux\/.*\.if[ :].* duplicate definition of /d' \ -e '/^libbpf.c[ :].* comparison of integer expressions of different signedness/d' \ -e '/^libbpf.c[ :].* comparison between signed and unsigned integer expressions/d' \ diff --git a/src/external/dict.c b/src/external/dict.c deleted file mode 100644 index 73360c189ff..00000000000 --- a/src/external/dict.c +++ /dev/null @@ -1,1228 +0,0 @@ -/* Hash Tables Implementation. - * - * This file implements in memory hash tables with insert/del/replace/find/ - * get-random-element operations. Hash tables will auto resize if needed - * tables of power of two in size are used, collisions are handled by - * chaining. See the source code for more information... :) - * - * Copyright (c) 2006-2012, Salvatore Sanfilippo - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "dict.h" -#include "zmalloc.h" -#include - -/* Using dictEnableResize() / dictDisableResize() we make possible to - * enable/disable resizing of the hash table as needed. This is very important - * for Redis, as we use copy-on-write and don't want to move too much memory - * around when there is a child performing saving operations. 
- * - * Note that even when dict_can_resize is set to 0, not all resizes are - * prevented: a hash table is still allowed to grow if the ratio between - * the number of elements and the buckets > dict_force_resize_ratio. */ -static int dict_can_resize = 1; -static unsigned int dict_force_resize_ratio = 5; - -/* -------------------------- private prototypes ---------------------------- */ - -static int _dictExpandIfNeeded(dict *ht); -static unsigned long _dictNextPower(unsigned long size); -static long _dictKeyIndex(dict *ht, const void *key, uint64_t hash, dictEntry **existing); -static int _dictInit(dict *ht, dictType *type, void *privDataPtr); - -/* -------------------------- hash functions -------------------------------- */ - -static uint8_t dict_hash_function_seed[16]; - -void dictSetHashFunctionSeed(uint8_t *seed) { - memcpy(dict_hash_function_seed,seed,sizeof(dict_hash_function_seed)); -} - -uint8_t *dictGetHashFunctionSeed(void) { - return dict_hash_function_seed; -} - -/* The default hashing function uses SipHash implementation - * in siphash.c. */ - -uint64_t siphash(const uint8_t *in, const size_t inlen, const uint8_t *k); -uint64_t siphash_nocase(const uint8_t *in, const size_t inlen, const uint8_t *k); - -uint64_t dictGenHashFunction(const void *key, int len) { - return siphash(key,len,dict_hash_function_seed); -} - -uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len) { - return siphash_nocase(buf,len,dict_hash_function_seed); -} - -/* ----------------------------- API implementation ------------------------- */ - -/* Reset a hash table already initialized with ht_init(). - * NOTE: This function should only be called by ht_destroy(). 
*/ -static void _dictReset(dictht *ht) -{ - ht->table = NULL; - ht->size = 0; - ht->sizemask = 0; - ht->used = 0; -} - -/* Create a new hash table */ -dict *dictCreate(dictType *type, - void *privDataPtr) -{ - dict *d = zmalloc(sizeof(*d)); - - _dictInit(d,type,privDataPtr); - return d; -} - -/* Initialize the hash table */ -int _dictInit(dict *d, dictType *type, - void *privDataPtr) -{ - _dictReset(&d->ht[0]); - _dictReset(&d->ht[1]); - d->type = type; - d->privdata = privDataPtr; - d->rehashidx = -1; - d->iterators = 0; - return DICT_OK; -} - -/* Resize the table to the minimal size that contains all the elements, - * but with the invariant of a USED/BUCKETS ratio near to <= 1 */ -int dictResize(dict *d) -{ - int minimal; - - if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR; - minimal = d->ht[0].used; - if (minimal < DICT_HT_INITIAL_SIZE) - minimal = DICT_HT_INITIAL_SIZE; - return dictExpand(d, minimal); -} - -/* Expand or create the hash table */ -int dictExpand(dict *d, unsigned long size) -{ - dictht n; /* the new hash table */ - unsigned long realsize = _dictNextPower(size); - - /* the size is invalid if it is smaller than the number of - * elements already inside the hash table */ - if (dictIsRehashing(d) || d->ht[0].used > size) - return DICT_ERR; - - /* Rehashing to the same table size is not useful. */ - if (realsize == d->ht[0].size) return DICT_ERR; - - /* Allocate the new hash table and initialize all pointers to NULL */ - n.size = realsize; - n.sizemask = realsize-1; - n.table = zcalloc(realsize, sizeof(dictEntry*)); - n.used = 0; - - /* Is this the first initialization? If so it's not really a rehashing - * we just set the first hash table so that it can accept keys. */ - if (d->ht[0].table == NULL) { - d->ht[0] = n; - return DICT_OK; - } - - /* Prepare a second hash table for incremental rehashing */ - d->ht[1] = n; - d->rehashidx = 0; - return DICT_OK; -} - -/* Performs N steps of incremental rehashing. 
Returns 1 if there are still - * keys to move from the old to the new hash table, otherwise 0 is returned. - * - * Note that a rehashing step consists in moving a bucket (that may have more - * than one key as we use chaining) from the old to the new hash table, however - * since part of the hash table may be composed of empty spaces, it is not - * guaranteed that this function will rehash even a single bucket, since it - * will visit at max N*10 empty buckets in total, otherwise the amount of - * work it does would be unbound and the function may block for a long time. */ -int dictRehash(dict *d, int n) { - int empty_visits = n*10; /* Max number of empty buckets to visit. */ - if (!dictIsRehashing(d)) return 0; - - while(n-- && d->ht[0].used != 0) { - dictEntry *de, *nextde; - - /* Note that rehashidx can't overflow as we are sure there are more - * elements because ht[0].used != 0 */ - assert(d->ht[0].size > (unsigned long)d->rehashidx); - while(d->ht[0].table[d->rehashidx] == NULL) { - d->rehashidx++; - if (--empty_visits == 0) return 1; - } - de = d->ht[0].table[d->rehashidx]; - /* Move all the keys in this bucket from the old to the new hash HT */ - while(de) { - uint64_t h; - - nextde = de->next; - /* Get the index in the new hash table */ - h = dictHashKey(d, de->key) & d->ht[1].sizemask; - de->next = d->ht[1].table[h]; - d->ht[1].table[h] = de; - d->ht[0].used--; - d->ht[1].used++; - de = nextde; - } - d->ht[0].table[d->rehashidx] = NULL; - d->rehashidx++; - } - - /* Check if we already rehashed the whole table... */ - if (d->ht[0].used == 0) { - zfree(d->ht[0].table); - d->ht[0] = d->ht[1]; - _dictReset(&d->ht[1]); - d->rehashidx = -1; - return 0; - } - - /* More to rehash... 
*/ - return 1; -} - -long long timeInMilliseconds(void) { - struct timeval tv; - - gettimeofday(&tv,NULL); - return (((long long)tv.tv_sec)*1000)+(tv.tv_usec/1000); -} - -/* Rehash for an amount of time between ms milliseconds and ms+1 milliseconds */ -int dictRehashMilliseconds(dict *d, int ms) { - long long start = timeInMilliseconds(); - int rehashes = 0; - - while(dictRehash(d,100)) { - rehashes += 100; - if (timeInMilliseconds()-start > ms) break; - } - return rehashes; -} - -/* This function performs just a step of rehashing, and only if there are - * no safe iterators bound to our hash table. When we have iterators in the - * middle of a rehashing we can't mess with the two hash tables otherwise - * some element can be missed or duplicated. - * - * This function is called by common lookup or update operations in the - * dictionary so that the hash table automatically migrates from H1 to H2 - * while it is actively used. */ -static void _dictRehashStep(dict *d) { - if (d->iterators == 0) dictRehash(d,1); -} - -/* Add an element to the target hash table */ -int dictAdd(dict *d, void *key, void *val) -{ - dictEntry *entry = dictAddRaw(d,key,NULL); - - if (!entry) return DICT_ERR; - dictSetVal(d, entry, val); - return DICT_OK; -} - -/* Low level add or find: - * This function adds the entry but instead of setting a value returns the - * dictEntry structure to the user, that will make sure to fill the value - * field as he wishes. - * - * This function is also directly exposed to the user API to be called - * mainly in order to store non-pointers inside the hash value, example: - * - * entry = dictAddRaw(dict,mykey,NULL); - * if (entry != NULL) dictSetSignedIntegerVal(entry,1000); - * - * Return values: - * - * If key already exists NULL is returned, and "*existing" is populated - * with the existing entry if existing is not NULL. - * - * If key was added, the hash entry is returned to be manipulated by the caller. 
- */ -dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing) -{ - long index; - dictEntry *entry; - dictht *ht; - - if (dictIsRehashing(d)) _dictRehashStep(d); - - /* Get the index of the new element, or -1 if - * the element already exists. */ - if ((index = _dictKeyIndex(d, key, dictHashKey(d,key), existing)) == -1) - return NULL; - - /* Allocate the memory and store the new entry. - * Insert the element in top, with the assumption that in a database - * system it is more likely that recently added entries are accessed - * more frequently. */ - ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0]; - entry = zmalloc(sizeof(*entry)); - entry->next = ht->table[index]; - ht->table[index] = entry; - ht->used++; - - /* Set the hash entry fields. */ - dictSetKey(d, entry, key); - return entry; -} - -/* Add or Overwrite: - * Add an element, discarding the old value if the key already exists. - * Return 1 if the key was added from scratch, 0 if there was already an - * element with such key and dictReplace() just performed a value update - * operation. */ -int dictReplace(dict *d, void *key, void *val) -{ - dictEntry *entry, *existing, auxentry; - - /* Try to add the element. If the key - * does not exists dictAdd will suceed. */ - entry = dictAddRaw(d,key,&existing); - if (entry) { - dictSetVal(d, entry, val); - return 1; - } - - /* Set the new value and free the old one. Note that it is important - * to do that in this order, as the value may just be exactly the same - * as the previous one. In this context, think to reference counting, - * you want to increment (set), and then decrement (free), and not the - * reverse. 
*/ - auxentry = *existing; - dictSetVal(d, existing, val); - dictFreeVal(d, &auxentry); - return 0; -} - -/* Add or Find: - * dictAddOrFind() is simply a version of dictAddRaw() that always - * returns the hash entry of the specified key, even if the key already - * exists and can't be added (in that case the entry of the already - * existing key is returned.) - * - * See dictAddRaw() for more information. */ -dictEntry *dictAddOrFind(dict *d, void *key) { - dictEntry *entry, *existing; - entry = dictAddRaw(d,key,&existing); - return entry ? entry : existing; -} - -/* Search and remove an element. This is an helper function for - * dictDelete() and dictUnlink(), please check the top comment - * of those functions. */ -static dictEntry *dictGenericDelete(dict *d, const void *key, int nofree) { - uint64_t h, idx; - dictEntry *he, *prevHe; - int table; - - if (d->ht[0].used == 0 && d->ht[1].used == 0) return NULL; - - if (dictIsRehashing(d)) _dictRehashStep(d); - h = dictHashKey(d, key); - - for (table = 0; table <= 1; table++) { - idx = h & d->ht[table].sizemask; - he = d->ht[table].table[idx]; - prevHe = NULL; - while(he) { - if (key==he->key || dictCompareKeys(d, key, he->key)) { - /* Unlink the element from the list */ - if (prevHe) - prevHe->next = he->next; - else - d->ht[table].table[idx] = he->next; - if (!nofree) { - dictFreeKey(d, he); - dictFreeVal(d, he); - zfree(he); - } - d->ht[table].used--; - return he; - } - prevHe = he; - he = he->next; - } - if (!dictIsRehashing(d)) break; - } - return NULL; /* not found */ -} - -/* Remove an element, returning DICT_OK on success or DICT_ERR if the - * element was not found. */ -int dictDelete(dict *ht, const void *key) { - return dictGenericDelete(ht,key,0) ? DICT_OK : DICT_ERR; -} - -/* Remove an element from the table, but without actually releasing - * the key, value and dictionary entry. 
The dictionary entry is returned - * if the element was found (and unlinked from the table), and the user - * should later call `dictFreeUnlinkedEntry()` with it in order to release it. - * Otherwise if the key is not found, NULL is returned. - * - * This function is useful when we want to remove something from the hash - * table but want to use its value before actually deleting the entry. - * Without this function the pattern would require two lookups: - * - * entry = dictFind(...); - * // Do something with entry - * dictDelete(dictionary,entry); - * - * Thanks to this function it is possible to avoid this, and use - * instead: - * - * entry = dictUnlink(dictionary,entry); - * // Do something with entry - * dictFreeUnlinkedEntry(entry); // <- This does not need to lookup again. - */ -dictEntry *dictUnlink(dict *ht, const void *key) { - return dictGenericDelete(ht,key,1); -} - -/* You need to call this function to really free the entry after a call - * to dictUnlink(). It's safe to call this function with 'he' = NULL. 
*/ -void dictFreeUnlinkedEntry(dict *d, dictEntry *he) { - if (he == NULL) return; - dictFreeKey(d, he); - dictFreeVal(d, he); - zfree(he); -} - -/* Destroy an entire dictionary */ -int _dictClear(dict *d, dictht *ht, void(callback)(void *)) { - unsigned long i; - - /* Free all the elements */ - for (i = 0; i < ht->size && ht->used > 0; i++) { - dictEntry *he, *nextHe; - - if (callback && (i & 65535) == 0) callback(d->privdata); - - if ((he = ht->table[i]) == NULL) continue; - while(he) { - nextHe = he->next; - dictFreeKey(d, he); - dictFreeVal(d, he); - zfree(he); - ht->used--; - he = nextHe; - } - } - /* Free the table and the allocated cache structure */ - zfree(ht->table); - /* Re-initialize the table */ - _dictReset(ht); - return DICT_OK; /* never fails */ -} - -/* Clear & Release the hash table */ -void dictRelease(dict *d) -{ - _dictClear(d,&d->ht[0],NULL); - _dictClear(d,&d->ht[1],NULL); - zfree(d); -} - -dictEntry *dictFind(dict *d, const void *key) -{ - dictEntry *he; - uint64_t h, idx, table; - - if (d->ht[0].used + d->ht[1].used == 0) return NULL; /* dict is empty */ - if (dictIsRehashing(d)) _dictRehashStep(d); - h = dictHashKey(d, key); - for (table = 0; table <= 1; table++) { - idx = h & d->ht[table].sizemask; - he = d->ht[table].table[idx]; - while(he) { - if (key==he->key || dictCompareKeys(d, key, he->key)) - return he; - he = he->next; - } - if (!dictIsRehashing(d)) return NULL; - } - return NULL; -} - -void *dictFetchValue(dict *d, const void *key) { - dictEntry *he; - - he = dictFind(d,key); - return he ? dictGetVal(he) : NULL; -} - -/* A fingerprint is a 64 bit number that represents the state of the dictionary - * at a given time, it's just a few dict properties xored together. - * When an unsafe iterator is initialized, we get the dict fingerprint, and check - * the fingerprint again when the iterator is released. 
- * If the two fingerprints are different it means that the user of the iterator - * performed forbidden operations against the dictionary while iterating. */ -long long dictFingerprint(dict *d) { - long long integers[6], hash = 0; - int j; - - integers[0] = (long) d->ht[0].table; - integers[1] = d->ht[0].size; - integers[2] = d->ht[0].used; - integers[3] = (long) d->ht[1].table; - integers[4] = d->ht[1].size; - integers[5] = d->ht[1].used; - - /* We hash N integers by summing every successive integer with the integer - * hashing of the previous sum. Basically: - * - * Result = hash(hash(hash(int1)+int2)+int3) ... - * - * This way the same set of integers in a different order will (likely) hash - * to a different number. */ - for (j = 0; j < 6; j++) { - hash += integers[j]; - /* For the hashing step we use Tomas Wang's 64 bit integer hash. */ - hash = (~hash) + (hash << 21); // hash = (hash << 21) - hash - 1; - hash = hash ^ (hash >> 24); - hash = (hash + (hash << 3)) + (hash << 8); // hash * 265 - hash = hash ^ (hash >> 14); - hash = (hash + (hash << 2)) + (hash << 4); // hash * 21 - hash = hash ^ (hash >> 28); - hash = hash + (hash << 31); - } - return hash; -} - -dictIterator *dictGetIterator(dict *d) -{ - dictIterator *iter = zmalloc(sizeof(*iter)); - - iter->d = d; - iter->table = 0; - iter->index = -1; - iter->safe = 0; - iter->entry = NULL; - iter->nextEntry = NULL; - return iter; -} - -dictIterator *dictGetSafeIterator(dict *d) { - dictIterator *i = dictGetIterator(d); - - i->safe = 1; - return i; -} - -dictEntry *dictNext(dictIterator *iter) -{ - while (1) { - if (iter->entry == NULL) { - dictht *ht = &iter->d->ht[iter->table]; - if (iter->index == -1 && iter->table == 0) { - if (iter->safe) - iter->d->iterators++; - else - iter->fingerprint = dictFingerprint(iter->d); - } - iter->index++; - if (iter->index >= (long) ht->size) { - if (dictIsRehashing(iter->d) && iter->table == 0) { - iter->table++; - iter->index = 0; - ht = &iter->d->ht[1]; - } else { - 
break; - } - } - iter->entry = ht->table[iter->index]; - } else { - iter->entry = iter->nextEntry; - } - if (iter->entry) { - /* We need to save the 'next' here, the iterator user - * may delete the entry we are returning. */ - iter->nextEntry = iter->entry->next; - return iter->entry; - } - } - return NULL; -} - -void dictReleaseIterator(dictIterator *iter) -{ - if (!(iter->index == -1 && iter->table == 0)) { - if (iter->safe) - iter->d->iterators--; - else - assert(iter->fingerprint == dictFingerprint(iter->d)); - } - zfree(iter); -} - -/* Return a random entry from the hash table. Useful to - * implement randomized algorithms */ -dictEntry *dictGetRandomKey(dict *d) -{ - dictEntry *he, *orighe; - unsigned long h; - int listlen, listele; - - if (dictSize(d) == 0) return NULL; - if (dictIsRehashing(d)) _dictRehashStep(d); - if (dictIsRehashing(d)) { - do { - /* We are sure there are no elements in indexes from 0 - * to rehashidx-1 */ - h = d->rehashidx + (random() % (d->ht[0].size + - d->ht[1].size - - d->rehashidx)); - he = (h >= d->ht[0].size) ? d->ht[1].table[h - d->ht[0].size] : - d->ht[0].table[h]; - } while(he == NULL); - } else { - do { - h = random() & d->ht[0].sizemask; - he = d->ht[0].table[h]; - } while(he == NULL); - } - - /* Now we found a non empty bucket, but it is a linked - * list and we need to get a random element from the list. - * The only sane way to do so is counting the elements and - * select a random index. */ - listlen = 0; - orighe = he; - while(he) { - he = he->next; - listlen++; - } - listele = random() % listlen; - he = orighe; - while(listele--) he = he->next; - return he; -} - -/* This function samples the dictionary to return a few keys from random - * locations. - * - * It does not guarantee to return all the keys specified in 'count', nor - * it does guarantee to return non-duplicated elements, however it will make - * some effort to do both things. 
- * - * Returned pointers to hash table entries are stored into 'des' that - * points to an array of dictEntry pointers. The array must have room for - * at least 'count' elements, that is the argument we pass to the function - * to tell how many random elements we need. - * - * The function returns the number of items stored into 'des', that may - * be less than 'count' if the hash table has less than 'count' elements - * inside, or if not enough elements were found in a reasonable amount of - * steps. - * - * Note that this function is not suitable when you need a good distribution - * of the returned items, but only when you need to "sample" a given number - * of continuous elements to run some kind of algorithm or to produce - * statistics. However the function is much faster than dictGetRandomKey() - * at producing N elements. */ -unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { - unsigned long j; /* internal hash table id, 0 or 1. */ - unsigned long tables; /* 1 or 2 tables? */ - unsigned long stored = 0, maxsizemask; - unsigned long maxsteps; - - if (dictSize(d) < count) count = dictSize(d); - maxsteps = count*10; - - /* Try to do a rehashing work proportional to 'count'. */ - for (j = 0; j < count; j++) { - if (dictIsRehashing(d)) - _dictRehashStep(d); - else - break; - } - - tables = dictIsRehashing(d) ? 2 : 1; - maxsizemask = d->ht[0].sizemask; - if (tables > 1 && maxsizemask < d->ht[1].sizemask) - maxsizemask = d->ht[1].sizemask; - - /* Pick a random point inside the larger table. */ - unsigned long i = random() & maxsizemask; - unsigned long emptylen = 0; /* Continuous empty entries so far. */ - while(stored < count && maxsteps--) { - for (j = 0; j < tables; j++) { - /* Invariant of the dict.c rehashing: up to the indexes already - * visited in ht[0] during the rehashing, there are no populated - * buckets, so we can skip ht[0] for indexes between 0 and idx-1. 
*/ - if (tables == 2 && j == 0 && i < (unsigned long) d->rehashidx) { - /* Moreover, if we are currently out of range in the second - * table, there will be no elements in both tables up to - * the current rehashing index, so we jump if possible. - * (this happens when going from big to small table). */ - if (i >= d->ht[1].size) i = d->rehashidx; - continue; - } - if (i >= d->ht[j].size) continue; /* Out of range for this table. */ - dictEntry *he = d->ht[j].table[i]; - - /* Count contiguous empty buckets, and jump to other - * locations if they reach 'count' (with a minimum of 5). */ - if (he == NULL) { - emptylen++; - if (emptylen >= 5 && emptylen > count) { - i = random() & maxsizemask; - emptylen = 0; - } - } else { - emptylen = 0; - while (he) { - /* Collect all the elements of the buckets found non - * empty while iterating. */ - *des = he; - des++; - he = he->next; - stored++; - if (stored == count) return stored; - } - } - } - i = (i+1) & maxsizemask; - } - return stored; -} - -/* Function to reverse bits. Algorithm from: - * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */ -static unsigned long rev(unsigned long v) { - unsigned long s = 8 * sizeof(v); // bit size; must be power of 2 - unsigned long mask = ~0; - while ((s >>= 1) > 0) { - mask ^= (mask << s); - v = ((v >> s) & mask) | ((v << s) & ~mask); - } - return v; -} - -/* dictScan() is used to iterate over the elements of a dictionary. - * - * Iterating works the following way: - * - * 1) Initially you call the function using a cursor (v) value of 0. - * 2) The function performs one step of the iteration, and returns the - * new cursor value you must use in the next call. - * 3) When the returned cursor is 0, the iteration is complete. - * - * The function guarantees all elements present in the - * dictionary get returned between the start and end of the iteration. - * However it is possible some elements get returned multiple times. 
- * - * For every element returned, the callback argument 'fn' is - * called with 'privdata' as first argument and the dictionary entry - * 'de' as second argument. - * - * HOW IT WORKS. - * - * The iteration algorithm was designed by Pieter Noordhuis. - * The main idea is to increment a cursor starting from the higher order - * bits. That is, instead of incrementing the cursor normally, the bits - * of the cursor are reversed, then the cursor is incremented, and finally - * the bits are reversed again. - * - * This strategy is needed because the hash table may be resized between - * iteration calls. - * - * dict.c hash tables are always power of two in size, and they - * use chaining, so the position of an element in a given table is given - * by computing the bitwise AND between Hash(key) and SIZE-1 - * (where SIZE-1 is always the mask that is equivalent to taking the rest - * of the division between the Hash of the key and SIZE). - * - * For example if the current hash table size is 16, the mask is - * (in binary) 1111. The position of a key in the hash table will always be - * the last four bits of the hash output, and so forth. - * - * WHAT HAPPENS IF THE TABLE CHANGES IN SIZE? - * - * If the hash table grows, elements can go anywhere in one multiple of - * the old bucket: for example let's say we already iterated with - * a 4 bit cursor 1100 (the mask is 1111 because hash table size = 16). - * - * If the hash table will be resized to 64 elements, then the new mask will - * be 111111. The new buckets you obtain by substituting in ??1100 - * with either 0 or 1 can be targeted only by keys we already visited - * when scanning the bucket 1100 in the smaller hash table. - * - * By iterating the higher bits first, because of the inverted counter, the - * cursor does not need to restart if the table size gets bigger. 
It will - * continue iterating using cursors without '1100' at the end, and also - * without any other combination of the final 4 bits already explored. - * - * Similarly when the table size shrinks over time, for example going from - * 16 to 8, if a combination of the lower three bits (the mask for size 8 - * is 111) were already completely explored, it would not be visited again - * because we are sure we tried, for example, both 0111 and 1111 (all the - * variations of the higher bit) so we don't need to test it again. - * - * WAIT... YOU HAVE *TWO* TABLES DURING REHASHING! - * - * Yes, this is true, but we always iterate the smaller table first, then - * we test all the expansions of the current cursor into the larger - * table. For example if the current cursor is 101 and we also have a - * larger table of size 16, we also test (0)101 and (1)101 inside the larger - * table. This reduces the problem back to having only one table, where - * the larger one, if it exists, is just an expansion of the smaller one. - * - * LIMITATIONS - * - * This iterator is completely stateless, and this is a huge advantage, - * including no additional memory used. - * - * The disadvantages resulting from this design are: - * - * 1) It is possible we return elements more than once. However this is usually - * easy to deal with in the application level. - * 2) The iterator must return multiple elements per call, as it needs to always - * return all the keys chained in a given bucket, and all the expansions, so - * we are sure we don't miss keys moving during rehashing. - * 3) The reverse cursor is somewhat hard to understand at first, but this - * comment is supposed to help. 
- */ -unsigned long dictScan(dict *d, - unsigned long v, - dictScanFunction *fn, - dictScanBucketFunction* bucketfn, - void *privdata) -{ - dictht *t0, *t1; - const dictEntry *de, *next; - unsigned long m0, m1; - - if (dictSize(d) == 0) return 0; - - if (!dictIsRehashing(d)) { - t0 = &(d->ht[0]); - m0 = t0->sizemask; - - /* Emit entries at cursor */ - if (bucketfn) bucketfn(privdata, &t0->table[v & m0]); - de = t0->table[v & m0]; - while (de) { - next = de->next; - fn(privdata, de); - de = next; - } - - } else { - t0 = &d->ht[0]; - t1 = &d->ht[1]; - - /* Make sure t0 is the smaller and t1 is the bigger table */ - if (t0->size > t1->size) { - t0 = &d->ht[1]; - t1 = &d->ht[0]; - } - - m0 = t0->sizemask; - m1 = t1->sizemask; - - /* Emit entries at cursor */ - if (bucketfn) bucketfn(privdata, &t0->table[v & m0]); - de = t0->table[v & m0]; - while (de) { - next = de->next; - fn(privdata, de); - de = next; - } - - /* Iterate over indices in larger table that are the expansion - * of the index pointed to by the cursor in the smaller table */ - do { - /* Emit entries at cursor */ - if (bucketfn) bucketfn(privdata, &t1->table[v & m1]); - de = t1->table[v & m1]; - while (de) { - next = de->next; - fn(privdata, de); - de = next; - } - - /* Increment bits not covered by the smaller mask */ - v = (((v | m0) + 1) & ~m0) | (v & m0); - - /* Continue while bits covered by mask difference is non-zero */ - } while (v & (m0 ^ m1)); - } - - /* Set unmasked bits so incrementing the reversed cursor - * operates on the masked bits of the smaller table */ - v |= ~m0; - - /* Increment the reverse cursor */ - v = rev(v); - v++; - v = rev(v); - - return v; -} - -/* ------------------ compat functions required in hiredis ------------------ */ -void dictInitIterator(dictIterator *iter, dict *d) -{ - // please refer to dictGetSafeIterator() - iter->d = d; - iter->table = 0; - iter->index = -1; - iter->safe = 1; - iter->entry = NULL; - iter->nextEntry = NULL; -} - -/* ------------------------- 
private functions ------------------------------ */ - -/* Expand the hash table if needed */ -static int _dictExpandIfNeeded(dict *d) -{ - /* Incremental rehashing already in progress. Return. */ - if (dictIsRehashing(d)) return DICT_OK; - - /* If the hash table is empty expand it to the initial size. */ - if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE); - - /* If we reached the 1:1 ratio, and we are allowed to resize the hash - * table (global setting) or we should avoid it but the ratio between - * elements/buckets is over the "safe" threshold, we resize doubling - * the number of buckets. */ - if (d->ht[0].used >= d->ht[0].size && - (dict_can_resize || - d->ht[0].used/d->ht[0].size > dict_force_resize_ratio)) - { - return dictExpand(d, d->ht[0].used*2); - } - return DICT_OK; -} - -/* Our hash table capability is a power of two */ -static unsigned long _dictNextPower(unsigned long size) -{ - unsigned long i = DICT_HT_INITIAL_SIZE; - - if (size >= LONG_MAX) return LONG_MAX + 1LU; - while(1) { - if (i >= size) - return i; - i *= 2; - } -} - -/* Returns the index of a free slot that can be populated with - * a hash entry for the given 'key'. - * If the key already exists, -1 is returned - * and the optional output parameter may be filled. - * - * Note that if we are in the process of rehashing the hash table, the - * index is always returned in the context of the second (new) hash table. 
*/ -static long _dictKeyIndex(dict *d, const void *key, uint64_t hash, dictEntry **existing) -{ - unsigned long idx, table; - dictEntry *he; - if (existing) *existing = NULL; - - /* Expand the hash table if needed */ - if (_dictExpandIfNeeded(d) == DICT_ERR) - return -1; - for (table = 0; table <= 1; table++) { - idx = hash & d->ht[table].sizemask; - /* Search if this slot does not already contain the given key */ - he = d->ht[table].table[idx]; - while(he) { - if (key==he->key || dictCompareKeys(d, key, he->key)) { - if (existing) *existing = he; - return -1; - } - he = he->next; - } - if (!dictIsRehashing(d)) break; - } - return idx; -} - -void dictEmpty(dict *d, void(callback)(void*)) { - _dictClear(d,&d->ht[0],callback); - _dictClear(d,&d->ht[1],callback); - d->rehashidx = -1; - d->iterators = 0; -} - -void dictEnableResize(void) { - dict_can_resize = 1; -} - -void dictDisableResize(void) { - dict_can_resize = 0; -} - -uint64_t dictGetHash(dict *d, const void *key) { - return dictHashKey(d, key); -} - -/* Finds the dictEntry reference by using pointer and pre-calculated hash. - * oldkey is a dead pointer and should not be accessed. - * the hash value should be provided using dictGetHash. - * no string / key comparison is performed. - * return value is the reference to the dictEntry if found, or NULL if not found. 
*/ -dictEntry **dictFindEntryRefByPtrAndHash(dict *d, const void *oldptr, uint64_t hash) { - dictEntry *he, **heref; - unsigned long idx, table; - - if (d->ht[0].used + d->ht[1].used == 0) return NULL; /* dict is empty */ - for (table = 0; table <= 1; table++) { - idx = hash & d->ht[table].sizemask; - heref = &d->ht[table].table[idx]; - he = *heref; - while(he) { - if (oldptr==he->key) - return heref; - heref = &he->next; - he = *heref; - } - if (!dictIsRehashing(d)) return NULL; - } - return NULL; -} - -/* ------------------------------- Debugging ---------------------------------*/ - -#define DICT_STATS_VECTLEN 50 -size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) { - unsigned long i, slots = 0, chainlen, maxchainlen = 0; - unsigned long totchainlen = 0; - unsigned long clvector[DICT_STATS_VECTLEN]; - size_t l = 0; - - if (ht->used == 0) { - return snprintf(buf,bufsize, - "No stats available for empty dictionaries\n"); - } - - /* Compute stats. */ - for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0; - for (i = 0; i < ht->size; i++) { - dictEntry *he; - - if (ht->table[i] == NULL) { - clvector[0]++; - continue; - } - slots++; - /* For each hash entry on this slot... */ - chainlen = 0; - he = ht->table[i]; - while(he) { - chainlen++; - he = he->next; - } - clvector[(chainlen < DICT_STATS_VECTLEN) ? chainlen : (DICT_STATS_VECTLEN-1)]++; - if (chainlen > maxchainlen) maxchainlen = chainlen; - totchainlen += chainlen; - } - - /* Generate human readable stats. */ - l += snprintf(buf+l,bufsize-l, - "Hash table %d stats (%s):\n" - " table size: %ld\n" - " number of elements: %ld\n" - " different slots: %ld\n" - " max chain length: %ld\n" - " avg chain length (counted): %.02f\n" - " avg chain length (computed): %.02f\n" - " Chain length distribution:\n", - tableid, (tableid == 0) ? "main hash table" : "rehashing target", - ht->size, ht->used, slots, maxchainlen, - slots > 0 ? (float)totchainlen/slots : 0.0, - slots > 0 ? 
(float)ht->used/slots : 0.0); - - for (i = 0; i <= DICT_STATS_VECTLEN-1; i++) { - if (clvector[i] == 0) continue; - if (l >= bufsize) break; - l += snprintf(buf+l,bufsize-l, - " %s%ld: %ld (%.02f%%)\n", - (i == DICT_STATS_VECTLEN-1)?">= ":"", - i, clvector[i], ((float)clvector[i]/ht->size)*100); - } - - /* Unlike snprintf(), return the number of characters actually written. */ - if (bufsize) buf[bufsize-1] = '\0'; - return strlen(buf); -} - -void dictGetStats(char *buf, size_t bufsize, dict *d) { - size_t l; - char *orig_buf = buf; - size_t orig_bufsize = bufsize; - - l = _dictGetStatsHt(buf,bufsize,&d->ht[0],0); - buf += l; - bufsize -= l; - if (dictIsRehashing(d) && bufsize > 0) { - _dictGetStatsHt(buf,bufsize,&d->ht[1],1); - } - /* Make sure there is a NULL term at the end. */ - if (orig_bufsize) orig_buf[orig_bufsize-1] = '\0'; -} - -/* ------------------------------- Benchmark ---------------------------------*/ - -#ifdef DICT_BENCHMARK_MAIN - -#include "sds.h" - -uint64_t hashCallback(const void *key) { - return dictGenHashFunction((unsigned char*)key, sdslen((char*)key)); -} - -int compareCallback(void *privdata, const void *key1, const void *key2) { - int l1,l2; - DICT_NOTUSED(privdata); - - l1 = sdslen((sds)key1); - l2 = sdslen((sds)key2); - if (l1 != l2) return 0; - return memcmp(key1, key2, l1) == 0; -} - -void freeCallback(void *privdata, void *val) { - DICT_NOTUSED(privdata); - - sdsfree(val); -} - -dictType BenchmarkDictType = { - hashCallback, - NULL, - NULL, - compareCallback, - freeCallback, - NULL -}; - -#define start_benchmark() start = timeInMilliseconds() -#define end_benchmark(msg) do { \ - elapsed = timeInMilliseconds()-start; \ - printf(msg ": %ld items in %lld ms\n", count, elapsed); \ -} while(0); - -/* dict-benchmark [count] */ -int main(int argc, char **argv) { - long j; - long long start, elapsed; - dict *dict = dictCreate(&BenchmarkDictType,NULL); - long count = 0; - - if (argc == 2) { - count = strtol(argv[1],NULL,10); - } else { - 
count = 5000000; - } - - start_benchmark(); - for (j = 0; j < count; j++) { - int retval = dictAdd(dict,sdsfromlonglong(j),(void*)j); - assert(retval == DICT_OK); - } - end_benchmark("Inserting"); - assert((long)dictSize(dict) == count); - - /* Wait for rehashing. */ - while (dictIsRehashing(dict)) { - dictRehashMilliseconds(dict,100); - } - - start_benchmark(); - for (j = 0; j < count; j++) { - sds key = sdsfromlonglong(j); - dictEntry *de = dictFind(dict,key); - assert(de != NULL); - sdsfree(key); - } - end_benchmark("Linear access of existing elements"); - - start_benchmark(); - for (j = 0; j < count; j++) { - sds key = sdsfromlonglong(j); - dictEntry *de = dictFind(dict,key); - assert(de != NULL); - sdsfree(key); - } - end_benchmark("Linear access of existing elements (2nd round)"); - - start_benchmark(); - for (j = 0; j < count; j++) { - sds key = sdsfromlonglong(rand() % count); - dictEntry *de = dictFind(dict,key); - assert(de != NULL); - sdsfree(key); - } - end_benchmark("Random access of existing elements"); - - start_benchmark(); - for (j = 0; j < count; j++) { - sds key = sdsfromlonglong(rand() % count); - key[0] = 'X'; - dictEntry *de = dictFind(dict,key); - assert(de == NULL); - sdsfree(key); - } - end_benchmark("Accessing missing"); - - start_benchmark(); - for (j = 0; j < count; j++) { - sds key = sdsfromlonglong(j); - int retval = dictDelete(dict,key); - assert(retval == DICT_OK); - key[0] += 17; /* Change first number to letter. */ - retval = dictAdd(dict,key,(void*)j); - assert(retval == DICT_OK); - } - end_benchmark("Removing and adding"); -} -#endif diff --git a/src/external/dict.h b/src/external/dict.h deleted file mode 100644 index 6575d174fa9..00000000000 --- a/src/external/dict.h +++ /dev/null @@ -1,195 +0,0 @@ -/* Hash Tables Implementation. - * - * This file implements in-memory hash tables with insert/del/replace/find/ - * get-random-element operations. 
Hash tables will auto-resize if needed - * tables of power of two in size are used, collisions are handled by - * chaining. See the source code for more information... :) - * - * Copyright (c) 2006-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include - -#ifndef __DICT_H -#define __DICT_H - -#define DICT_OK 0 -#define DICT_ERR 1 - -/* Unused arguments generate annoying warnings... 
*/ -#define DICT_NOTUSED(V) ((void) V) - -typedef struct dictEntry { - void *key; - union { - void *val; - uint64_t u64; - int64_t s64; - double d; - } v; - struct dictEntry *next; -} dictEntry; - -typedef struct dictType { - uint64_t (*hashFunction)(const void *key); - void *(*keyDup)(void *privdata, const void *key); - void *(*valDup)(void *privdata, const void *obj); - int (*keyCompare)(void *privdata, const void *key1, const void *key2); - void (*keyDestructor)(void *privdata, void *key); - void (*valDestructor)(void *privdata, void *obj); -} dictType; - -/* This is our hash table structure. Every dictionary has two of this as we - * implement incremental rehashing, for the old to the new table. */ -typedef struct dictht { - dictEntry **table; - unsigned long size; - unsigned long sizemask; - unsigned long used; -} dictht; - -typedef struct dict { - dictType *type; - void *privdata; - dictht ht[2]; - long rehashidx; /* rehashing not in progress if rehashidx == -1 */ - unsigned long iterators; /* number of iterators currently running */ -} dict; - -/* If safe is set to 1 this is a safe iterator, that means, you can call - * dictAdd, dictFind, and other functions against the dictionary even while - * iterating. Otherwise it is a non safe iterator, and only dictNext() - * should be called while iterating. */ -typedef struct dictIterator { - dict *d; - long index; - int table, safe; - dictEntry *entry, *nextEntry; - /* unsafe iterator fingerprint for misuse detection. 
*/ - long long fingerprint; -} dictIterator; - -typedef void (dictScanFunction)(void *privdata, const dictEntry *de); -typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref); - -/* This is the initial size of every hash table */ -#define DICT_HT_INITIAL_SIZE 4 - -/* ------------------------------- Macros ------------------------------------*/ -#define dictFreeVal(d, entry) \ - if ((d)->type->valDestructor) \ - (d)->type->valDestructor((d)->privdata, (entry)->v.val) - -#define dictSetVal(d, entry, _val_) do { \ - if ((d)->type->valDup) \ - (entry)->v.val = (d)->type->valDup((d)->privdata, _val_); \ - else \ - (entry)->v.val = (_val_); \ -} while(0) - -#define dictSetSignedIntegerVal(entry, _val_) \ - do { (entry)->v.s64 = _val_; } while(0) - -#define dictSetUnsignedIntegerVal(entry, _val_) \ - do { (entry)->v.u64 = _val_; } while(0) - -#define dictSetDoubleVal(entry, _val_) \ - do { (entry)->v.d = _val_; } while(0) - -#define dictFreeKey(d, entry) \ - if ((d)->type->keyDestructor) \ - (d)->type->keyDestructor((d)->privdata, (entry)->key) - -#define dictSetKey(d, entry, _key_) do { \ - if ((d)->type->keyDup) \ - (entry)->key = (d)->type->keyDup((d)->privdata, _key_); \ - else \ - (entry)->key = (_key_); \ -} while(0) - -#define dictCompareKeys(d, key1, key2) \ - (((d)->type->keyCompare) ? 
\ - (d)->type->keyCompare((d)->privdata, key1, key2) : \ - (key1) == (key2)) - -#define dictHashKey(d, key) (d)->type->hashFunction(key) -#define dictGetKey(he) ((he)->key) -#define dictGetVal(he) ((he)->v.val) -#define dictGetSignedIntegerVal(he) ((he)->v.s64) -#define dictGetUnsignedIntegerVal(he) ((he)->v.u64) -#define dictGetDoubleVal(he) ((he)->v.d) -#define dictSlots(d) ((d)->ht[0].size+(d)->ht[1].size) -#define dictSize(d) ((d)->ht[0].used+(d)->ht[1].used) -#define dictIsRehashing(d) ((d)->rehashidx != -1) - -/* API */ -dict *dictCreate(dictType *type, void *privDataPtr); -int dictExpand(dict *d, unsigned long size); -int dictAdd(dict *d, void *key, void *val); -dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing); -dictEntry *dictAddOrFind(dict *d, void *key); -int dictReplace(dict *d, void *key, void *val); -int dictDelete(dict *d, const void *key); -dictEntry *dictUnlink(dict *ht, const void *key); -void dictFreeUnlinkedEntry(dict *d, dictEntry *he); -void dictRelease(dict *d); -dictEntry * dictFind(dict *d, const void *key); -void *dictFetchValue(dict *d, const void *key); -int dictResize(dict *d); -dictIterator *dictGetIterator(dict *d); -dictIterator *dictGetSafeIterator(dict *d); -dictEntry *dictNext(dictIterator *iter); -void dictReleaseIterator(dictIterator *iter); -dictEntry *dictGetRandomKey(dict *d); -unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count); -void dictGetStats(char *buf, size_t bufsize, dict *d); -uint64_t dictGenHashFunction(const void *key, int len); -uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len); -void dictEmpty(dict *d, void(callback)(void*)); -void dictEnableResize(void); -void dictDisableResize(void); -int dictRehash(dict *d, int n); -int dictRehashMilliseconds(dict *d, int ms); -void dictSetHashFunctionSeed(uint8_t *seed); -uint8_t *dictGetHashFunctionSeed(void); -unsigned long dictScan(dict *d, unsigned long v, dictScanFunction *fn, dictScanBucketFunction *bucketfn, void 
*privdata); -uint64_t dictGetHash(dict *d, const void *key); -dictEntry **dictFindEntryRefByPtrAndHash(dict *d, const void *oldptr, uint64_t hash); - -/* compat functions required for hiredis */ -void dictInitIterator(dictIterator *iter, dict *d); -#define dictGetEntryVal dictGetVal -#define dictSetHashVal dictSetVal - -/* Hash table types */ -extern dictType dictTypeHeapStringCopyKey; -extern dictType dictTypeHeapStrings; -extern dictType dictTypeHeapStringCopyKeyValue; - -#endif /* __DICT_H */ diff --git a/src/include/pcp/GNUmakefile b/src/include/pcp/GNUmakefile index 2405ca2b4a5..0c33cf63003 100644 --- a/src/include/pcp/GNUmakefile +++ b/src/include/pcp/GNUmakefile @@ -18,10 +18,9 @@ include $(TOPDIR)/src/include/builddefs -include ./GNUlocaldefs INIH_HFILES = ini.h -SDSH_HFILES = sds.h sdsalloc.h USDT_HFILES = usdt.h -VENDORED_HFILES = $(INIH_HFILES) $(SDSH_HFILES) $(USDT_HFILES) -EXTERNAL_HFILES = dict.h +VENDORED_HFILES = $(INIH_HFILES) $(USDT_HFILES) +LIBVALKEY_HFILES = sds.h sdsalloc.h dict.h HFILES = pmapi.h impl.h pmda.h pmtime.h pmdaroot.h pmafm.h \ trace.h trace_dev.h mmv_stats.h mmv_dev.h import.h \ pmjson.h pmhttp.h pmdbg.h pmwebapi.h deprecated.h \ @@ -37,20 +36,18 @@ endif GENERATED_HFILES = $(CONFFILES) NOSHIP_HFILES = libpcp.h sdsalloc.h -LDIRT = $(GENERATED_HFILES) $(VENDORED_HFILES) $(EXTERNAL_HFILES) +LDIRT = $(GENERATED_HFILES) $(VENDORED_HFILES) $(LIBVALKEY_HFILES) default :: default_pcp -default_pcp : $(HEADERS) $(GENERATED_HFILES) $(VENDORED_HFILES) $(EXTERNAL_HFILES) +default_pcp : $(HEADERS) $(GENERATED_HFILES) $(VENDORED_HFILES) $(LIBVALKEY_HFILES) include $(BUILDRULES) -$(EXTERNAL_HFILES): - $(LN_S) -f $(TOPDIR)/src/external/$@ . +$(LIBVALKEY_HFILES): + $(LN_S) -f $(TOPDIR)/vendor/github.com/valkey-io/libvalkey/src/$@ . $(INIH_HFILES): $(LN_S) -f $(TOPDIR)/vendor/github.com/benhoyt/inih/$@ . -$(SDSH_HFILES): - $(LN_S) -f $(TOPDIR)/vendor/github.com/redis/hiredis/$@ . 
$(USDT_HFILES): $(LN_S) -f $(TOPDIR)/vendor/github.com/libbpf/usdt/$@ . diff --git a/src/libpcp3/src/include/pcp/dict.h b/src/libpcp3/src/include/pcp/dict.h index 6575d174fa9..79eaf6ca15e 100644 --- a/src/libpcp3/src/include/pcp/dict.h +++ b/src/libpcp3/src/include/pcp/dict.h @@ -182,7 +182,7 @@ unsigned long dictScan(dict *d, unsigned long v, dictScanFunction *fn, dictScanB uint64_t dictGetHash(dict *d, const void *key); dictEntry **dictFindEntryRefByPtrAndHash(dict *d, const void *oldptr, uint64_t hash); -/* compat functions required for hiredis */ +/* compat functions required for libvalkey */ void dictInitIterator(dictIterator *iter, dict *d); #define dictGetEntryVal dictGetVal #define dictSetHashVal dictSetVal diff --git a/src/libpcp_web/src/GNUmakefile b/src/libpcp_web/src/GNUmakefile index b1cee866b5b..8ace61f32dc 100644 --- a/src/libpcp_web/src/GNUmakefile +++ b/src/libpcp_web/src/GNUmakefile @@ -26,39 +26,34 @@ else INIH_XFILES = endif -HIREDIS_HFILES = $(addprefix deps/hiredis/, \ - alloc.h async.h async_private.h fmacros.h hiredis.h net.h \ - read.h sds.h sdsalloc.h sockcompat.h win32.h adapters/libuv.h) -HIREDIS_CFILES = $(addprefix deps/hiredis/, \ - alloc.c async.c hiredis.c net.c sds.c sockcompat.c read.c) -HIREDIS_XFILES = $(HIREDIS_HFILES) $(HIREDIS_CFILES) - -HIREDIS_CLUSTER_HFILES = $(addprefix deps/hiredis-cluster/, \ - adlist.h command.h hiarray.h hiutil.h hircluster.h win32.h \ - adapters/libuv.h) -HIREDIS_CLUSTER_CFILES = $(addprefix deps/hiredis-cluster/, \ - adlist.c command.c crc16.c hiarray.c hiutil.c hircluster.c) -HIREDIS_CLUSTER_XFILES = $(HIREDIS_CLUSTER_HFILES) $(HIREDIS_CLUSTER_CFILES) +LIBVALKEY_HFILES = $(addprefix deps/libvalkey/, \ + include/valkey/alloc.h include/valkey/async.h src/async_private.h src/fmacros.h include/valkey/valkey.h include/valkey/net.h \ + include/valkey/read.h include/valkey/visibility.h src/sds.h src/sdsalloc.h include/valkey/sockcompat.h src/win32.h include/valkey/adapters/libuv.h \ + src/adlist.h 
src/command.h src/vkutil.h include/valkey/cluster.h src/valkey_private.h src/cmddef.h src/dict.h) +LIBVALKEY_CFILES = $(addprefix deps/libvalkey/, \ + src/alloc.c src/async.c src/valkey.c src/net.c src/sds.c src/sockcompat.c src/read.c \ + src/adlist.c src/command.c src/crc16.c src/vkutil.c src/cluster.c src/conn.c src/dict.c) +LIBVALKEY_XFILES = $(LIBVALKEY_HFILES) $(LIBVALKEY_CFILES) CFILES = jsmn.c http_client.c http_parser.c siphash.c \ query.c schema.c load.c sha1.c util.c slots.c \ - keys.c dict.c maps.c batons.c encoding.c \ + keys.c maps.c batons.c encoding.c \ search.c json_helpers.c config.c \ - $(HIREDIS_CFILES) $(HIREDIS_CLUSTER_CFILES) + $(LIBVALKEY_CFILES) ifneq "$(HAVE_LIBINIH)" "true" CFILES += $(INIH_CFILES) endif HFILES = jsmn.h http_client.h http_parser.h zmalloc.h \ query.h schema.h load.h sha1.h util.h slots.h \ - keys.h dict.h maps.h batons.h encoding.h \ + keys.h maps.h batons.h encoding.h \ search.h discover.h private.h \ - $(HIREDIS_HFILES) $(HIREDIS_CLUSTER_HFILES) + $(LIBVALKEY_HFILES) ifneq "$(HAVE_LIBINIH)" "true" HFILES += $(INIH_HFILES) endif YFILES = query_parser.y XFILES = jsmn.c jsmn.h http_parser.c http_parser.h \ - sha1.c sha1.h siphash.c dict.c dict.h + sha1.c sha1.h siphash.c LLDLIBS = $(PCPWEBLIB_EXTRAS) $(LIB_FOR_MATH) $(LIB_FOR_REGEX) ifeq "$(TARGET_OS)" "mingw" @@ -72,7 +67,7 @@ ifneq "$(HAVE_LIBINIH)" "true" LCFLAGS += -Iinih endif -LCFLAGS += $(C99_CFLAGS) -DJSMN_PARENT_LINKS=1 -DJSMN_STRICT=1 -DHTTP_PARSER_STRICT=0 -Ideps +LCFLAGS += $(C99_CFLAGS) -DJSMN_PARENT_LINKS=1 -DJSMN_STRICT=1 -DHTTP_PARSER_STRICT=0 -Ideps -Ideps/libvalkey/include/valkey -Ideps/libvalkey/include -Ideps/libvalkey/src ifeq "$(HAVE_LIBUV)" "true" CFILES += discover.c loggroup.c webgroup.c timer.c @@ -113,12 +108,12 @@ SYMTARGET = endif VERSION_SCRIPT = exports -LDIRT = $(XFILES) $(YFILES) $(HIREDIS_XFILES) $(HIREDIS_CLUSTER_XFILES) $(SYMTARGET) $(YFILES:%.y=%.tab.?) 
exports +LDIRT = $(XFILES) $(YFILES) $(LIBVALKEY_XFILES) $(SYMTARGET) $(YFILES:%.y=%.tab.?) ifneq "$(HAVE_LIBINIH)" "true" LDIRT += $(INIH_XFILES) endif -default: exports $(XFILES) $(HIREDIS_XFILES) $(HIREDIS_CLUSTER_XFILES) $(INIH_XFILES) $(LIBTARGET) $(SYMTARGET) $(STATICLIBTARGET) +default: $(XFILES) $(LIBVALKEY_XFILES) $(INIH_XFILES) $(LIBTARGET) $(SYMTARGET) $(STATICLIBTARGET) include $(BUILDRULES) @@ -148,12 +143,10 @@ $(INIH_XFILES): mkdir -p deps/inih $(LN_S) -f $(realpath $(TOPDIR))/vendor/github.com/benhoyt/$(@:deps/%=%) $@ endif -$(HIREDIS_XFILES): - mkdir -p deps/hiredis/adapters - $(LN_S) -f $(realpath $(TOPDIR))/vendor/github.com/redis/$(@:deps/%=%) $@ -$(HIREDIS_CLUSTER_XFILES): - mkdir -p deps/hiredis-cluster/adapters - $(LN_S) -f $(realpath $(TOPDIR))/vendor/github.com/Nordix/$(@:deps/%=%) $@ +$(LIBVALKEY_XFILES): + mkdir -p deps/libvalkey/include/valkey/adapters + mkdir -p deps/libvalkey/src + $(LN_S) -f $(realpath $(TOPDIR))/vendor/github.com/valkey-io/$(@:deps/%=%) $@ .NOTPARALLEL: query_parser.tab.h query_parser.tab.c: query_parser.y query.h @@ -167,9 +160,11 @@ default_pcp: default install_pcp: install ifneq ($(LIBTARGET),) -$(LIBTARGET): $(VERSION_SCRIPT) $(XFILES) $(HIREDIS_XFILES) $(HIREDIS_CLUSTER_XFILES) $(INIH_XFILES) +$(LIBTARGET): $(VERSION_SCRIPT) $(XFILES) $(LIBVALKEY_XFILES) $(INIH_XFILES) endif +%.o: $(LIBVALKEY_XFILES) $(INIH_XFILES) + jsmn.o: jsmn.c jsmn.h discover.o: discover.h discover.c http_parser.o: http_parser.c http_parser.h diff --git a/src/libpcp_web/src/config.c b/src/libpcp_web/src/config.c index f9857126e39..dee2d9fef58 100644 --- a/src/libpcp_web/src/config.c +++ b/src/libpcp_web/src/config.c @@ -126,7 +126,7 @@ pmIniFileSetup(const char *progname) { dict *config; - if ((config = dictCreate(&sdsOwnDictCallBacks, "pmIniFileSetup")) == NULL) + if ((config = dictCreate(&sdsOwnDictCallBacks)) == NULL) return NULL; if (pmIniFileParse(progname, dict_handler, config) == 0) return config; diff --git 
a/src/libpcp_web/src/discover.c b/src/libpcp_web/src/discover.c index d44e9ef972c..2b4ad2972f1 100644 --- a/src/libpcp_web/src/discover.c +++ b/src/libpcp_web/src/discover.c @@ -2642,7 +2642,7 @@ pmDiscoverSetup(pmDiscoverModule *module, pmDiscoverCallBacks *cbs, void *arg) /* prepare for optional metric and indom exclusion */ if ((option = pmIniFileLookup(config, "discover", "exclude.metrics"))) { - if ((data->pmids = dictCreate(&intKeyDictCallBacks, NULL)) == NULL) + if ((data->pmids = dictCreate(&intKeyDictCallBacks)) == NULL) return -ENOMEM; /* parse comma-separated metric name glob patterns, in 'option' */ if ((ids = sdssplitlen(option, sdslen(option), ",", 1, &nids))) { @@ -2653,7 +2653,7 @@ pmDiscoverSetup(pmDiscoverModule *module, pmDiscoverCallBacks *cbs, void *arg) } } if ((option = pmIniFileLookup(config, "discover", "exclude.indoms"))) { - if ((data->indoms = dictCreate(&intKeyDictCallBacks, NULL)) == NULL) + if ((data->indoms = dictCreate(&intKeyDictCallBacks)) == NULL) return -ENOMEM; /* parse comma-separated indoms in 'option', convert to pmInDom */ if ((ids = sdssplitlen(option, sdslen(option), ",", 1, &nids))) { diff --git a/src/libpcp_web/src/exports.in b/src/libpcp_web/src/exports.in index a2a37d6bbbf..576b7073168 100644 --- a/src/libpcp_web/src/exports.in +++ b/src/libpcp_web/src/exports.in @@ -294,3 +294,11 @@ PCP_WEB_1.22 { pmLogPathsReset; pmLogPathsSetMetricRegistry; } PCP_WEB_1.21; + +PCP_WEB_1.23 { + dictGetVal; + dictGetKey; + dictInitIterator; + dictSetVal; + keySlotsContextFree; +} PCP_WEB_1.22; diff --git a/src/libpcp_web/src/keys.c b/src/libpcp_web/src/keys.c index c85d84b6ceb..ef32e1bf498 100644 --- a/src/libpcp_web/src/keys.c +++ b/src/libpcp_web/src/keys.c @@ -27,7 +27,7 @@ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ -#include +#include #include "keys.h" const char * diff --git a/src/libpcp_web/src/keys.h b/src/libpcp_web/src/keys.h index 2dd87a0a930..76f8c224db8 100644 --- a/src/libpcp_web/src/keys.h +++ b/src/libpcp_web/src/keys.h @@ -30,13 +30,14 @@ #ifndef RESP_KEYS_H #define RESP_KEYS_H -#include -#include +#include +#include +#include -#define RESP_OK REDIS_OK -#define RESP_ERR REDIS_ERR -#define RESP_ERR_IO REDIS_ERR_IO -#define RESP_CONN_UNIX REDIS_CONN_UNIX +#define RESP_OK VALKEY_OK +#define RESP_ERR VALKEY_ERR +#define RESP_ERR_IO VALKEY_ERR_IO +#define RESP_CONN_UNIX VALKEY_CONN_UNIX /* * Unfortunately there is no error code for these errors to use. @@ -47,49 +48,52 @@ #define RESP_ELOADDATA "loading the dataset in memory" #define RESP_ENOCLUSTER "ERR This instance has cluster support disabled" -#define RESP_KEEPALIVE_INTERVAL REDIS_KEEPALIVE_INTERVAL +#define RESP_KEEPALIVE_INTERVAL VALKEY_KEEPALIVE_INTERVAL /* * Protocol reply types */ -#define RESP_REPLY_STRING REDIS_REPLY_STRING -#define RESP_REPLY_ARRAY REDIS_REPLY_ARRAY -#define RESP_REPLY_BOOL REDIS_REPLY_BOOL -#define RESP_REPLY_DOUBLE REDIS_REPLY_DOUBLE -#define RESP_REPLY_INTEGER REDIS_REPLY_INTEGER -#define RESP_REPLY_MAP REDIS_REPLY_MAP -#define RESP_REPLY_NIL REDIS_REPLY_NIL -#define RESP_REPLY_SET REDIS_REPLY_SET -#define RESP_REPLY_STATUS REDIS_REPLY_STATUS -#define RESP_REPLY_ERROR REDIS_REPLY_ERROR +#define RESP_REPLY_STRING VALKEY_REPLY_STRING +#define RESP_REPLY_ARRAY VALKEY_REPLY_ARRAY +#define RESP_REPLY_BOOL VALKEY_REPLY_BOOL +#define RESP_REPLY_DOUBLE VALKEY_REPLY_DOUBLE +#define RESP_REPLY_INTEGER VALKEY_REPLY_INTEGER +#define RESP_REPLY_MAP VALKEY_REPLY_MAP +#define RESP_REPLY_NIL VALKEY_REPLY_NIL +#define RESP_REPLY_SET VALKEY_REPLY_SET +#define RESP_REPLY_STATUS VALKEY_REPLY_STATUS +#define RESP_REPLY_ERROR VALKEY_REPLY_ERROR -#define respReply redisReply -#define respReader redisReader -#define respReaderCreate redisReaderCreate -#define respReaderFeed redisReaderFeed -#define 
respReaderFree redisReaderFree -#define respReaderGetReply redisReaderGetReply +#define respReply valkeyReply +#define respReader valkeyReader +#define respReaderCreate valkeyReaderCreate +#define respReaderFeed valkeyReaderFeed +#define respReaderFree valkeyReaderFree +#define respReaderGetReply valkeyReaderGetReply -#define keysAsyncContext redisAsyncContext -#define keysAsyncEnableKeepAlive redisAsyncEnableKeepAlive -#define keysKeepAlive redisKeepAlive +#define keysAsyncContext valkeyAsyncContext +#define keysAsyncEnableKeepAlive valkeyAsyncEnableKeepAlive +#define keysKeepAlive valkeyKeepAlive -#define keyClusterAsyncFree redisClusterAsyncFree -#define keyClusterAsyncContext redisClusterAsyncContext -#define keyClusterAsyncContextInit redisClusterAsyncContextInit -#define keyClusterCallbackFn redisClusterCallbackFn -#define keyClusterConnect2 redisClusterConnect2 -#define keyClusterAsyncDisconnect redisClusterAsyncDisconnect -#define keyClusterSetOptionAddNodes redisClusterSetOptionAddNodes -#define keyClusterSetOptionPassword redisClusterSetOptionPassword -#define keyClusterSetOptionUsername redisClusterSetOptionUsername -#define keyClusterSetOptionConnectTimeout redisClusterSetOptionConnectTimeout -#define keyClusterSetOptionTimeout redisClusterSetOptionTimeout -#define keyClusterLibuvAttach redisClusterLibuvAttach -#define keyClusterAsyncSetConnectCallback redisClusterAsyncSetConnectCallback -#define keyClusterAsyncSetDisconnectCallback redisClusterAsyncSetDisconnectCallback -#define keyClusterAsyncFormattedCommand redisClusterAsyncFormattedCommand -#define keyClusterAsyncFormattedCommandToNode redisClusterAsyncFormattedCommandToNode +#define keyClusterAsyncFree valkeyClusterAsyncFree +#define keyClusterAsyncContext valkeyClusterAsyncContext +#define keyClusterAsyncContextInit valkeyClusterAsyncConnectWithOptions +#define keyClusterCallbackFn valkeyClusterCallbackFn +#define keyClusterConnect2 valkeyClusterConnectWithOptions +#define 
keyClusterAsyncDisconnect valkeyClusterAsyncDisconnect +#define keyClusterSetOptionAddNodes valkeyClusterOptions.initial_nodes +#define keyClusterSetOptionPassword valkeyClusterOptions.password +#define keyClusterSetOptionUsername valkeyClusterOptions.username +#define keyClusterSetOptionConnectTimeout valkeyClusterOptions.connect_timeout +#define keyClusterSetOptionTimeout valkeyClusterSetOptionTimeout +#define keyLibuvAttachAdapter valkeyLibuvAttachAdapter +#define keyClusterAsyncSetConnectCallback valkeyClusterOptions.async_connect_callback +#define keyClusterAsyncSetDisconnectCallback valkeyClusterOptions.async_disconnect_callback +#define keyClusterAsyncFormattedCommand valkeyClusterAsyncFormattedCommand +#define keyClusterAsyncFormattedCommandToNode valkeyClusterAsyncFormattedCommandToNode + +/* valkey cluster options */ +#define KEYOPT_BLOCKING_INITIAL_UPDATE VALKEY_OPT_BLOCKING_INITIAL_UPDATE extern const char *resp_reply_type(respReply *); extern int keysAsyncEnableKeepAlive(keysAsyncContext *); diff --git a/src/libpcp_web/src/load.c b/src/libpcp_web/src/load.c index eac565d43e3..b68ab930a57 100644 --- a/src/libpcp_web/src/load.c +++ b/src/libpcp_web/src/load.c @@ -61,9 +61,11 @@ pmidErr(seriesLoadBaton *baton, pmID pmid, const char *fmt, ...) 
char **names; sds msg; - if (dictFetchValue(baton->errors, &pmid) == NULL) { - dictAdd(baton->errors, &pmid, NULL); - if ((numnames = pmNameAll(pmid, &names)) < 1) + { + dictEntry *entry; + if ((entry = dictFind(baton->errors, &pmid)) == NULL) { + dictAdd(baton->errors, &pmid, NULL); + if ((numnames = pmNameAll(pmid, &names)) < 1) msg = sdsnew(""); else { msg = sdsnew(names[0]); @@ -464,9 +466,11 @@ series_cache_update(seriesLoadBaton *baton, struct dict *exclude) continue; /* check if in the restricted group (optional metric filter) */ - if (dictSize(baton->wanted) && - dictFetchValue(baton->wanted, &vsp->pmid) == NULL) - continue; + if (dictSize(baton->wanted)) { + dictEntry *entry; + if ((entry = dictFind(baton->wanted, &vsp->pmid)) == NULL) + continue; + } /* check if metric to be skipped (optional metric exclusion) */ if (exclude && (dictFind(exclude, &vsp->pmid)) != NULL) @@ -475,14 +479,18 @@ series_cache_update(seriesLoadBaton *baton, struct dict *exclude) write_meta = write_inst = 0; /* check if pmid already in hash list */ - if ((metric = dictFetchValue(cp->pmids, &vsp->pmid)) == NULL) { - /* create a new metric, and add it to load context */ - if ((metric = new_metric(baton, vsp)) == NULL) - continue; - write_meta = 1; - } else { /* pmid already observed */ - if ((write_meta = metric->cached) == 0) - get_metric_metadata(baton, metric); + { + dictEntry *entry; + if ((entry = dictFind(cp->pmids, &vsp->pmid)) == NULL) { + /* create a new metric, and add it to load context */ + if ((metric = new_metric(baton, vsp)) == NULL) + continue; + write_meta = 1; + } else { /* pmid already observed */ + metric = (metric_t *)dictGetVal(entry); + if ((write_meta = metric->cached) == 0) + get_metric_metadata(baton, metric); + } } /* iterate through result instances and ensure metric_t is complete */ @@ -649,8 +657,6 @@ server_cache_window(void *arg) { seriesLoadBaton *baton = (seriesLoadBaton *)arg; seriesGetContext *context = &baton->pmapi; - uv_timer_t *timer; /* 
adaptive delay for request balancing */ - uint64_t msecs; seriesBatonCheckMagic(baton, MAGIC_LOAD, "server_cache_window"); seriesBatonCheckCount(context, "server_cache_window"); @@ -660,6 +666,8 @@ server_cache_window(void *arg) fprintf(stderr, "%s: fetching next result\n", "server_cache_window"); #if defined(HAVE_LIBUV) + uv_timer_t *timer; + uint64_t msecs; seriesBatonReference(baton, "server_cache_window"); seriesBatonReference(context, "server_cache_window"); context->done = server_cache_series_finished; @@ -1095,10 +1103,10 @@ connect_keys_source_service(seriesLoadBaton *baton) if ((baton->flags & PM_SERIES_FLAG_TEXT)) flags |= SLOTS_SEARCH; baton->slots = data->slots = - keySlotsConnect( + &(keySlotsConnect( data->config, flags, baton->info, series_load_end_phase, baton->userdata, - data->events, (void *)baton); + data->events, (void *)baton))->slots; } } } @@ -1138,9 +1146,9 @@ initSeriesLoadBaton(seriesLoadBaton *baton, void *module, pmSeriesFlags flags, baton->userdata = userdata; baton->flags = flags; - baton->errors = dictCreate(&intKeyDictCallBacks, baton); - baton->wanted = dictCreate(&intKeyDictCallBacks, baton); - baton->exclude_pmids = dictCreate(&intKeyDictCallBacks, baton); + baton->errors = dictCreate(&intKeyDictCallBacks); + baton->wanted = dictCreate(&intKeyDictCallBacks); + baton->exclude_pmids = dictCreate(&intKeyDictCallBacks); } void @@ -1438,8 +1446,10 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *event, for (i = 0; indom && i < nsets; i++) { id = sets[i].inst; - if ((instance = dictFetchValue(indom->insts, &id)) == NULL) + dictEntry *entry; + if ((entry = dictFind(indom->insts, &id)) == NULL) continue; + instance = (instance_t *)dictGetVal(entry); if ((labels = pmwebapi_labelsetdup(&sets[i])) == NULL) { infofmt(msg, "failed to dup indom %s instance label set: %s", pmInDomStr_r(indom->indom, idbuf, sizeof(idbuf)), diff --git a/src/libpcp_web/src/load.h b/src/libpcp_web/src/load.h index 50556d6efec..be88fcdb6ca 100644 --- 
a/src/libpcp_web/src/load.h +++ b/src/libpcp_web/src/load.h @@ -23,6 +23,9 @@ typedef void *uv_timer_t; #endif +/* Forward declaration */ +typedef struct keyMap keyMap; + typedef struct seriesname { sds sds; /* external name for the series */ unsigned char id[20]; /* SHA1 of external series name */ @@ -74,7 +77,7 @@ typedef struct labellist { sds value; unsigned int flags; struct labellist *next; - struct dict *valuemap; + keyMap *valuemap; void *arg; } labellist_t; diff --git a/src/libpcp_web/src/loggroup.c b/src/libpcp_web/src/loggroup.c index 0b7208bd633..d8a15515ad0 100644 --- a/src/libpcp_web/src/loggroup.c +++ b/src/libpcp_web/src/loggroup.c @@ -222,7 +222,9 @@ loggroup_new_archive(pmLogGroupSettings *sp, __pmLogLabel *label, int archive; if (params) { - if ((timeout = dictFetchValue(params, PARAM_POLLTIME)) != NULL) { + dictEntry *entry; + if ((entry = dictFind(params, PARAM_POLLTIME)) != NULL) { + timeout = (sds)dictGetVal(entry); seconds = strtod(timeout, &endptr); if (*endptr != '\0') return -EINVAL; @@ -291,7 +293,7 @@ loggroup_timers_stop(struct loggroups *groups) static void loggroup_garbage_collect(struct loggroups *groups) { - dictIterator *iterator; + dictIterator iter; dictEntry *entry; archive_t *ap; unsigned int debug; @@ -305,10 +307,10 @@ loggroup_garbage_collect(struct loggroups *groups) if (uv_mutex_trylock(&groups->mutex) != 0) return; - iterator = dictGetSafeIterator(groups->archives); - for (entry = dictNext(iterator); entry;) { + dictInitIterator(&iter, groups->archives); + for (entry = dictNext(&iter); entry;) { ap = (archive_t *)dictGetVal(entry); - entry = dictNext(iterator); + entry = dictNext(&iter); if (ap->privdata != groups) continue; if (ap->garbage) @@ -327,7 +329,6 @@ loggroup_garbage_collect(struct loggroups *groups) } count++; } - dictReleaseIterator(iterator); if (groups->update) { logpaths_stats_reset(groups); @@ -357,7 +358,7 @@ loggroup_garbage_collect(struct loggroups *groups) static void loggroup_reset_archives(struct 
loggroups *groups) { - dictIterator *iterator; + dictIterator iter; dictEntry *entry; archive_t *ap; unsigned int debug, count = 0; @@ -367,10 +368,10 @@ loggroup_reset_archives(struct loggroups *groups) fprintf(stderr, "%s: started for groups %p\n", __FUNCTION__, groups); uv_mutex_lock(&groups->mutex); - iterator = dictGetSafeIterator(groups->archives); - for (entry = dictNext(iterator); entry;) { + dictInitIterator(&iter, groups->archives); + for (entry = dictNext(&iter); entry;) { ap = (archive_t *)dictGetVal(entry); - entry = dictNext(iterator); + entry = dictNext(&iter); if (ap->privdata != groups) continue; ap->refcount++; @@ -381,7 +382,6 @@ loggroup_reset_archives(struct loggroups *groups) groups->update = 1; count++; } - dictReleaseIterator(iterator); if (groups->update) { logpaths_stats_reset(groups); @@ -454,9 +454,12 @@ loggroup_lookup_archive(pmLogGroupSettings *sp, int id, struct archive **pp, voi default_worker, default_worker); } - ap = (struct archive *)dictFetchValue(groups->archives, &id); - if (ap == NULL) - return -ESRCH; + { + dictEntry *entry; + if ((entry = dictFind(groups->archives, &id)) == NULL) + return -ESRCH; + ap = (struct archive *)dictGetVal(entry); + } if ((ap = loggroup_use_archive(ap)) == NULL) return -ENOTCONN; *pp = ap; @@ -845,7 +848,7 @@ pmLogGroupSetup(pmLogGroupModule *module) srandom(pid ^ (unsigned int)ts.tv_sec ^ (unsigned int)ts.tv_nsec); /* setup a dictionary mapping archive number to data */ - groups->archives = dictCreate(&intKeyDictCallBacks, NULL); + groups->archives = dictCreate(&intKeyDictCallBacks); return 0; } @@ -865,14 +868,13 @@ pmLogGroupSetEventLoop(pmLogGroupModule *module, void *events) static void loggroup_free(struct loggroups *groups) { - dictIterator *iterator; + dictIterator iter; dictEntry *entry; /* walk the archives, stop timers and free resources */ - iterator = dictGetIterator(groups->archives); - while ((entry = dictNext(iterator)) != NULL) + dictInitIterator(&iter, groups->archives); + while 
((entry = dictNext(&iter)) != NULL) loggroup_drop_archive((archive_t *)dictGetVal(entry), NULL); - dictReleaseIterator(iterator); dictRelease(groups->archives); loggroup_timers_stop(groups); memset(groups, 0, sizeof(struct loggroups)); @@ -894,24 +896,31 @@ pmLogGroupSetConfiguration(pmLogGroupModule *module, dict *config) } /* allocate strings for parameter dictionary key lookups */ - if ((value = dictFetchValue(config, WORK_TIMER)) == NULL) { - default_worker = DEFAULT_WORK_TIMER; - } else { - default_worker = strtoul(value, &endnum, 0); - if (*endnum != '\0') + { + dictEntry *entry; + if ((entry = dictFind(config, WORK_TIMER)) == NULL) { default_worker = DEFAULT_WORK_TIMER; - } + } else { + value = (sds)dictGetVal(entry); + default_worker = strtoul(value, &endnum, 0); + if (*endnum != '\0') + default_worker = DEFAULT_WORK_TIMER; + } - if ((value = dictFetchValue(config, POLL_TIMEOUT)) == NULL) { - default_timeout = DEFAULT_POLL_TIMEOUT; - } else { - default_timeout = strtoul(value, &endnum, 0); - if (*endnum != '\0') + if ((entry = dictFind(config, POLL_TIMEOUT)) == NULL) { default_timeout = DEFAULT_POLL_TIMEOUT; - } + } else { + value = (sds)dictGetVal(entry); + default_timeout = strtoul(value, &endnum, 0); + if (*endnum != '\0') + default_timeout = DEFAULT_POLL_TIMEOUT; + } - if ((value = dictFetchValue(config, CACHED_ONLY)) != NULL) - cached_only = (strcmp(value, "true") == 0); + if ((entry = dictFind(config, CACHED_ONLY)) != NULL) { + value = (sds)dictGetVal(entry); + cached_only = (strcmp(value, "true") == 0); + } + } if (groups) { groups->config = config; @@ -924,33 +933,32 @@ static void logpaths_stats_insts(struct loggroups *groups) { mmv_registry_t *rp = groups->logpaths; - dictIterator *iterator; + dictIterator iter; dictEntry *entry; archive_t *ap; /* walk archives, update instance domain */ - iterator = dictGetIterator(groups->archives); - while ((entry = dictNext(iterator)) != NULL) { + dictInitIterator(&iter, groups->archives); + while ((entry = 
dictNext(&iter)) != NULL) { ap = (archive_t *)dictGetVal(entry); if (ap->privdata != groups || ap->inactive || ap->garbage) continue; mmv_stats_add_instance(rp, LOGPATHS, ap->randomid, ap->idstring); } - dictReleaseIterator(iterator); } static void logpaths_stats_value(struct loggroups *groups) { - dictIterator *iterator; + dictIterator iter; dictEntry *entry; pmAtomValue *atom; archive_t *ap; uint32_t count = 0; /* walk archives, update the value (archive path) for each instance */ - iterator = dictGetIterator(groups->archives); - while ((entry = dictNext(iterator)) != NULL) { + dictInitIterator(&iter, groups->archives); + while ((entry = dictNext(&iter)) != NULL) { ap = (archive_t *)dictGetVal(entry); if (ap->privdata != groups || ap->inactive || ap->garbage) continue; @@ -958,7 +966,6 @@ logpaths_stats_value(struct loggroups *groups) mmv_set_string(groups->logmap, atom, ap->fullpath, sdslen(ap->fullpath)); count++; } - dictReleaseIterator(iterator); mmv_set(groups->logmap, groups->logmetrics[LOGPATHS_COUNT], &count); } diff --git a/src/libpcp_web/src/maps.c b/src/libpcp_web/src/maps.c index 15a9091792d..a51b3b50928 100644 --- a/src/libpcp_web/src/maps.c +++ b/src/libpcp_web/src/maps.c @@ -29,35 +29,32 @@ intHashCallBack(const void *key) { const unsigned int *i = (const unsigned int *)key; - return dictGenHashFunction(i, sizeof(unsigned int)); + return dictGenHashFunction((const unsigned char *)i, sizeof(unsigned int)); } static int -intCmpCallBack(void *privdata, const void *a, const void *b) +intCmpCallBack(const void *a, const void *b) { const unsigned int *ia = (const unsigned int *)a; const unsigned int *ib = (const unsigned int *)b; - (void)privdata; return (*ia == *ib); } static void * -intDupCallBack(void *privdata, const void *key) +intDupCallBack(const void *key) { - unsigned int *i = (unsigned int *)key; + const unsigned int *i = (const unsigned int *)key; unsigned int *k = (unsigned int *)malloc(sizeof(*i)); - (void)privdata; if (k) *k = *i; return k; 
} static void -intFreeCallBack(void *privdata, void *value) +intFreeCallBack(void *value) { - (void)privdata; if (value) free(value); } @@ -75,11 +72,10 @@ sdsHashCallBack(const void *key) } static int -sdsCompareCallBack(void *privdata, const void *key1, const void *key2) +sdsCompareCallBack(const void *key1, const void *key2) { int l1, l2; - (void)privdata; l1 = sdslen((sds)key1); l2 = sdslen((sds)key2); if (l1 != l2) return 0; @@ -87,15 +83,14 @@ sdsCompareCallBack(void *privdata, const void *key1, const void *key2) } static void * -sdsDupCallBack(void *privdata, const void *key) +sdsDupCallBack(const void *key) { return sdsdup((sds)key); } static void -sdsFreeCallBack(void *privdata, void *val) +sdsFreeCallBack(void *val) { - (void)privdata; sdsfree(val); } @@ -124,41 +119,53 @@ dictType sdsDictCallBacks = { void keyMapsInit(void) { - if (instmap == NULL) - instmap = dictCreate(&sdsDictCallBacks, - (void *)sdsnew("inst.name")); - if (namesmap == NULL) - namesmap = dictCreate(&sdsDictCallBacks, - (void *)sdsnew("metric.name")); - if (labelsmap == NULL) - labelsmap = dictCreate(&sdsDictCallBacks, - (void *)sdsnew("label.name")); - if (contextmap == NULL) - contextmap = dictCreate(&sdsDictCallBacks, - (void *)sdsnew("context.name")); + if (instmap == NULL) { + instmap = malloc(sizeof(keyMap)); + instmap->dict = dictCreate(&sdsDictCallBacks); + instmap->privdata = sdsnew("inst.name"); + } + if (namesmap == NULL) { + namesmap = malloc(sizeof(keyMap)); + namesmap->dict = dictCreate(&sdsDictCallBacks); + namesmap->privdata = sdsnew("metric.name"); + } + if (labelsmap == NULL) { + labelsmap = malloc(sizeof(keyMap)); + labelsmap->dict = dictCreate(&sdsDictCallBacks); + labelsmap->privdata = sdsnew("label.name"); + } + if (contextmap == NULL) { + contextmap = malloc(sizeof(keyMap)); + contextmap->dict = dictCreate(&sdsDictCallBacks); + contextmap->privdata = sdsnew("context.name"); + } } void keyMapsClose(void) { if (instmap) { - sdsfree(keyMapName(instmap)); - 
dictRelease(instmap); + sdsfree((sds)instmap->privdata); + dictRelease(instmap->dict); + free(instmap); instmap = NULL; } if (namesmap) { - sdsfree(keyMapName(namesmap)); - dictRelease(namesmap); + sdsfree((sds)namesmap->privdata); + dictRelease(namesmap->dict); + free(namesmap); namesmap = NULL; } if (labelsmap) { - sdsfree(keyMapName(labelsmap)); - dictRelease(labelsmap); + sdsfree((sds)labelsmap->privdata); + dictRelease(labelsmap->dict); + free(labelsmap); labelsmap = NULL; } if (contextmap) { - sdsfree(keyMapName(contextmap)); - dictRelease(contextmap); + sdsfree((sds)contextmap->privdata); + dictRelease(contextmap->dict); + free(contextmap); contextmap = NULL; } } @@ -172,14 +179,17 @@ keyMapName(keyMap *map) keyMap * keyMapCreate(sds name) { - return dictCreate(&sdsDictCallBacks, (void *)name); + keyMap *map = malloc(sizeof(keyMap)); + map->dict = dictCreate(&sdsDictCallBacks); + map->privdata = name; + return map; } keyMapEntry * keyMapLookup(keyMap *map, sds key) { - if (map) - return dictFind(map, key); + if (map && map->dict) + return dictFind(map->dict, key); return NULL; } @@ -190,9 +200,9 @@ keyMapInsert(keyMap *map, sds key, sds value) if (entry) { /* fix for Coverity CID323605 Resource Leak */ - dictDelete(map, key); + dictDelete(map->dict, key); } - dictAdd(map, key, value); + dictAdd(map->dict, key, value); } sds @@ -204,6 +214,9 @@ keyMapValue(keyMapEntry *entry) void keyMapRelease(keyMap *map) { - sdsfree(keyMapName(map)); - dictRelease(map); + if (map) { + sdsfree((sds)map->privdata); + dictRelease(map->dict); + free(map); + } } diff --git a/src/libpcp_web/src/maps.h b/src/libpcp_web/src/maps.h index e3463fbdbc4..4f4aa2c8ae0 100644 --- a/src/libpcp_web/src/maps.h +++ b/src/libpcp_web/src/maps.h @@ -19,7 +19,16 @@ #include "batons.h" struct keySlots; -typedef dict keyMap; + +/* + * Wrapper for dict that includes privdata (metadata). + * This is needed because libvalkey dict doesn't have privdata field. 
+ */ +typedef struct keyMap { + dict *dict; /* the actual dictionary */ + void *privdata; /* private data (typically sds name) */ +} keyMap; + typedef dictEntry keyMapEntry; /* diff --git a/src/libpcp_web/src/query.c b/src/libpcp_web/src/query.c index e0f24236ec4..58f1132ae6f 100644 --- a/src/libpcp_web/src/query.c +++ b/src/libpcp_web/src/query.c @@ -5141,10 +5141,10 @@ series_query_services(void *arg) baton->error = -ENOTSUP; else baton->slots = data->slots = - keySlotsConnect( + &(keySlotsConnect( data->config, 1, baton->info, series_query_end_phase, baton->userdata, - data->events, (void *)baton); + data->events, (void *)baton))->slots; } } @@ -6166,10 +6166,10 @@ series_lookup_services(void *arg) baton->error = -ENOTSUP; else baton->slots = data->slots = - keySlotsConnect( + &(keySlotsConnect( data->config, 1, baton->info, series_query_end_phase, baton->userdata, - data->events, (void *)baton); + data->events, (void *)baton))->slots; } } diff --git a/src/libpcp_web/src/schema.c b/src/libpcp_web/src/schema.c index 6eb739d1618..b926db70c42 100644 --- a/src/libpcp_web/src/schema.c +++ b/src/libpcp_web/src/schema.c @@ -133,7 +133,7 @@ key_map_publish(keyMapBaton *baton) sdsfree(msg); sdsfree(key); - keySlotsRequest(baton->slots, cmd, key_map_publish_callback, baton); + keySlotsRequestFirstNode(baton->slots, cmd, key_map_publish_callback, baton); sdsfree(cmd); } @@ -729,12 +729,14 @@ keys_series_metric(keySlots *slots, metric_t *metric, } else { for (i = 0; i < metric->u.vlist->listcount; i++) { value = &metric->u.vlist->value[i]; - if ((instance = dictFetchValue(metric->indom->insts, &value->inst)) == NULL) { + dictEntry *entry; + if ((entry = dictFind(metric->indom->insts, &value->inst)) == NULL) { if (pmDebugOptions.series) fprintf(stderr, "indom lookup failure for %s instance %u", pmInDomStr(metric->indom->indom), value->inst); continue; } + instance = (instance_t *)dictGetVal(entry); assert(instance->name.sds != NULL); seriesBatonReference(baton, 
"keys_series_metric"); keyGetMap(slots, @@ -909,8 +911,10 @@ keys_series_metadata(context_t *context, metric_t *metric, void *arg) } else { for (i = 0; i < metric->u.vlist->listcount; i++) { value = &metric->u.vlist->value[i]; - if ((instance = dictFetchValue(metric->indom->insts, &value->inst)) == NULL) + dictEntry *entry; + if ((entry = dictFind(metric->indom->insts, &value->inst)) == NULL) continue; + instance = (instance_t *)dictGetVal(entry); keys_series_instance(slots, metric, instance, baton); if (metric->cached == 0 || instance->cached == 0) keys_series_labelset(slots, metric, instance, baton); @@ -924,8 +928,10 @@ keys_series_metadata(context_t *context, metric_t *metric, void *arg) } for (i = 0; i < metric->u.vlist->listcount; i++) { value = &metric->u.vlist->value[i]; - if ((instance = dictFetchValue(metric->indom->insts, &value->inst)) == NULL) + dictEntry *entry; + if ((entry = dictFind(metric->indom->insts, &value->inst)) == NULL) continue; + instance = (instance_t *)dictGetVal(entry); instance->cached = 1; } metric->cached = 1; @@ -1106,8 +1112,10 @@ keys_series_stream(keySlots *slots, sds stamp, metric_t *metric, if (v->updated == 0) continue; - if ((inst = dictFetchValue(metric->indom->insts, &v->inst)) == NULL) + dictEntry *entry; + if ((entry = dictFind(metric->indom->insts, &v->inst)) == NULL) continue; + inst = (instance_t *)dictGetVal(entry); name = sdscpylen(name, (const char *)inst->name.hash, sizeof(inst->name.hash)); stream = series_stream_value(stream, name, type, &v->atom); count += 2; @@ -1332,7 +1340,6 @@ decodeCommandKey(keySlotsBaton *baton, int index, respReply *reply) { keySlots *slots = baton->slots; respReply *node; - dictEntry *entry; long long position; sds msg, cmd; @@ -1367,13 +1374,37 @@ decodeCommandKey(keySlotsBaton *baton, int index, respReply *reply) "NAME for %s element %d", COMMAND, index)) == NULL) return -EINVAL; - if ((entry = dictAddRaw(slots->keymap, cmd, NULL)) != NULL) { - dictSetSignedIntegerVal(entry, 
position); + /* dictAddRaw replacement using libvalkey dict API */ + /* Since libvalkey dict only stores void* values, we allocate an integer + * and store a pointer to it. This matches the behavior of dictSetSignedIntegerVal + * which would have stored the integer directly in PCP's dict union. */ + { + int64_t *position_ptr; + + /* Check if key already exists */ + if (dictFind(slots->keymap->dict, cmd) != NULL) { + sdsfree(cmd); + return -ENOMEM; /* Key already exists */ + } + + /* Allocate integer for position value (libvalkey dict only stores void*) */ + position_ptr = malloc(sizeof(int64_t)); + if (position_ptr == NULL) { + sdsfree(cmd); + return -ENOMEM; + } + *position_ptr = position; + + /* Use dictAdd to add the entry with the position pointer as value */ + if (dictAdd(slots->keymap->dict, cmd, position_ptr) != DICT_OK) { + free(position_ptr); + sdsfree(cmd); + return -ENOMEM; + } + sdsfree(cmd); return 0; } - sdsfree(cmd); - return -ENOMEM; } static void @@ -1457,7 +1488,7 @@ keysSchemaLoad(keySlots *slots, keySlotsFlags flags, /* Register the pmsearch schema with RediSearch if needed */ if (flags & SLOTS_SEARCH) { /* if we got a route update means we are in cluster mode */ - if (slots->acc->cc->route_version > 0) { + if (slots->acc && slots->acc->cc.route_version > 0) { pmNotifyErr(LOG_INFO, "disabling search module " "because it does not support cluster mode\n"); } else { @@ -1707,9 +1738,9 @@ pmSeriesSetup(pmSeriesModule *module, void *arg) if (option && strcmp(option, "true") == 0) flags |= SLOTS_SEARCH; - data->slots = keySlotsConnect( + data->slots = &(keySlotsConnect( data->config, flags, module->on_info, - module->on_setup, arg, data->events, arg); + module->on_setup, arg, data->events, arg))->slots; data->shareslots = 0; } diff --git a/src/libpcp_web/src/search.c b/src/libpcp_web/src/search.c index 744e56c2345..61e0fe8176c 100644 --- a/src/libpcp_web/src/search.c +++ b/src/libpcp_web/src/search.c @@ -1090,9 +1090,9 @@ pmSearchSetup(pmSearchModule 
*module, void *arg) return -ENOTSUP; /* establish an initial connection to key server instance(s) */ - data->slots = keySlotsConnect( + data->slots = &(keySlotsConnect( data->config, flags, module->on_info, - module->on_setup, arg, data->events, arg); + module->on_setup, arg, data->events, arg))->slots; data->shareslots = 0; } return 0; diff --git a/src/libpcp_web/src/slots.c b/src/libpcp_web/src/slots.c index defe67f4b22..9901b9ae35e 100644 --- a/src/libpcp_web/src/slots.c +++ b/src/libpcp_web/src/slots.c @@ -20,8 +20,10 @@ #ifdef HAVE_STRINGS_H #include #endif +#include +#include "adlist.h" #if defined(HAVE_LIBUV) -#include +#include #else static int keyClusterLibuvAttach() { return RESP_OK; } #endif @@ -29,7 +31,7 @@ static int keyClusterLibuvAttach() { return RESP_OK; } static char default_server[] = "localhost:6379"; static void -key_server_connect_callback(const keysAsyncContext *keys, int status) +key_server_connect_callback(keysAsyncContext *keys, int status) { if (status == RESP_OK) { if (pmDebugOptions.series) @@ -154,31 +156,37 @@ keySlotsSetMetricRegistry(keySlots *slots, mmv_registry_t *registry) return -ENOMEM; } -keySlots * +keySlotsContext * keySlotsInit(dict *config, void *events) { - keySlots *slots; + keySlotsContext *context; sds servers = NULL; sds def_servers = NULL; sds username = NULL; sds password = NULL; - int sts = 0; - struct timeval connection_timeout = {5, 0}; // 5s - struct timeval command_timeout = {60, 0}; // 1m - if ((slots = (keySlots *)calloc(1, sizeof(keySlots))) == NULL) { - pmNotifyErr(LOG_ERR, "%s: failed to allocate keySlots\n", + if ((context = (keySlotsContext *)calloc(1, sizeof(keySlotsContext))) == NULL) { + pmNotifyErr(LOG_ERR, "%s: failed to allocate keySlotsContext\n", "keySlotsInit"); return NULL; } - slots->state = SLOTS_DISCONNECTED; - slots->events = events; - slots->keymap = dictCreate(&sdsKeyDictCallBacks, "keymap"); - if (slots->keymap == NULL) { + context->slots.state = SLOTS_DISCONNECTED; + 
context->slots.events = events; + context->slots.keymap = malloc(sizeof(keyMap)); + if (context->slots.keymap == NULL) { pmNotifyErr(LOG_ERR, "%s: failed to allocate keymap\n", - "keySlotsInit"); - free(slots); + "keySlotsInit"); + free(context); + return NULL; + } + context->slots.keymap->dict = dictCreate(&sdsKeyDictCallBacks); + context->slots.keymap->privdata = NULL; + if (context->slots.keymap->dict == NULL) { + pmNotifyErr(LOG_ERR, "%s: failed to allocate keymap dict\n", + "keySlotsInit"); + free(context->slots.keymap); + free(context); return NULL; } @@ -201,107 +209,53 @@ keySlotsInit(dict *config, void *events) if (password == NULL) password = pmIniFileLookup(config, "pmseries", "auth.password"); - if ((slots->acc = keyClusterAsyncContextInit()) == NULL) { - /* Coverity CID370635 */ - pmNotifyErr(LOG_ERR, "%s: %s failed\n", - "keySlotsInit", "keyClusterAsyncContextInit"); - sdsfree(def_servers); - return slots; - } - - if (slots->acc->err) { - pmNotifyErr(LOG_ERR, "%s: %s\n", "keySlotsInit", slots->acc->errstr); - sdsfree(def_servers); - return slots; - } - - sts = keyClusterSetOptionAddNodes(slots->acc->cc, servers); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to add key server nodes: %s\n", - "keySlotsInit", slots->acc->cc->errstr); - sdsfree(def_servers); - return slots; - } - sdsfree(def_servers); /* Coverity CID370634 */ - /* + * Add options to the valkeyClusterOptions struct * the ini parser already removes spaces at the beginning and end of the * configuration values, so checking for empty strings using sdslen() is - * fine - */ - if (username != NULL && sdslen(username) > 0) { - sts = keyClusterSetOptionUsername(slots->acc->cc, username); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to set key server username: %s\n", - "keyClusterSetOptionUsername", slots->acc->cc->errstr); - return slots; - } - } - - /* + * fine. 
* see note above re empty configuration values having only a password * set and no username is a valid key server configuration, details: * https://valkey.io/commands/auth - */ - if (password != NULL && sdslen(password) > 0) { - sts = keyClusterSetOptionPassword(slots->acc->cc, password); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to set key server password: %s\n", - "keyClusterSetOptionPassword", slots->acc->cc->errstr); - return slots; - } - } - - sts = keyClusterSetOptionConnectTimeout(slots->acc->cc, connection_timeout); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to set connect timeout: %s\n", - "keySlotsInit", slots->acc->errstr); - return slots; - } - - sts = keyClusterSetOptionTimeout(slots->acc->cc, command_timeout); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to set command timeout: %s\n", - "keySlotsInit", slots->acc->cc->errstr); - return slots; - } - - sts = keyClusterLibuvAttach(slots->acc, slots->events); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to attach to event loop: %s\n", - "keySlotsInit", slots->acc->errstr); - return slots; - } - - sts = keyClusterAsyncSetConnectCallback(slots->acc, key_server_connect_callback); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to set connect callback: %s\n", - "keySlotsInit", slots->acc->errstr); - return slots; - } + */ + + /* Initialize timeout values in the context structure */ + context->connection_timeout.tv_sec = 5; + context->connection_timeout.tv_usec = 0; + context->command_timeout.tv_sec = 60; + context->command_timeout.tv_usec = 0; + + context->opts.initial_nodes = servers; // sds string + context->opts.username = (username && sdslen(username)) ? username : NULL; + context->opts.password = (password && sdslen(password)) ? 
password : NULL; + context->opts.connect_timeout = &context->connection_timeout; // struct timeval* + context->opts.command_timeout = &context->command_timeout; // struct timeval* + context->opts.async_connect_callback = key_server_connect_callback; + context->opts.async_disconnect_callback = key_server_disconnect_callback; +#if defined(HAVE_LIBUV) + context->opts.attach_fn = keyLibuvAttachAdapter; + context->opts.attach_data = (uv_loop_t *)events; +#endif - sts = keyClusterAsyncSetDisconnectCallback(slots->acc, key_server_disconnect_callback); - if (sts != RESP_OK) { - pmNotifyErr(LOG_ERR, "%s: failed to set disconnect callback: %s\n", - "keySlotsInit", slots->acc->errstr); - return slots; - } + /* Note: actual connection happens in keySlotsReconnect(), not here. + * This avoids creating a context that gets immediately freed and recreated. */ + context->slots.acc = NULL; + + sdsfree(def_servers); /* Coverity CID370634 */ - return slots; + return context; } /** - * despite the name, this function also handles the initial - * connection to the key server + * Handles the initial connection, reconnection logic and schema loading + * (connection options are prepared in keySlotsInit; the connection itself is established here) */ void keySlotsReconnect(keySlots *slots, keySlotsFlags flags, keysInfoCallBack info, keysDoneCallBack done, - void *userdata, void *events, void *arg) + void *userdata, void *events, void *arg, valkeyClusterOptions *opts) { - dictIterator *iterator; dictEntry *entry; - int sts = 0; static int log_connection_errors = 1; if (slots == NULL)
*/ + if (slots->acc != NULL) { + /* reset key server context in case of reconnect */ + if (slots->acc->err) { + /* reset possible 'Connection refused' error before reconnecting */ + slots->acc->err = 0; + memset(slots->acc->errstr, '\0', strlen(slots->acc->errstr)); + } + keyClusterAsyncDisconnect(slots->acc); - /* reset keySlots in case of reconnect */ - slots->cluster = 0; - slots->search = 0; - dictEmpty(slots->keymap, NULL); + /* reset keySlots in case of reconnect */ + slots->cluster = 0; + slots->search = 0; + /* Clear the keymap by iterating and deleting all entries. + * Note: Must free the values (malloc'd int64_t* pointers) before + * deleting entries since sdsKeyDictCallBacks has no valDestructor. */ + { + dictIterator iter; + dictEntry *e; + dictInitIterator(&iter, slots->keymap->dict); + while ((e = dictNext(&iter)) != NULL) { + void *val = dictGetVal(e); + if (val != NULL) { + free(val); /* Free the malloc'd int64_t* position pointer */ + } + dictDelete(slots->keymap->dict, dictGetKey(e)); + } + } - sts = keyClusterConnect2(slots->acc->cc); - if (sts == RESP_OK) { + /* Free old async context completely and create a new one. + * libvalkey doesn't support reconnecting an existing async context, + * so we need to free the old one and create a new one. */ + keyClusterAsyncFree(slots->acc); + slots->acc = NULL; + } + + /* Enable blocking initial slot map update for reconnect so the context + * is immediately ready to accept commands when the function returns. 
*/ + int saved_options = opts->options; + opts->options |= KEYOPT_BLOCKING_INITIAL_UPDATE; + + /* Create new async connection using the same options */ + slots->acc = keyClusterAsyncContextInit(opts); + + /* Restore original options */ + opts->options = saved_options; + + if (slots->acc == NULL) { + pmNotifyErr(LOG_ERR, "%s: failed to create new async context\n", + "keySlotsReconnect"); + slots->state = SLOTS_DISCONNECTED; + return; + } + if (slots->acc->err == 0) { slots->cluster = 1; } - else if (slots->acc->cc->err && - strcmp(slots->acc->cc->errstr, RESP_ENOCLUSTER) == 0) { + else if (slots->acc->err && + strcmp(slots->acc->errstr, RESP_ENOCLUSTER) == 0) { /* key server instance has cluster support disabled */ - slots->acc->cc->err = 0; - memset(slots->acc->cc->errstr, '\0', strlen(slots->acc->cc->errstr)); + /* Clear error (following valkeyClusterAsyncClearError pattern) */ + slots->acc->cc.err = 0; + slots->acc->cc.errstr[0] = '\0'; + slots->acc->err = slots->acc->cc.err; /* sync acc->err from cc->err */ slots->cluster = 0; /* @@ -339,21 +334,38 @@ keySlotsReconnect(keySlots *slots, keySlotsFlags flags, * is configured, but cluster mode is disabled * otherwise all other nodes silently don't get any data */ - iterator = dictGetSafeIterator(slots->acc->cc->nodes); - entry = dictNext(iterator); - if (entry && dictNext(iterator)) { - dictReleaseIterator(iterator); - pmNotifyErr(LOG_ERR, "%s: more than one node is configured, " - "but cluster mode is disabled", "keySlotsReconnect"); - slots->state = SLOTS_ERR_FATAL; - return; + { + dictIterator iter; + dictInitIterator(&iter, slots->acc->cc.nodes); + entry = dictNext(&iter); + if (entry && dictNext(&iter)) { + pmNotifyErr(LOG_ERR, "%s: more than one node is configured, " + "but cluster mode is disabled", "keySlotsReconnect"); + slots->state = SLOTS_ERR_FATAL; + return; + } } - dictReleaseIterator(iterator); } else { if (log_connection_errors || pmDebugOptions.desperate) { - pmNotifyErr(LOG_INFO, "Cannot connect to 
key server: %s\n", - slots->acc->cc->errstr); + /* acc->errstr should point to acc->cc.errstr, but be defensive */ + const char *errstr = NULL; + if (slots->acc->errstr != NULL && slots->acc->errstr[0] != '\0') { + errstr = slots->acc->errstr; + } else if (slots->acc->cc.errstr[0] != '\0') { + errstr = slots->acc->cc.errstr; + } else { + /* Check if we can get more info from errno or error code */ + switch (slots->acc->err) { + case RESP_ERR_IO: + errstr = "Connection refused"; + break; + default: + errstr = "Unknown error"; + break; + } + } + pmNotifyErr(LOG_INFO, "Cannot connect to key server: %s\n", errstr); log_connection_errors = 0; } slots->state = SLOTS_DISCONNECTED; @@ -370,12 +382,12 @@ keySlotsReconnect(keySlots *slots, keySlotsFlags flags, * compatibility, the actual connection to the key server happens in * keySlotsReconnect() */ -keySlots * +keySlotsContext * keySlotsConnect(dict *config, keySlotsFlags flags, keysInfoCallBack info, keysDoneCallBack done, void *userdata, void *events, void *arg) { - keySlots *slots; + keySlotsContext *context; sds enabled, msg; if (!(enabled = pmIniFileLookup(config, "resp", "enabled"))) @@ -383,8 +395,8 @@ keySlotsConnect(dict *config, keySlotsFlags flags, if (enabled && strcmp(enabled, "false") == 0) return NULL; - slots = keySlotsInit(config, events); - if (slots == NULL) { + context = keySlotsInit(config, events); + if (context == NULL) { msg = NULL; infofmt(msg, "Failed to allocate memory for key server slots"); info(PMLOG_ERROR, msg, arg); @@ -392,8 +404,8 @@ keySlotsConnect(dict *config, keySlotsFlags flags, return NULL; } - keySlotsReconnect(slots, flags, info, done, userdata, events, arg); - return slots; + keySlotsReconnect(&context->slots, flags, info, done, userdata, events, arg, &context->opts); + return context; } void @@ -401,11 +413,64 @@ keySlotsFree(keySlots *slots) { keyClusterAsyncDisconnect(slots->acc); keyClusterAsyncFree(slots->acc); - dictRelease(slots->keymap); + if (slots->keymap) { + if 
(slots->keymap->privdata) + sdsfree((sds)slots->keymap->privdata); + /* Free all values (malloc'd int64_t* pointers) before releasing dict. + * sdsKeyDictCallBacks has no valDestructor, so we must do this manually. */ + if (slots->keymap->dict) { + dictIterator iter; + dictEntry *e; + dictInitIterator(&iter, slots->keymap->dict); + while ((e = dictNext(&iter)) != NULL) { + void *val = dictGetVal(e); + if (val != NULL) { + free(val); + } + } + dictRelease(slots->keymap->dict); + } + free(slots->keymap); + } memset(slots, 0, sizeof(*slots)); free(slots); } +void +keySlotsContextFree(keySlotsContext *context) +{ + if (context == NULL) + return; + + /* Set state to disconnected BEFORE invoking callbacks to prevent + * callbacks from issuing new requests during shutdown */ + context->slots.state = SLOTS_DISCONNECTED; + + keyClusterAsyncDisconnect(context->slots.acc); + keyClusterAsyncFree(context->slots.acc); + if (context->slots.keymap) { + if (context->slots.keymap->privdata) + sdsfree((sds)context->slots.keymap->privdata); + /* Free all values (malloc'd int64_t* pointers) before releasing dict. + * sdsKeyDictCallBacks has no valDestructor, so we must do this manually. 
*/ + if (context->slots.keymap->dict) { + dictIterator iter; + dictEntry *e; + dictInitIterator(&iter, context->slots.keymap->dict); + while ((e = dictNext(&iter)) != NULL) { + void *val = dictGetVal(e); + if (val != NULL) { + free(val); + } + } + dictRelease(context->slots.keymap->dict); + } + free(context->slots.keymap); + } + memset(context, 0, sizeof(*context)); + free(context); +} + static inline uint64_t gettimeusec(void) { @@ -567,7 +632,8 @@ keySlotsRequest(keySlots *slots, const sds cmd, keySlotsReplyCallback, srd, cmd, size)) != RESP_OK) { mmv_inc(slots->map, slots->metrics[SLOT_REQUESTS_ERROR]); pmNotifyErr(LOG_ERR, "%s: %s (%s)\n", "keySlotsRequest", - slots->acc->errstr, cmd); + slots->acc->errstr, cmd); + keySlotsReplyDataFree(srd); return -ENOMEM; } @@ -583,9 +649,8 @@ int keySlotsRequestFirstNode(keySlots *slots, const sds cmd, keyClusterCallbackFn *callback, void *arg) { - dictIterator *iterator; dictEntry *entry; - cluster_node *node; + valkeyClusterNode *node; keySlotsReplyData *srd; uint64_t size; int sts; @@ -597,9 +662,11 @@ keySlotsRequestFirstNode(keySlots *slots, const sds cmd, if (UNLIKELY(slots->state != SLOTS_CONNECTED && slots->state != SLOTS_READY)) return -ENOTCONN; - iterator = dictGetSafeIterator(slots->acc->cc->nodes); - entry = dictNext(iterator); - dictReleaseIterator(iterator); + { + dictIterator iter; + dictInitIterator(&iter, slots->acc->cc.nodes); + entry = dictNext(&iter); + } if (!entry) { pmNotifyErr(LOG_ERR, "%s: No key server node configured.", "keySlotsRequestFirstNode"); @@ -619,11 +686,12 @@ keySlotsRequestFirstNode(keySlots *slots, const sds cmd, return -ENOMEM; } sts = keyClusterAsyncFormattedCommandToNode(slots->acc, node, - keySlotsReplyCallback, srd, cmd, size); + keySlotsReplyCallback, srd, cmd, size); if (sts != RESP_OK) { mmv_inc(slots->map, slots->metrics[SLOT_REQUESTS_ERROR]); pmNotifyErr(LOG_ERR, "%s: %s (%s)\n", - "keySlotsRequestFirstNode", slots->acc->errstr, cmd); + "keySlotsRequestFirstNode", 
slots->acc->errstr, cmd); + keySlotsReplyDataFree(srd); return -ENOMEM; } @@ -681,8 +749,8 @@ keySlotsProxyConnect(keySlots *slots, keysInfoCallBack info, reply->type == RESP_REPLY_MAP || reply->type == RESP_REPLY_SET) cmd = sdsnew(reply->element[0]->str); - if (cmd && (entry = dictFind(slots->keymap, cmd)) != NULL) { - position = dictGetSignedIntegerVal(entry); + if (cmd && (entry = dictFind(slots->keymap->dict, cmd)) != NULL) { + position = *(int64_t *)dictGetVal(entry); if (position > 0 && position < reply->elements) hasKey = 1; } @@ -735,8 +803,8 @@ reportReplyError(keysInfoCallBack info, void *userdata, msg = sdscatfmt(msg, "\nRESP reply error: %s", reply->str); else if (acc->err) msg = sdscatfmt(msg, "\nRESP acc error: %s", acc->errstr); - else if (acc->cc->err) - msg = sdscatfmt(msg, "\nRESP cc error: %s", acc->cc->errstr); + else if (acc->cc.err) + msg = sdscatfmt(msg, "\nRESP cc error: %s", acc->cc.errstr); info(PMLOG_RESPONSE, msg, userdata); sdsfree(msg); } diff --git a/src/libpcp_web/src/slots.h b/src/libpcp_web/src/slots.h index d9209da0f44..7f92181e50d 100644 --- a/src/libpcp_web/src/slots.h +++ b/src/libpcp_web/src/slots.h @@ -14,7 +14,7 @@ #ifndef SLOTS_H #define SLOTS_H -#include +#include #include #include "batons.h" #include "keys.h" @@ -79,13 +79,22 @@ typedef struct keySlotsReplyData { typedef void (*keyPhase)(keySlots *, void *); /* phased operations */ +/* Wrapper struct that holds both keySlots and the original connection options */ +typedef struct keySlotsContext { + keySlots slots; /* the key slots structure */ + valkeyClusterOptions opts; /* original connection options */ + struct timeval connection_timeout; /* connect timeout storage */ + struct timeval command_timeout; /* command timeout storage */ +} keySlotsContext; + extern void keySlotsSetupMetrics(keySlots *); extern int keySlotsSetMetricRegistry(keySlots *, mmv_registry_t *); -extern keySlots *keySlotsInit(dict *, void *); -extern keySlots *keySlotsConnect(dict *, keySlotsFlags, 
+extern keySlotsContext *keySlotsInit(dict *, void *); +extern keySlotsContext *keySlotsConnect(dict *, keySlotsFlags, keysInfoCallBack, keysDoneCallBack, void *, void *, void *); extern void keySlotsReconnect(keySlots *, keySlotsFlags, - keysInfoCallBack, keysDoneCallBack, void *, void *, void *); + keysInfoCallBack, keysDoneCallBack, void *, void *, void *, valkeyClusterOptions *); +extern void keySlotsContextFree(keySlotsContext *); extern uint64_t keySlotsInflightRequests(keySlots *); extern int keySlotsRequest(keySlots *, sds, keyClusterCallbackFn *, void *); extern int keySlotsRequestFirstNode(keySlots *slots, const sds cmd, diff --git a/src/libpcp_web/src/timer.c b/src/libpcp_web/src/timer.c index b10564b24f1..9dcb56030d9 100644 --- a/src/libpcp_web/src/timer.c +++ b/src/libpcp_web/src/timer.c @@ -188,13 +188,13 @@ server_metrics_refresh(void *map) mmv_set(map, server.metrics[SERVER_MEM_DATASZ], &datasz); /* update global maps size metrics */ - value = contextmap? dictSize(contextmap) : 0; + value = contextmap? dictSize(contextmap->dict) : 0; mmv_set(map, server.metrics[SERVER_MAP_CONTEXT_SIZE], &value); - value = namesmap? dictSize(namesmap) : 0; + value = namesmap? dictSize(namesmap->dict) : 0; mmv_set(map, server.metrics[SERVER_MAP_METRIC_SIZE], &value); - value = labelsmap? dictSize(labelsmap) : 0; + value = labelsmap? dictSize(labelsmap->dict) : 0; mmv_set(map, server.metrics[SERVER_MAP_LABEL_SIZE], &value); - value = instmap? dictSize(instmap) : 0; + value = instmap? 
dictSize(instmap->dict) : 0; mmv_set(map, server.metrics[SERVER_MAP_INST_SIZE], &value); } diff --git a/src/libpcp_web/src/util.c b/src/libpcp_web/src/util.c index c9e7040cbf5..0f49c3fc976 100644 --- a/src/libpcp_web/src/util.c +++ b/src/libpcp_web/src/util.c @@ -576,11 +576,11 @@ pmwebapi_setup_context(context_t *cp) fprintf(stderr, "pmwebapi_setup_context: SHA1=%s [%s]\n", hashbuf, cp->name.sds); } - cp->pmids = dictCreate(&intKeyDictCallBacks, cp); /* pmid: metric */ - cp->metrics = dictCreate(&sdsKeyDictCallBacks, cp); /* name: metric */ - cp->indoms = dictCreate(&intKeyDictCallBacks, cp); /* id: indom */ - cp->domains = dictCreate(&intKeyDictCallBacks, cp); /* id: domain */ - cp->clusters = dictCreate(&intKeyDictCallBacks, cp);/* id: cluster */ + cp->pmids = dictCreate(&intKeyDictCallBacks); /* pmid: metric */ + cp->metrics = dictCreate(&sdsKeyDictCallBacks); /* name: metric */ + cp->indoms = dictCreate(&intKeyDictCallBacks); /* id: indom */ + cp->domains = dictCreate(&intKeyDictCallBacks); /* id: domain */ + cp->clusters = dictCreate(&intKeyDictCallBacks);/* id: cluster */ pmwebapi_locate_context(cp); } @@ -640,7 +640,6 @@ pmwebapi_free_context(context_t *cp) void pmwebapi_release_context(context_t *cp) { - dictIterator *iterator; dictEntry *entry; if (cp->context >= 0) { @@ -664,31 +663,31 @@ pmwebapi_release_context(context_t *cp) dictRelease(cp->metrics); /* but, one entry per name */ if (cp->pmids) { - iterator = dictGetIterator(cp->pmids); - while ((entry = dictNext(iterator)) != NULL) + dictIterator iter; + dictInitIterator(&iter, cp->pmids); + while ((entry = dictNext(&iter)) != NULL) pmwebapi_free_metric((metric_t *)dictGetVal(entry)); - dictReleaseIterator(iterator); dictRelease(cp->pmids); } if (cp->clusters) { - iterator = dictGetIterator(cp->clusters); - while ((entry = dictNext(iterator)) != NULL) + dictIterator iter; + dictInitIterator(&iter, cp->clusters); + while ((entry = dictNext(&iter)) != NULL) pmwebapi_free_cluster((cluster_t 
*)dictGetVal(entry)); - dictReleaseIterator(iterator); dictRelease(cp->clusters); } if (cp->indoms) { - iterator = dictGetIterator(cp->indoms); - while ((entry = dictNext(iterator)) != NULL) + dictIterator iter; + dictInitIterator(&iter, cp->indoms); + while ((entry = dictNext(&iter)) != NULL) pmwebapi_free_indom((indom_t *)dictGetVal(entry)); - dictReleaseIterator(iterator); dictRelease(cp->indoms); } if (cp->domains) { - iterator = dictGetIterator(cp->domains); - while ((entry = dictNext(iterator)) != NULL) + dictIterator iter; + dictInitIterator(&iter, cp->domains); + while ((entry = dictNext(&iter)) != NULL) pmwebapi_free_domain((domain_t *)dictGetVal(entry)); - dictReleaseIterator(iterator); dictRelease(cp->domains); } } @@ -804,7 +803,6 @@ pmwebapi_add_cluster_labels(struct context *context, struct cluster *cluster) void pmwebapi_free_indom(indom_t *indom) { - dictIterator *iterator; dictEntry *entry; sdsfree(indom->helptext); @@ -815,10 +813,10 @@ pmwebapi_free_indom(indom_t *indom) pmFreeLabelSets(indom->labelset, 1); if (indom->insts) { - iterator = dictGetIterator(indom->insts); - while ((entry = dictNext(iterator)) != NULL) + dictIterator iter; + dictInitIterator(&iter, indom->insts); + while ((entry = dictNext(&iter)) != NULL) pmwebapi_free_instance((instance_t *)dictGetVal(entry)); - dictReleaseIterator(iterator); dictRelease(indom->insts); } @@ -835,7 +833,7 @@ pmwebapi_new_indom(context_t *context, domain_t *domain, pmInDom key) return NULL; indom->indom = key; indom->domain = domain; - indom->insts = dictCreate(&intKeyDictCallBacks, indom); + indom->insts = dictCreate(&intKeyDictCallBacks); dictAdd(context->indoms, &key, (void *)indom); return indom; } @@ -917,8 +915,10 @@ pmwebapi_add_instances_labels(struct context *context, struct indom *indom) if ((length = labelsetlen(labels)) == 0) continue; inst = labelsets[i].inst; - if ((instance = dictFetchValue(indom->insts, &inst)) == NULL) + dictEntry *entry; + if ((entry = dictFind(indom->insts, 
&inst)) == NULL) continue; + instance = (instance_t *)dictGetVal(entry); if ((labels = pmwebapi_labelsetdup(labels)) == NULL) { if (pmDebugOptions.series) fprintf(stderr, "failed to dup %s instance labels: %s\n", @@ -1003,16 +1003,20 @@ pmwebapi_add_instance(struct indom *indom, int inst, char *name) if (name == NULL || (length = strlen(name)) == 0) return NULL; - if ((instance = dictFetchValue(indom->insts, &inst)) != NULL) { - /* has the external name changed for this internal identifier? */ - if ((sdslen(instance->name.sds) != length) || - (strncmp(instance->name.sds, name, length) != 0)) { - sdsclear(instance->name.sds); - instance->name.sds = sdscatlen(instance->name.sds, name, length); - pmwebapi_string_hash(instance->name.id, name, length); - pmwebapi_instance_hash(indom, instance); + { + dictEntry *entry; + if ((entry = dictFind(indom->insts, &inst)) != NULL) { + instance = (instance_t *)dictGetVal(entry); + /* has the external name changed for this internal identifier? */ + if ((sdslen(instance->name.sds) != length) || + (strncmp(instance->name.sds, name, length) != 0)) { + sdsclear(instance->name.sds); + instance->name.sds = sdscatlen(instance->name.sds, name, length); + pmwebapi_string_hash(instance->name.id, name, length); + pmwebapi_instance_hash(indom, instance); + } + return instance; } - return instance; } return pmwebapi_new_instance(indom, inst, sdsnewlen(name, length)); } @@ -1037,18 +1041,19 @@ pmwebapi_add_indom_instances(struct context *context, struct indom *indom) { struct instance *instance; unsigned int count = 0; - dictIterator *iterator; dictEntry *entry; char errmsg[PM_MAXERRMSGLEN], buffer[64], **namelist = NULL; int *instlist = NULL, i, sts; /* refreshing instance domain entries so mark all out-of-date first */ - iterator = dictGetIterator(indom->insts); - while ((entry = dictNext(iterator)) != NULL) { - instance = dictGetVal(entry); - instance->updated = 0; + { + dictIterator iter; + dictInitIterator(&iter, indom->insts); + while 
((entry = dictNext(&iter)) != NULL) { + instance = (instance_t *)dictGetVal(entry); + instance->updated = 0; + } } - dictReleaseIterator(iterator); if ((sts = pmGetInDom(indom->indom, &instlist, &namelist)) >= 0) { for (i = 0; i < sts; i++) { @@ -1141,8 +1146,13 @@ pmwebapi_new_metric(context_t *cp, const sds name, pmDesc *desc, if (name && i == numnames) numextra = 1; - if ((metric = dictFetchValue(cp->pmids, &desc->pmid)) != NULL) - return pmwebapi_add_metric(cp, name, desc, numnames, names); + { + dictEntry *entry; + if ((entry = dictFind(cp->pmids, &desc->pmid)) != NULL) { + metric = (metric_t *)dictGetVal(entry); + return pmwebapi_add_metric(cp, name, desc, numnames, names); + } + } if ((metric = calloc(1, sizeof(metric_t))) == NULL) return NULL; @@ -1183,16 +1193,25 @@ pmwebapi_add_metric(context_t *cp, const sds base, pmDesc *desc, int numnames, c int i; /* search for a match on any of the given names */ - if (base && (metric = dictFetchValue(cp->metrics, base)) != NULL) - return metric; + { + dictEntry *entry; + if (base && (entry = dictFind(cp->metrics, base)) != NULL) { + metric = (metric_t *)dictGetVal(entry); + return metric; + } + } name = sdsempty(); for (i = 0; i < numnames; i++) { sdsclear(name); name = sdscat(name, names[i]); - if ((metric = dictFetchValue(cp->metrics, name)) != NULL) { - sdsfree(name); - return metric; + { + dictEntry *entry; + if ((entry = dictFind(cp->metrics, name)) != NULL) { + metric = (metric_t *)dictGetVal(entry); + sdsfree(name); + return metric; + } } } sdsfree(name); @@ -1366,3 +1385,4 @@ pmwebapi_nsectimestamp(sds s, struct timespec *timestamp) (unsigned int)timestamp->tv_nsec); return sdscatlen(s, buffer, length); } + diff --git a/src/libpcp_web/src/webgroup.c b/src/libpcp_web/src/webgroup.c index 9c7e203904e..1844177766c 100644 --- a/src/libpcp_web/src/webgroup.c +++ b/src/libpcp_web/src/webgroup.c @@ -178,11 +178,14 @@ webgroup_access(struct context *cp, sds hostspec, dict *params, /* add username from Basic Auth 
header if none given in hostspec */ if (params && cp->username == NULL) { - if ((value = dictFetchValue(params, AUTH_USERNAME)) != NULL) { + dictEntry *entry; + if ((entry = dictFind(params, AUTH_USERNAME)) != NULL) { + value = (sds)dictGetVal(entry); __pmHashAdd(PCP_ATTR_USERNAME, strdup(value), &attrs); cp->username = sdsdup(value); } - if ((value = dictFetchValue(params, AUTH_PASSWORD)) != NULL) { + if ((entry = dictFind(params, AUTH_PASSWORD)) != NULL) { + value = (sds)dictGetVal(entry); __pmHashAdd(PCP_ATTR_PASSWORD, strdup(value), &attrs); cp->password = sdsdup(value); } @@ -213,10 +216,15 @@ webgroup_new_context(pmWebGroupSettings *sp, dict *params, sds hostspec = NULL, timeout; if (params) { - if ((hostspec = dictFetchValue(params, PARAM_HOSTSPEC)) == NULL) - hostspec = dictFetchValue(params, PARAM_HOSTNAME); + dictEntry *entry; + if ((entry = dictFind(params, PARAM_HOSTSPEC)) == NULL) { + entry = dictFind(params, PARAM_HOSTNAME); + } + if (entry != NULL) + hostspec = (sds)dictGetVal(entry); - if ((timeout = dictFetchValue(params, PARAM_POLLTIME)) != NULL) { + if ((entry = dictFind(params, PARAM_POLLTIME)) != NULL) { + timeout = (sds)dictGetVal(entry); seconds = strtod(timeout, &endptr); if (*endptr != '\0') { infofmt(*message, "invalid timeout requested in polltime"); @@ -305,7 +313,7 @@ webgroup_timers_stop(struct webgroups *groups) static void webgroup_garbage_collect(struct webgroups *groups) { - dictIterator *iterator; + dictIterator iter; dictEntry *entry; context_t *cp; unsigned int count = 0, drops = 0, garbageset = 0, inactiveset = 0; @@ -316,10 +324,10 @@ webgroup_garbage_collect(struct webgroups *groups) /* do context GC if we get the lock (else don't block here) */ if (uv_mutex_trylock(&groups->mutex) == 0) { - iterator = dictGetSafeIterator(groups->contexts); - for (entry = dictNext(iterator); entry;) { + dictInitIterator(&iter, groups->contexts); + for (entry = dictNext(&iter); entry;) { cp = (context_t *)dictGetVal(entry); - entry = 
dictNext(iterator); + entry = dictNext(&iter); if (cp->privdata != groups) continue; if (cp->garbage) @@ -337,7 +345,6 @@ webgroup_garbage_collect(struct webgroups *groups) } count++; } - dictReleaseIterator(iterator); /* if dropping the last remaining context, do cleanup */ if (groups->active && drops == count) { @@ -439,7 +446,14 @@ webgroup_lookup_context(pmWebGroupSettings *sp, sds *id, dict *params, *status = -EINVAL; return NULL; } - cp = (struct context *)dictFetchValue(groups->contexts, &key); + { + dictEntry *entry; + entry = dictFind(groups->contexts, &key); + if (entry == NULL) + cp = NULL; + else + cp = (struct context *)dictGetVal(entry); + } if (cp == NULL) { infofmt(*message, "unknown context identifier: %u", key); *status = -ENOTCONN; @@ -600,8 +614,11 @@ pmWebGroupDerive(pmWebGroupSettings *settings, sds id, dict *params, void *arg) int sts = 0; if (params) { - metric = dictFetchValue(params, PARAM_MNAME); - expr = dictFetchValue(params, PARAM_EXPR); + dictEntry *entry; + entry = dictFind(params, PARAM_MNAME); + metric = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_EXPR); + expr = entry ? (sds)dictGetVal(entry) : NULL; } else { metric = expr = NULL; } @@ -753,7 +770,11 @@ webgroup_fetch(pmWebGroupSettings *settings, context_t *cp, if (value->updated == 0) continue; inst = value->inst; - instance = dictFetchValue(indom->insts, &inst); + { + dictEntry *e; + e = dictFind(indom->insts, &inst); + instance = e ? 
(struct instance *)dictGetVal(e) : NULL; + } if (instance == NULL) { /* found an instance not in existing indom cache */ indom->updated = 0; /* invalidate this cache */ @@ -821,8 +842,14 @@ webgroup_lookup_pmid(pmWebGroupSettings *settings, context_t *cp, sds name, void "webgroup_lookup_pmid", name); return NULL; } - if ((mp = (struct metric *)dictFetchValue(cp->pmids, &pmid)) != NULL) - return mp; + { + dictEntry *entry; + entry = dictFind(cp->pmids, &pmid); + if (entry != NULL) { + mp = (struct metric *)dictGetVal(entry); + return mp; + } + } return pmwebapi_new_pmid(cp, NULL, pmid, settings->module.on_info, arg); } @@ -833,8 +860,14 @@ webgroup_lookup_metric(pmWebGroupSettings *settings, context_t *cp, sds name, vo pmID pmid; int sts; - if ((mp = dictFetchValue(cp->metrics, name)) != NULL) - return mp; + { + dictEntry *entry; + entry = dictFind(cp->metrics, name); + if (entry != NULL) { + mp = (struct metric *)dictGetVal(entry); + return mp; + } + } if ((sts = pmLookupName(1, (const char **)&name, &pmid)) < 0) { if (sts == PM_ERR_IPC) cp->setup = 0; @@ -898,10 +931,19 @@ pmWebGroupFetch(pmWebGroupSettings *settings, sds id, dict *params, void *arg) int sts = 0, singular = 0, numnames = 0; if (params) { - if ((metrics = dictFetchValue(params, PARAM_MNAMES)) == NULL) { - if ((metrics = dictFetchValue(params, PARAM_MNAME)) == NULL) { - if ((pmids = dictFetchValue(params, PARAM_PMIDS)) == NULL) { - if ((pmids = dictFetchValue(params, PARAM_PMID)) != NULL) + dictEntry *entry; + entry = dictFind(params, PARAM_MNAMES); + metrics = entry ? (sds)dictGetVal(entry) : NULL; + if (metrics == NULL) { + entry = dictFind(params, PARAM_MNAME); + metrics = entry ? (sds)dictGetVal(entry) : NULL; + if (metrics == NULL) { + entry = dictFind(params, PARAM_PMIDS); + pmids = entry ? (sds)dictGetVal(entry) : NULL; + if (pmids == NULL) { + entry = dictFind(params, PARAM_PMID); + pmids = entry ? 
(sds)dictGetVal(entry) : NULL; + if (pmids != NULL) singular = 1; } } else { @@ -994,7 +1036,12 @@ webgroup_cache_indom(struct context *cp, pmInDom indom) struct domain *dp; struct indom *ip; - if ((ip = (struct indom *)dictFetchValue(cp->indoms, &indom)) != NULL) + { + dictEntry *entry; + entry = dictFind(cp->indoms, &indom); + ip = entry ? (struct indom *)dictGetVal(entry) : NULL; + } + if (ip != NULL) return ip; dp = pmwebapi_add_domain(cp, pmInDom_domain(indom)); return pmwebapi_new_indom(cp, dp, indom); @@ -1082,7 +1129,6 @@ webgroup_profile(struct context *cp, struct indom *ip, sds instnames, sds instids) { struct instance *instance; - dictIterator *iterator; dictEntry *entry; pmInDom indom; regex_t *regex; @@ -1110,28 +1156,30 @@ webgroup_profile(struct context *cp, struct indom *ip, regex = name_match_setup(match, numnames, names); - iterator = dictGetIterator(ip->insts); - while (insts && (entry = dictNext(iterator)) != NULL) { - instance = (instance_t *)dictGetVal(entry); - if (instance->updated == 0) - continue; + { + dictIterator iter; + dictInitIterator(&iter, ip->insts); + while (insts && (entry = dictNext(&iter)) != NULL) { + instance = (instance_t *)dictGetVal(entry); + if (instance->updated == 0) + continue; - found = 0; - if (numnames == 0 && numids == 0) - found = 1; - else if (numnames > 0 && - instance_name_match(instance, numnames, names, match, regex)) - found = 1; - else if (numids > 0 && instance_id_match(instance, numids, ids)) - found = 1; - if (found == 0) - continue; + found = 0; + if (numnames == 0 && numids == 0) + found = 1; + else if (numnames > 0 && + instance_name_match(instance, numnames, names, match, regex)) + found = 1; + else if (numids > 0 && instance_id_match(instance, numids, ids)) + found = 1; + if (found == 0) + continue; - /* add instance identifier to list */ - insts[count] = instance->inst; - count++; + /* add instance identifier to list */ + insts[count] = instance->inst; + count++; + } } - 
dictReleaseIterator(iterator); name_match_free(regex, numnames); sdsfreesplitres(names, numnames); @@ -1164,11 +1212,18 @@ pmWebGroupProfile(pmWebGroupSettings *settings, sds id, dict *params, void *arg) int sts = 0; if (params) { - metric = dictFetchValue(params, PARAM_MNAME); - indomid = dictFetchValue(params, PARAM_INDOM); - inames = dictFetchValue(params, PARAM_INAME); - instids = dictFetchValue(params, PARAM_INSTANCE); - if ((expr = dictFetchValue(params, PARAM_EXPR)) != NULL) { + dictEntry *entry; + entry = dictFind(params, PARAM_MNAME); + metric = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INDOM); + indomid = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INAME); + inames = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INSTANCE); + instids = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_EXPR); + expr = entry ? (sds)dictGetVal(entry) : NULL; + if (expr != NULL) { if (strcmp(expr, "add") == 0) profile = PROFILE_ADD; else if (strcmp(expr, "del") != 0) { @@ -1177,7 +1232,9 @@ pmWebGroupProfile(pmWebGroupSettings *settings, sds id, dict *params, void *arg) goto done; } } - if ((match = dictFetchValue(params, PARAM_MATCH)) != NULL) { + entry = dictFind(params, PARAM_MATCH); + match = entry ? (sds)dictGetVal(entry) : NULL; + if (match != NULL) { if (strcmp(match, "regex") == 0) matches = MATCH_REGEX; else if (strcmp(match, "glob") == 0) @@ -1247,8 +1304,15 @@ pmWebGroupChildren(pmWebGroupSettings *settings, sds id, dict *params, void *arg int i, l, n, sts = 0, *status; if (params) { - if ((prefix = dictFetchValue(params, PARAM_PREFIX)) == NULL) - prefix = dictFetchValue(params, PARAM_MNAME); + { + dictEntry *entry; + entry = dictFind(params, PARAM_PREFIX); + prefix = entry ? (sds)dictGetVal(entry) : NULL; + if (prefix == NULL) { + entry = dictFind(params, PARAM_MNAME); + prefix = entry ? 
(sds)dictGetVal(entry) : NULL; + } + } } if (!(cp = webgroup_lookup_context(settings, &id, params, &sts, &msg, arg))) @@ -1319,15 +1383,15 @@ webgroup_instances(pmWebGroupSettings *settings, { struct instance *instance; pmWebInstance webinst; - dictIterator *iterator; + dictIterator iter; dictEntry *entry; regex_t *regex; int found; regex = name_match_setup(match, numnames, instnames); - iterator = dictGetIterator(ip->insts); - while ((entry = dictNext(iterator)) != NULL) { + dictInitIterator(&iter, ip->insts); + while ((entry = dictNext(&iter)) != NULL) { instance = (instance_t *)dictGetVal(entry); if (instance->updated == 0) continue; @@ -1350,7 +1414,6 @@ webgroup_instances(pmWebGroupSettings *settings, settings->callbacks.on_instance(cp->origin, &webinst, arg); } - dictReleaseIterator(iterator); name_match_free(regex, numnames); } @@ -1371,11 +1434,18 @@ pmWebGroupInDom(pmWebGroupSettings *settings, sds id, dict *params, void *arg) int sts = 0, count = 0, numids = 0, numnames = 0; if (params) { - metric = dictFetchValue(params, PARAM_MNAME); - indomid = dictFetchValue(params, PARAM_INDOM); - instids = dictFetchValue(params, PARAM_INSTANCE); - instnames = dictFetchValue(params, PARAM_INAME); - if ((match = dictFetchValue(params, PARAM_MATCH)) != NULL) { + dictEntry *entry; + entry = dictFind(params, PARAM_MNAME); + metric = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INDOM); + indomid = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INSTANCE); + instids = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INAME); + instnames = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_MATCH); + match = entry ? 
(sds)dictGetVal(entry) : NULL; + if (match != NULL) { if (strcmp(match, "regex") == 0) matches = MATCH_REGEX; else if (strcmp(match, "glob") == 0) @@ -1595,9 +1665,17 @@ pmWebGroupMetric(pmWebGroupSettings *settings, sds id, dict *params, void *arg) int i, sts = 0, numnames = 0; if (params) { - if ((prefix = dictFetchValue(params, PARAM_PREFIX)) == NULL && - (prefix = dictFetchValue(params, PARAM_MNAMES)) == NULL) - prefix = dictFetchValue(params, PARAM_MNAME); + dictEntry *entry; + entry = dictFind(params, PARAM_PREFIX); + prefix = entry ? (sds)dictGetVal(entry) : NULL; + if (prefix == NULL) { + entry = dictFind(params, PARAM_MNAMES); + prefix = entry ? (sds)dictGetVal(entry) : NULL; + } + if (prefix == NULL) { + entry = dictFind(params, PARAM_MNAME); + prefix = entry ? (sds)dictGetVal(entry) : NULL; + } if (prefix) { length = sdslen(prefix); names = sdssplitlen(prefix, length, ",", 1, &numnames); @@ -1826,7 +1904,11 @@ webgroup_scrape(pmWebGroupSettings *settings, context_t *cp, value = &metric->u.vlist->value[k]; if (value->updated == 0 || indom == NULL) continue; - instance = dictFetchValue(indom->insts, &value->inst); + { + dictEntry *e; + e = dictFind(indom->insts, &value->inst); + instance = e ? (struct instance *)dictGetVal(e) : NULL; + } if (instance == NULL) continue; v = webgroup_encode_value(v, type, &value->atom); @@ -1993,10 +2075,21 @@ pmWebGroupScrape(pmWebGroupSettings *settings, sds id, dict *params, void *arg) sds msg = NULL, *names = NULL, metrics; if (params) { - if ((metrics = dictFetchValue(params, PARAM_MNAMES)) == NULL) - if ((metrics = dictFetchValue(params, PARAM_MNAME)) == NULL) - if ((metrics = dictFetchValue(params, PARAM_PREFIX)) == NULL) - metrics = dictFetchValue(params, PARAM_TARGET); + dictEntry *entry; + entry = dictFind(params, PARAM_MNAMES); + metrics = entry ? (sds)dictGetVal(entry) : NULL; + if (metrics == NULL) { + entry = dictFind(params, PARAM_MNAME); + metrics = entry ? 
(sds)dictGetVal(entry) : NULL; + } + if (metrics == NULL) { + entry = dictFind(params, PARAM_PREFIX); + metrics = entry ? (sds)dictGetVal(entry) : NULL; + } + if (metrics == NULL) { + entry = dictFind(params, PARAM_TARGET); + metrics = entry ? (sds)dictGetVal(entry) : NULL; + } } else { metrics = NULL; } @@ -2014,8 +2107,11 @@ pmWebGroupScrape(pmWebGroupSettings *settings, sds id, dict *params, void *arg) /* Add filtering information to scrape */ if (params) { - sds match = dictFetchValue(params, PARAM_MATCH); - sds filter = dictFetchValue(params, PARAM_FILTER); + dictEntry *entry; + entry = dictFind(params, PARAM_MATCH); + sds match = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_FILTER); + sds filter = entry ? (sds)dictGetVal(entry) : NULL; scrape.numfilters = 0; if (match != NULL) { @@ -2115,14 +2211,14 @@ store_add_profile(struct instore *store) } static void -store_found_insts(void *arg, const struct dictEntry *entry) +store_found_insts(void *arg, dictEntry *entry) { struct instore *store = (struct instore *)arg; pmValue *value; int i, id, sts; if (store->count < store->maximum) { - id = *(int *)entry->key; + id = *(int *)dictGetKey(entry); i = store_add_instid(store, id); value = &store->vset->vlist[i]; if ((sts = __pmStuffValue(store->atom, value, store->type)) < 0) @@ -2134,7 +2230,7 @@ store_found_insts(void *arg, const struct dictEntry *entry) } static void -store_named_insts(void *arg, const struct dictEntry *entry) +store_named_insts(void *arg, dictEntry *entry) { struct instore *store = (struct instore *)arg; struct instance *instance = (instance_t *)dictGetVal(entry); @@ -2158,7 +2254,6 @@ webgroup_store(struct context *context, struct metric *metric, pmValueSet *valueset = NULL; pmResult *result = NULL; size_t bytes; - long cursor = 0; int i, id, sts, count; if ((sts = __pmStringValue(value, &atom, metric->desc.type)) < 0) @@ -2213,10 +2308,14 @@ webgroup_store(struct context *context, struct metric *metric, store.names = 
names; store.maximum = count; store.numnames = numnames; - do { - cursor = dictScan(indom->insts, cursor, - store_named_insts, NULL, &store); - } while (cursor && store.status >= 0); + { + dictIterator iter; + dictEntry *entry; + dictInitIterator(&iter, indom->insts); + while ((entry = dictNext(&iter)) != NULL && store.status >= 0) { + store_named_insts(&store, entry); + } + } store_add_profile(&store); sts = store.status; } else { @@ -2247,11 +2346,17 @@ pmWebGroupStore(pmWebGroupSettings *settings, sds id, dict *params, void *arg) int sts = 0, numids = 0, numnames = 0; if (params) { - metric = dictFetchValue(params, PARAM_MNAME); - value = dictFetchValue(params, PARAM_MVALUE); - pmid = dictFetchValue(params, PARAM_PMID); - instids = dictFetchValue(params, PARAM_INSTANCE); - instnames = dictFetchValue(params, PARAM_INAME); + dictEntry *entry; + entry = dictFind(params, PARAM_MNAME); + metric = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_MVALUE); + value = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_PMID); + pmid = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INSTANCE); + instids = entry ? (sds)dictGetVal(entry) : NULL; + entry = dictFind(params, PARAM_INAME); + instnames = entry ? 
(sds)dictGetVal(entry) : NULL; } else { metric = value = pmid = instids = instnames = NULL; } @@ -2356,7 +2461,7 @@ pmWebGroupSetup(pmWebGroupModule *module) srandom(pid ^ (unsigned int)ts.tv_sec ^ (unsigned int)ts.tv_nsec); /* setup a dictionary mapping context number to data */ - groups->contexts = dictCreate(&intKeyDictCallBacks, NULL); + groups->contexts = dictCreate(&intKeyDictCallBacks); return 0; } @@ -2380,28 +2485,37 @@ pmWebGroupSetConfiguration(pmWebGroupModule *module, dict *config) char *endnum; sds value; - if ((value = dictFetchValue(config, WORK_TIMER)) == NULL) { - default_worker = DEFAULT_WORK_TIMER; - } else { - default_worker = strtoul(value, &endnum, 0); - if (*endnum != '\0') + { + dictEntry *entry; + entry = dictFind(config, WORK_TIMER); + value = entry ? (sds)dictGetVal(entry) : NULL; + if (value == NULL) { default_worker = DEFAULT_WORK_TIMER; - } + } else { + default_worker = strtoul(value, &endnum, 0); + if (*endnum != '\0') + default_worker = DEFAULT_WORK_TIMER; + } - if ((value = dictFetchValue(config, POLL_TIMEOUT)) == NULL) { - default_timeout = DEFAULT_POLL_TIMEOUT; - } else { - default_timeout = strtoul(value, &endnum, 0); - if (*endnum != '\0') + entry = dictFind(config, POLL_TIMEOUT); + value = entry ? (sds)dictGetVal(entry) : NULL; + if (value == NULL) { default_timeout = DEFAULT_POLL_TIMEOUT; - } + } else { + default_timeout = strtoul(value, &endnum, 0); + if (*endnum != '\0') + default_timeout = DEFAULT_POLL_TIMEOUT; + } - if ((value = dictFetchValue(config, BATCHSIZE)) == NULL) { - default_batchsize = DEFAULT_BATCHSIZE; - } else { - default_batchsize = strtoul(value, &endnum, 0); - if (*endnum != '\0') + entry = dictFind(config, BATCHSIZE); + value = entry ? 
(sds)dictGetVal(entry) : NULL; + if (value == NULL) { default_batchsize = DEFAULT_BATCHSIZE; + } else { + default_batchsize = strtoul(value, &endnum, 0); + if (*endnum != '\0') + default_batchsize = DEFAULT_BATCHSIZE; + } } if (groups) { @@ -2457,15 +2571,16 @@ void pmWebGroupClose(pmWebGroupModule *module) { struct webgroups *groups = (struct webgroups *)module->privdata; - dictIterator *iterator; dictEntry *entry; if (groups) { /* walk the contexts, stop timers and free resources */ - iterator = dictGetIterator(groups->contexts); - while ((entry = dictNext(iterator)) != NULL) - webgroup_drop_context((context_t *)dictGetVal(entry), NULL); - dictReleaseIterator(iterator); + { + dictIterator iter; + dictInitIterator(&iter, groups->contexts); + while ((entry = dictNext(&iter)) != NULL) + webgroup_drop_context((context_t *)dictGetVal(entry), NULL); + } dictRelease(groups->contexts); webgroup_timers_stop(groups); memset(groups, 0, sizeof(struct webgroups)); diff --git a/src/pmdas/bpf/bpf.c b/src/pmdas/bpf/bpf.c index a59dfeaecca..95bd1706316 100644 --- a/src/pmdas/bpf/bpf.c +++ b/src/pmdas/bpf/bpf.c @@ -192,7 +192,7 @@ bpf_load_modules(dict *cfg) int failure_count = 0; char *module_name; module *bpf_module; - dictIterator *iterator; + dictIterator iterator; dictEntry *entry; sds entry_key; sds entry_val; @@ -206,8 +206,8 @@ bpf_load_modules(dict *cfg) pmNotifyErr(LOG_INFO, "loading modules"); - iterator = dictGetIterator(cfg); - while((entry = dictNext(iterator)) != NULL) + dictInitIterator(&iterator, cfg); + while((entry = dictNext(&iterator)) != NULL) { entry_key = dictGetKey(entry); entry_val = dictGetVal(entry); @@ -271,7 +271,6 @@ bpf_load_modules(dict *cfg) module_count++; } - dictReleaseIterator(iterator); pmdaCacheOp(clusters, PMDA_CACHE_SAVE); if (failure_count > 0) pmNotifyErr(LOG_INFO, "loaded modules (%d), failed modules (%d)", module_count, failure_count); @@ -496,7 +495,7 @@ bpf_config_load() } } - config = dictCreate(&sdsDictCallBacks, NULL); + config = 
dictCreate(&sdsDictCallBacks); if (config == NULL) { pmNotifyErr(LOG_ERR, "could not init dictionary"); diff --git a/src/pmdas/bpf/bpf.h b/src/pmdas/bpf/bpf.h index 71312bc3761..4cd1325b05e 100644 --- a/src/pmdas/bpf/bpf.h +++ b/src/pmdas/bpf/bpf.h @@ -12,11 +12,10 @@ sdsHashCallBack(const void *key) } static int -sdsCompareCallBack(void *privdata, const void *key1, const void *key2) +sdsCompareCallBack(const void *key1, const void *key2) { int l1, l2; - (void)privdata; l1 = sdslen((sds)key1); l2 = sdslen((sds)key2); if (l1 != l2) return 0; @@ -24,15 +23,14 @@ sdsCompareCallBack(void *privdata, const void *key1, const void *key2) } static void * -sdsDupCallBack(void *privdata, const void *key) +sdsDupCallBack(const void *key) { return sdsdup((sds)key); } static void -sdsFreeCallBack(void *privdata, void *val) +sdsFreeCallBack(void *val) { - (void)privdata; sdsfree(val); } diff --git a/src/pmdas/statsd/src/aggregator-metric-labels.c b/src/pmdas/statsd/src/aggregator-metric-labels.c index 261d1447c12..a225ffbb988 100644 --- a/src/pmdas/statsd/src/aggregator-metric-labels.c +++ b/src/pmdas/statsd/src/aggregator-metric-labels.c @@ -47,7 +47,7 @@ create_labels_dict( .keyDestructor = str_hash_free_callback, .valDestructor = metric_label_free_callback, }; - labels* children = dictCreate(&metric_label_dict_callbacks, container->metrics_privdata); + labels* children = dictCreate(&metric_label_dict_callbacks); item->children = children; pthread_mutex_unlock(&container->mutex); } @@ -132,7 +132,7 @@ find_label_by_name( return 0; } if (out != NULL) { - struct metric_label* label = (struct metric_label*)result->v.val; + struct metric_label* label = (struct metric_label*)dictGetVal(result); *out = label; } pthread_mutex_unlock(&container->mutex); @@ -202,6 +202,7 @@ create_label( ALLOC_CHECK(meta, "Unable to allocate memory for metric label metadata."); (*out)->meta = meta; (*out)->type = METRIC_TYPE_NONE; + (*out)->config = config; meta->instance_label_segment_str = NULL; char* 
label_segment_identifier = create_instance_label_segment_str(datagram->tags); if (label_segment_identifier == NULL) { @@ -308,11 +309,12 @@ print_label_meta(struct agent_config* config, FILE* f, struct metric_label_metad void print_labels(struct agent_config* config, FILE* f, labels* l) { if (l == NULL) return; - dictIterator* iterator = dictGetSafeIterator(l); + dictIterator iterator; + dictInitIterator(&iterator, l); dictEntry* current; long int count = 1; - while ((current = dictNext(iterator)) != NULL) { - struct metric_label* item = (struct metric_label*)current->v.val; + while ((current = dictNext(&iterator)) != NULL) { + struct metric_label* item = (struct metric_label*)dictGetVal(current); fprintf(f, "---\n"); fprintf(f, "#%ld Label: \n", count); if (item->labels != NULL) { @@ -341,7 +343,6 @@ print_labels(struct agent_config* config, FILE* f, labels* l) { count++; } fprintf(f, "---\n"); - dictReleaseIterator(iterator); } /** diff --git a/src/pmdas/statsd/src/aggregator-metrics.c b/src/pmdas/statsd/src/aggregator-metrics.c index 23b432ba604..f43a6bff2d0 100644 --- a/src/pmdas/statsd/src/aggregator-metrics.c +++ b/src/pmdas/statsd/src/aggregator-metrics.c @@ -51,15 +51,9 @@ init_pmda_metrics(struct agent_config* config) { (struct pmda_metrics_container*) malloc(sizeof(struct pmda_metrics_container)); ALLOC_CHECK(container, "Unable to create PMDA metrics container."); pthread_mutex_init(&container->mutex, NULL); - struct pmda_metrics_dict_privdata* dict_data = - (struct pmda_metrics_dict_privdata*) malloc(sizeof(struct pmda_metrics_dict_privdata)); - ALLOC_CHECK(dict_data, "Unable to create priv PMDA metrics container data."); - dict_data->config = config; - dict_data->container = container; - metrics* m = dictCreate(&metric_dict_callbacks, dict_data); + metrics* m = dictCreate(&metric_dict_callbacks); container->metrics = m; container->generation = 0; - container->metrics_privdata = dict_data; return container; } @@ -232,11 +226,12 @@ 
write_metrics_to_file(struct agent_config* config, struct pmda_metrics_container VERBOSE_LOG(0, "Unable to open file for output."); return; } - dictIterator* iterator = dictGetSafeIterator(m); + dictIterator iterator; + dictInitIterator(&iterator, m); dictEntry* current; long int count = 0; - while ((current = dictNext(iterator)) != NULL) { - struct metric* item = (struct metric*)current->v.val; + while ((current = dictNext(&iterator)) != NULL) { + struct metric* item = (struct metric*)dictGetVal(current); switch (item->type) { case METRIC_TYPE_COUNTER: print_counter_metric(config, f, item); @@ -253,7 +248,6 @@ write_metrics_to_file(struct agent_config* config, struct pmda_metrics_container } count++; } - dictReleaseIterator(iterator); fprintf(f, "----------------\n"); fprintf(f, "Total number of records: %lu \n", count); fclose(f); @@ -280,7 +274,7 @@ find_metric_by_name(struct pmda_metrics_container* container, char* key, struct return 0; } if (out != NULL) { - struct metric* item = (struct metric*)result->v.val; + struct metric* item = (struct metric*)dictGetVal(result); *out = item; } pthread_mutex_unlock(&container->mutex); @@ -306,6 +300,7 @@ create_metric(struct agent_config* config, struct statsd_datagram* datagram, str (*out)->meta = create_metric_meta(datagram); (*out)->children = NULL; (*out)->committed = 0; + (*out)->config = config; int status = 0; (*out)->value = NULL; // this metric doesn't have root value diff --git a/src/pmdas/statsd/src/aggregator-metrics.h b/src/pmdas/statsd/src/aggregator-metrics.h index 22d06951655..7887fbc9cea 100644 --- a/src/pmdas/statsd/src/aggregator-metrics.h +++ b/src/pmdas/statsd/src/aggregator-metrics.h @@ -64,6 +64,7 @@ typedef struct metric_label { struct metric_label_metadata* meta; enum METRIC_TYPE type; // either this or parent reference, so that we know how to free void* value void* value; + struct agent_config* config; // needed for cleanup callbacks (libvalkey compat) } metric_label; typedef struct metric { @@ 
-73,6 +74,7 @@ typedef struct metric { labels* children; enum METRIC_TYPE type; void* value; + struct agent_config* config; // needed for cleanup callbacks (libvalkey compat) } metric; /** @@ -92,16 +94,10 @@ typedef struct duration_values_meta { typedef struct pmda_metrics_container { metrics* metrics; - struct pmda_metrics_dict_privdata* metrics_privdata; size_t generation; pthread_mutex_t mutex; } pmda_metrics_container; -typedef struct pmda_metrics_dict_privdata { - struct agent_config* config; - struct pmda_metrics_container* container; -} pmda_metrics_dict_privdata; - /** * Creates new pmda_metrics_container structure, initializes all stats to 0 */ diff --git a/src/pmdas/statsd/src/dict-callbacks.c b/src/pmdas/statsd/src/dict-callbacks.c index a061bde2369..1614dbf8a96 100644 --- a/src/pmdas/statsd/src/dict-callbacks.c +++ b/src/pmdas/statsd/src/dict-callbacks.c @@ -19,30 +19,33 @@ #include "utils.h" void -metric_label_free_callback(void* privdata, void* val) +metric_label_free_callback(void* val) { - struct agent_config* config = ((struct pmda_metrics_dict_privdata*)privdata)->config; - free_metric_label(config, (struct metric_label*)val); + struct metric_label* label = (struct metric_label*)val; + if (label != NULL && label->config != NULL) { + free_metric_label(label->config, label); + } } void -metric_free_callback(void* privdata, void* val) +metric_free_callback(void* val) { - struct agent_config* config = ((struct pmda_metrics_dict_privdata*)privdata)->config; - free_metric(config, (struct metric*)val); + struct metric* metric = (struct metric*)val; + if (metric != NULL && metric->config != NULL) { + free_metric(metric->config, metric); + } } void -str_hash_free_callback(void* privdata, void* key) { +str_hash_free_callback(void* key) { if (key != NULL) { free(key); } } void* -str_duplicate_callback(void* privdata, const void* key) +str_duplicate_callback(const void* key) { - (void)privdata; size_t length = strlen(key) + 1; char* duplicate = 
malloc(length); ALLOC_CHECK(duplicate, "Unable to duplicate key."); @@ -51,14 +54,13 @@ str_duplicate_callback(void* privdata, const void* key) } int -str_compare_callback(void* privdata, const void* key1, const void* key2) +str_compare_callback(const void* key1, const void* key2) { - (void)privdata; return strcmp((char*)key1, (char*)key2) == 0; } uint64_t str_hash_callback(const void* key) { - return dictGenCaseHashFunction((unsigned char*)key, strlen((char*)key)); + return dictGenHashFunction((unsigned char*)key, strlen((char*)key)); } diff --git a/src/pmdas/statsd/src/dict-callbacks.h b/src/pmdas/statsd/src/dict-callbacks.h index 4cef197825f..f1037a49df2 100644 --- a/src/pmdas/statsd/src/dict-callbacks.h +++ b/src/pmdas/statsd/src/dict-callbacks.h @@ -16,19 +16,19 @@ #define AGGREGATOR_METRIC_DICT_CALLBACKS_ extern void -metric_label_free_callback(void* privdata, void* val); +metric_label_free_callback(void* val); extern void -metric_free_callback(void* privdata, void* val); +metric_free_callback(void* val); extern void -str_hash_free_callback(void* privdata, void* key); +str_hash_free_callback(void* key); extern void* -str_duplicate_callback(void* privdata, const void* key); +str_duplicate_callback(const void* key); extern int -str_compare_callback(void* privdata, const void* key1, const void* key2); +str_compare_callback(const void* key1, const void* key2); extern uint64_t str_hash_callback(const void* key); diff --git a/src/pmdas/statsd/src/pmda-callbacks.c b/src/pmdas/statsd/src/pmda-callbacks.c index 4ffc56cf0d7..e919b8523c8 100644 --- a/src/pmdas/statsd/src/pmda-callbacks.c +++ b/src/pmdas/statsd/src/pmda-callbacks.c @@ -202,10 +202,11 @@ map_labels_to_instances(struct metric* item, struct pmda_data_extension* data, s size_t label_index = 0; static size_t keywords_count = 9; // - iterate - dictIterator* iterator = dictGetSafeIterator(item->children); + dictIterator iterator; + dictInitIterator(&iterator, item->children); dictEntry* current; - while 
((current = dictNext(iterator)) != NULL) { - struct metric_label* label = (struct metric_label*)current->v.val; + while ((current = dictNext(&iterator)) != NULL) { + struct metric_label* label = (struct metric_label*)dictGetVal(current); // store on which instance domain instance index we find current label, item->meta->pcp_instance_map->labels[label_index] = label->labels; if (label->type == METRIC_TYPE_DURATION) { @@ -236,7 +237,6 @@ map_labels_to_instances(struct metric* item, struct pmda_data_extension* data, s } label_index++; } - dictReleaseIterator(iterator); data->pcp_instance_domains[indom_i].it_numinst = indom_i_inst_cnt; data->pcp_instance_domains[indom_i].it_set = instances; VERBOSE_LOG( @@ -430,14 +430,14 @@ statsd_map_stats(pmdaExt* pmda) { struct pmda_metrics_container* container = data->metrics_storage; pthread_mutex_lock(&container->mutex); metrics* m = container->metrics; - dictIterator* iterator = dictGetSafeIterator(m); + dictIterator iterator; + dictInitIterator(&iterator, m); dictEntry* current; - while ((current = dictNext(iterator)) != NULL) { - struct metric* item = (struct metric*)current->v.val; - char* key = (char*)current->key; + while ((current = dictNext(&iterator)) != NULL) { + struct metric* item = (struct metric*)dictGetVal(current); + char* key = (char*)dictGetKey(current); map_metric(key, item, pmda); } - dictReleaseIterator(iterator); data->generation = data->metrics_storage->generation; pthread_mutex_unlock(&container->mutex); @@ -767,7 +767,7 @@ statsd_label_callback(pmInDom in_dom, unsigned int inst, pmLabelSet** lp) { if (entry == NULL) { return 0; } - char* metric_key = (char*)entry->v.val; + char* metric_key = (char*)dictGetVal(entry); struct metric* item; int metric_found = find_metric_by_name(data->metrics_storage, metric_key, &item); if (!metric_found) { diff --git a/src/pmdas/statsd/src/pmdastatsd.c b/src/pmdas/statsd/src/pmdastatsd.c index b5eac43e988..ab0e852a350 100644 --- a/src/pmdas/statsd/src/pmdastatsd.c +++ 
b/src/pmdas/statsd/src/pmdastatsd.c @@ -159,8 +159,6 @@ free_shared_data(struct agent_config* config, struct pmda_data_extension* data) free(config->debug_output_filename); // remove metrics dictionary and related dictRelease(data->metrics_storage->metrics); - // privdata will be left behind, need to remove manually - free(data->metrics_storage->metrics_privdata); pthread_mutex_destroy(&data->metrics_storage->mutex); free(data->metrics_storage); // remove stats dictionary and related @@ -239,7 +237,7 @@ init_data_ext( create_statsd_hardcoded_instances(data); data->metrics_storage = metrics_storage; data->stats_storage = stats_storage; - data->instance_map = dictCreate(&instance_map_callbacks, NULL); + data->instance_map = dictCreate(&instance_map_callbacks); data->generation = -1; // trigger first mapping of metrics for PMNS data->notify = 0; } diff --git a/src/pmfind/GNUmakefile b/src/pmfind/GNUmakefile index 02aad740111..1d6481a88b3 100644 --- a/src/pmfind/GNUmakefile +++ b/src/pmfind/GNUmakefile @@ -14,6 +14,11 @@ TOPDIR = ../.. 
include $(TOPDIR)/src/include/builddefs +# Prepend libvalkey dict.h path so source.c finds it before PCP's dict.h +# Using override to prepend our include path before the PCP includes that builddefs added +ifeq "$(HAVE_LIBUV)" "true" +override CFLAGS := -I$(TOPDIR)/vendor/github.com/valkey-io/libvalkey/src $(CFLAGS) +endif CFILES = pmfind.c HFILES = groups.h diff --git a/src/pmfind/source.c b/src/pmfind/source.c index ea4f7652f69..f9230ee441a 100644 --- a/src/pmfind/source.c +++ b/src/pmfind/source.c @@ -52,7 +52,7 @@ sources_release(void *arg, const struct dictEntry *entry) { sources_t *sp = (sources_t *)arg; context_t *cp = (context_t *)dictGetVal(entry); - sds ctx = (sds)entry->key; + sds ctx = (sds)dictGetKey(entry); if (pmDebugOptions.discovery) fprintf(stderr, "releasing context %s\n", ctx); @@ -92,7 +92,21 @@ on_source_context(sds id, pmWebSource *src, void *arg) uv_mutex_lock(&sp->mutex); dictAdd(sp->contexts, id, cp); - entry = dictAddRaw(sp->uniq, src->source, NULL); + /* dictAddRaw replacement using libvalkey dict API */ + /* Check if key already exists */ + entry = dictFind(sp->uniq, src->source); + if (entry == NULL) { + /* Key doesn't exist, add it with NULL value */ + if (dictAdd(sp->uniq, src->source, NULL) == DICT_OK) { + /* Get the newly added entry */ + entry = dictFind(sp->uniq, src->source); + } else { + entry = NULL; + } + } else { + /* Key already exists, return NULL to indicate it was not newly discovered */ + entry = NULL; + } uv_mutex_unlock(&sp->mutex); if (entry) { /* source just discovered */ @@ -157,10 +171,13 @@ on_source_done(sds context, int status, sds message, void *arg) } if (release) { - unsigned long cursor = 0; - do { - cursor = dictScan(sp->contexts, cursor, sources_release, NULL, sp); - } while (cursor); + /* dictScan replacement using libvalkey iterator API */ + dictIterator iter; + dictEntry *entry; + dictInitIterator(&iter, sp->contexts); + while ((entry = dictNext(&iter)) != NULL) { + sources_release(sp, entry); + } } 
else if (pmDebugOptions.discovery) { fprintf(stderr, "not yet releasing (count=%d)\n", count); } @@ -183,7 +200,7 @@ sources_discovery_start(uv_timer_t *arg) { uv_handle_t *handle = (uv_handle_t *)arg; sources_t *sp = (sources_t *)handle->data; - dict *dp = dictCreate(&sdsOwnDictCallBacks, NULL); + dict *dp = dictCreate(&sdsOwnDictCallBacks); sds name, value; int i, fail = 0, total = sp->count; @@ -238,10 +255,10 @@ source_discovery(int count, char **urls) uv_mutex_init(&find.mutex); find.urls = urls; find.count = count; /* at least one PMWEBAPI request for each url */ - find.uniq = dictCreate(&sdsKeyDictCallBacks, NULL); - find.params = dictCreate(&sdsOwnDictCallBacks, NULL); + find.uniq = dictCreate(&sdsKeyDictCallBacks); + find.params = dictCreate(&sdsOwnDictCallBacks); dictAdd(find.params, sdsnew("name"), sdsnew("containers.state.running")); - find.contexts = dictCreate(&sdsKeyDictCallBacks, NULL); + find.contexts = dictCreate(&sdsKeyDictCallBacks); /* * Setup an async event loop and prepare for pmWebGroup API use diff --git a/src/pmproxy/src/GNUmakefile b/src/pmproxy/src/GNUmakefile index 16c277bfa6e..e2e59eb96b8 100644 --- a/src/pmproxy/src/GNUmakefile +++ b/src/pmproxy/src/GNUmakefile @@ -24,6 +24,8 @@ LDIRT = $(XFILES) LCFLAGS = $(PIECFLAGS) \ -I$(TOPDIR)/src/libpcp_web/src \ -I$(TOPDIR)/src/libpcp_web/src/deps \ + -I$(TOPDIR)/src/libpcp_web/src/deps/libvalkey/include \ + -I$(TOPDIR)/vendor/github.com/valkey-io/libvalkey/include \ -I$(TOPDIR)/src/libpcp_mmv/src LLDFLAGS = $(PIELDFLAGS) LLDLIBS = -lpcp_mmv $(PCP_WEBLIB) diff --git a/src/pmproxy/src/http.c b/src/pmproxy/src/http.c index 5f9036a10a5..6a3d50b3ab7 100644 --- a/src/pmproxy/src/http.c +++ b/src/pmproxy/src/http.c @@ -341,9 +341,11 @@ http_response_header(struct client *client, unsigned int length, http_code_t sts static sds http_header_value(struct client *client, sds header) { + dictEntry *entry; if (client->u.http.headers == NULL) return NULL; - return (sds)dictFetchValue(client->u.http.headers, 
header); + entry = dictFind(client->u.http.headers, header); + return entry ? (sds)dictGetVal(entry) : NULL; } static sds @@ -399,7 +401,6 @@ static sds http_response_trace(struct client *client, int sts) { struct http_parser *parser = &client->u.http.parser; - dictIterator *iterator; dictEntry *entry; char buffer[64]; sds header; @@ -413,10 +414,12 @@ http_response_trace(struct client *client, int sts) sts, http_status_mapping(sts), HEADER_CONNECTION); header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, 0); - iterator = dictGetSafeIterator(client->u.http.headers); - while ((entry = dictNext(iterator)) != NULL) - header = sdscatfmt(header, "%S: %S\r\n", dictGetKey(entry), dictGetVal(entry)); - dictReleaseIterator(iterator); + { + dictIterator iterator; + dictInitIterator(&iterator, client->u.http.headers); + while ((entry = dictNext(&iterator)) != NULL) + header = sdscatfmt(header, "%S: %S\r\n", dictGetKey(entry), dictGetVal(entry)); + } header = sdscatfmt(header, "Date: %s\r\n\r\n", http_date_string(time(NULL), buffer, sizeof(buffer))); @@ -797,7 +800,7 @@ http_parameters(const char *url, size_t length, dict **parameters) const char *p, *name, *value = NULL; int sts = 0, namelen = 0, valuelen = 0; - *parameters = dictCreate(&sdsOwnDictCallBacks, NULL); + *parameters = dictCreate(&sdsOwnDictCallBacks); for (p = name = url; p < end; p++) { if (*p == '=') { namelen = p - name; @@ -912,14 +915,14 @@ on_url(http_parser *request, const char *offset, size_t length) client->u.http.parser.method == HTTP_TRACE || client->u.http.parser.method == HTTP_HEAD) client->u.http.flags |= HTTP_FLAG_NO_BODY; - client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL); + client->u.http.headers = dictCreate(&sdsOwnDictCallBacks); } } /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */ else if (client->u.http.parser.method == HTTP_OPTIONS) { if (length == 1 && *offset == '*') { client->u.http.flags |= HTTP_FLAG_NO_BODY; - client->u.http.headers = 
dictCreate(&sdsOwnDictCallBacks, NULL); + client->u.http.headers = dictCreate(&sdsOwnDictCallBacks); } else { sts = client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST; http_error(client, sts, "no handler for OPTIONS"); @@ -928,7 +931,7 @@ on_url(http_parser *request, const char *offset, size_t length) /* server trace - https://tools.ietf.org/html/rfc7231#section-4.3.8 */ else if (client->u.http.parser.method == HTTP_TRACE) { client->u.http.flags |= HTTP_FLAG_NO_BODY; - client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL); + client->u.http.headers = dictCreate(&sdsOwnDictCallBacks); } /* nothing available to respond to this request - inform the client */ else { @@ -973,7 +976,23 @@ on_header_field(http_parser *request, const char *offset, size_t length) * Insert this header into the dictionary (name only so far); * track this header for associating the value to it (below). */ - client->u.http.privdata = dictAddRaw(client->u.http.headers, field, NULL); + /* dictAddRaw replacement using libvalkey dict API */ + { + dictEntry *entry; + entry = dictFind(client->u.http.headers, field); + if (entry == NULL) { + /* Key doesn't exist, add it with NULL value */ + if (dictAdd(client->u.http.headers, field, NULL) == DICT_OK) { + /* Get the newly added entry */ + client->u.http.privdata = dictFind(client->u.http.headers, field); + } else { + client->u.http.privdata = NULL; + } + } else { + /* Key already exists, return the existing entry */ + client->u.http.privdata = entry; + } + } return 0; } @@ -1090,7 +1109,7 @@ on_headers_complete(http_parser *request) if (client->u.http.username) { if (!client->u.http.parameters) - client->u.http.parameters = dictCreate(&sdsOwnDictCallBacks, NULL); + client->u.http.parameters = dictCreate(&sdsOwnDictCallBacks); http_add_parameter(client->u.http.parameters, "auth.username", 13, client->u.http.username, sdslen(client->u.http.username)); if (client->u.http.password) diff --git a/src/pmproxy/src/keys.c 
b/src/pmproxy/src/keys.c index 75a6c0a5425..71733dc93e6 100644 --- a/src/pmproxy/src/keys.c +++ b/src/pmproxy/src/keys.c @@ -116,7 +116,7 @@ on_key_client_read(struct proxy *proxy, struct client *client, fprintf(stderr, "%s: client %p\n", "on_key_client_read", client); if (key_server_resp == 0 || proxy->keys_setup == 0 || - keySlotsProxyConnect(proxy->slots, + keySlotsProxyConnect(&proxy->slotsctx->slots, proxylog, &client->u.keys.reader, buf->base, nread, on_key_server_reply, client) < 0) { client_close(client); @@ -145,7 +145,7 @@ on_key_server_connected(void *arg) message = sdsnew("Key server slots"); if (key_server_resp) message = sdscat(message, ", command keys"); - if ((search_queries = pmSearchEnabled(proxy->slots))) + if ((search_queries = pmSearchEnabled(&proxy->slotsctx->slots))) message = sdscat(message, ", search"); if (series_queries) message = sdscat(message, ", schema version"); @@ -172,7 +172,7 @@ on_key_server_connected(void *arg) pmDiscoverSetConfiguration(&key_server_discover.module, proxy->config); pmDiscoverSetMetricRegistry(&key_server_discover.module, registry); pmDiscoverSetup(&key_server_discover.module, &key_server_discover.callbacks, proxy); - pmDiscoverSetSlots(&key_server_discover.module, proxy->slots); + pmDiscoverSetSlots(&key_server_discover.module, &proxy->slotsctx->slots); } proxy->keys_setup = 1; @@ -209,15 +209,15 @@ key_server_reconnect_worker(void *arg) /* * skip if server is disabled or state is not SLOTS_DISCONNECTED */ - if (!proxy->slots || proxy->slots->state != SLOTS_DISCONNECTED) + if (!proxy->slotsctx || proxy->slotsctx->slots.state != SLOTS_DISCONNECTED) return; if (pmDebugOptions.desperate) - proxylog(PMLOG_INFO, "Trying to connect to key server ...", arg); + proxylog(PMLOG_INFO, "Trying to connect to key server ...", proxy); keySlotsFlags flags = get_key_slots_flags(); - keySlotsReconnect(proxy->slots, flags, proxylog, on_key_server_connected, - proxy, proxy->events, proxy); + 
keySlotsReconnect(&proxy->slotsctx->slots, flags, proxylog, on_key_server_connected, + proxy, proxy->events, proxy, &proxy->slotsctx->opts); } /* @@ -250,17 +250,19 @@ setup_keys_module(struct proxy *proxy) if ((option = pmIniFileLookup(config, "pmlogger", "enabled"))) archive_push = (strcmp(option, "true") == 0); - if (proxy->slots == NULL && + if (proxy->slotsctx == NULL && (key_server_resp || series_queries || search_queries || archive_discovery || archive_push)) { mmv_registry_t *registry = proxymetrics(proxy, METRICS_KEYS); keySlotsFlags flags = get_key_slots_flags(); - proxy->slots = keySlotsConnect(proxy->config, + proxy->slotsctx = keySlotsConnect(proxy->config, flags, proxylog, on_key_server_connected, proxy, proxy->events, proxy); - keySlotsSetMetricRegistry(proxy->slots, registry); - keySlotsSetupMetrics(proxy->slots); + + keySlotsSetMetricRegistry(&proxy->slotsctx->slots, registry); + keySlotsSetupMetrics(&proxy->slotsctx->slots); + pmWebTimerRegister(key_server_reconnect_worker, proxy); } } @@ -268,7 +270,7 @@ setup_keys_module(struct proxy *proxy) void * get_keys_module(struct proxy *proxy) { - if (proxy->slots == NULL) + if (proxy->slotsctx == NULL) setup_keys_module(proxy); return &key_server_discover.module; } @@ -283,9 +285,9 @@ reset_keys_module(struct proxy *proxy) void close_keys_module(struct proxy *proxy) { - if (proxy->slots) { - keySlotsFree(proxy->slots); - proxy->slots = NULL; + if (proxy->slotsctx) { + keySlotsContextFree(proxy->slotsctx); + proxy->slotsctx = NULL; } if (archive_discovery || archive_push) diff --git a/src/pmproxy/src/openmetrics.c b/src/pmproxy/src/openmetrics.c index ea001b5f2c5..fb250df8c63 100644 --- a/src/pmproxy/src/openmetrics.c +++ b/src/pmproxy/src/openmetrics.c @@ -53,19 +53,17 @@ labeladd(void *arg, const struct dictEntry *entry) pmWebLabelSet *labels = (pmWebLabelSet *)arg; labels->buffer = (sdslen(labels->buffer) == 0) ? 
/* first time */ - sdscatfmt(labels->buffer, "%S=%S", entry->key, entry->v.val) : - sdscatfmt(labels->buffer, ",%S=%S", entry->key, entry->v.val); + sdscatfmt(labels->buffer, "%S=%S", dictGetKey(entry), dictGetVal(entry)) : + sdscatfmt(labels->buffer, ",%S=%S", dictGetKey(entry), dictGetVal(entry)); } /* convert an array of PCP labelsets into Open Metrics form */ void open_metrics_labels(pmWebLabelSet *labels) { - unsigned long cursor = 0; pmLabelSet *labelset; pmLabel *label; dict *labeldict; - dictEntry *entry; const char *offset; static sds instname, instid; sds key, value; @@ -76,7 +74,7 @@ open_metrics_labels(pmWebLabelSet *labels) if (instid == NULL) instid = sdsnewlen("instid", 6); - labeldict = dictCreate(&sdsOwnDictCallBacks, NULL); + labeldict = dictCreate(&sdsOwnDictCallBacks); /* walk labelset in order adding labels to a temporary dictionary */ for (i = 0; i < labels->nsets; i++) { @@ -93,12 +91,15 @@ open_metrics_labels(pmWebLabelSet *labels) value = sdscatrepr(sdsempty(), offset, length); /* overwrite entries from earlier passes: label hierarchy */ - if ((entry = dictFind(labeldict, key)) == NULL) { - dictAdd(labeldict, key, value); /* new entry */ - } else { - sdsfree(key); - sdsfree(dictGetVal(entry)); - dictSetVal(labeldict, entry, value); + { + dictEntry *entry; + if ((entry = dictFind(labeldict, key)) == NULL) { + dictAdd(labeldict, key, value); /* new entry */ + } else { + sdsfree(key); + sdsfree(dictGetVal(entry)); + dictSetVal(labeldict, entry, value); + } } } } @@ -117,9 +118,14 @@ open_metrics_labels(pmWebLabelSet *labels) } /* finally produce the merged set of labels in the desired format */ - do { - cursor = dictScan(labeldict, cursor, labeladd, NULL, labels); - } while (cursor); + { + dictIterator iter; + dictEntry *entry; + dictInitIterator(&iter, labeldict); + while ((entry = dictNext(&iter)) != NULL) { + labeladd(labels, entry); + } + } dictRelease(labeldict); } diff --git a/src/pmproxy/src/opentelemetry.c 
b/src/pmproxy/src/opentelemetry.c index dca990a3304..f33e3970828 100644 --- a/src/pmproxy/src/opentelemetry.c +++ b/src/pmproxy/src/opentelemetry.c @@ -51,7 +51,7 @@ static void labeladd(void *arg, const struct dictEntry *entry) { sds *buffer = (sds *)arg; - sds value = entry->v.val; + sds value = (sds)dictGetVal(entry); const char *type; if (value[0] == '"') @@ -64,21 +64,19 @@ labeladd(void *arg, const struct dictEntry *entry) if (*buffer) *buffer = sdscatfmt(*buffer, ",{\"key\":\"%S\",\"value\":{\"%s\":%S}}", - entry->key, type, value); + dictGetKey(entry), type, value); else /* first label: alloc empty start string, and no leading comma */ *buffer = sdscatfmt(sdsempty(), "{\"key\":\"%S\",\"value\":{\"%s\":%S}}", - entry->key, type, value); + dictGetKey(entry), type, value); } /* convert an array of PCP labelsets into Open Telemetry form */ void open_telemetry_labels(pmWebLabelSet *labels, struct dict **context, sds *buffer) { - unsigned long cursor = 0; pmLabelSet *labelset; pmLabel *label; - dictEntry *entry; const char *offset; static sds instname, instid; struct dict *labeldict = *context; @@ -93,7 +91,7 @@ open_telemetry_labels(pmWebLabelSet *labels, struct dict **context, sds *buffer) /* setup resource attributues based on the PCP context labels */ if (labeldict == NULL) { - labeldict = dictCreate(&sdsOwnDictCallBacks, NULL); + labeldict = dictCreate(&sdsOwnDictCallBacks); labelset = labels->sets[0]; /* context labels */ for (j = 0; j < labelset->nlabels; j++) { label = &labelset->labels[j]; @@ -111,7 +109,7 @@ open_telemetry_labels(pmWebLabelSet *labels, struct dict **context, sds *buffer) *context = labeldict; } - metric_labels = dictCreate(&sdsOwnDictCallBacks, NULL); + metric_labels = dictCreate(&sdsOwnDictCallBacks); /* walk remaining labelsets in order adding labels to */ for (i = 1; i < labels->nsets; i++) { @@ -128,12 +126,15 @@ open_telemetry_labels(pmWebLabelSet *labels, struct dict **context, sds *buffer) value = sdscatrepr(sdsempty(), offset, 
length); /* overwrite entries from earlier passes: label hierarchy */ - if ((entry = dictFind(metric_labels, key)) == NULL) { - dictAdd(metric_labels, key, value); /* new entry */ - } else { - sdsfree(key); - sdsfree(dictGetVal(entry)); - dictSetVal(metric_labels, entry, value); + { + dictEntry *entry; + if ((entry = dictFind(metric_labels, key)) == NULL) { + dictAdd(metric_labels, key, value); /* new entry */ + } else { + sdsfree(key); + sdsfree(dictGetVal(entry)); + dictSetVal(metric_labels, entry, value); + } } } } @@ -154,9 +155,14 @@ open_telemetry_labels(pmWebLabelSet *labels, struct dict **context, sds *buffer) /* finally produce the merged set of labels in the desired format */ sdsfree(*buffer); *buffer = NULL; - do { - cursor = dictScan(metric_labels, cursor, labeladd, NULL, buffer); - } while (cursor); + { + dictIterator iter; + dictEntry *entry; + dictInitIterator(&iter, metric_labels); + while ((entry = dictNext(&iter)) != NULL) { + labeladd(buffer, entry); + } + } dictRelease(metric_labels); } diff --git a/src/pmproxy/src/search.c b/src/pmproxy/src/search.c index cb06c8e2c47..09c1eec7319 100644 --- a/src/pmproxy/src/search.c +++ b/src/pmproxy/src/search.c @@ -403,10 +403,17 @@ pmsearch_setup_request_parameters(struct client *client, sdsfreesplitres(values, nvalues); } } - if ((value = (sds)dictFetchValue(parameters, PARAM_LIMIT))) - baton->request.count = strtoul(value, NULL, 0); - if ((value = (sds)dictFetchValue(parameters, PARAM_OFFSET))) - baton->request.offset = strtoul(value, NULL, 0); + { + dictEntry *entry; + entry = dictFind(parameters, PARAM_LIMIT); + value = entry ? (sds)dictGetVal(entry) : NULL; + if (value) + baton->request.count = strtoul(value, NULL, 0); + entry = dictFind(parameters, PARAM_OFFSET); + value = entry ? 
(sds)dictGetVal(entry) : NULL; + if (value) + baton->request.offset = strtoul(value, NULL, 0); + } break; case RESTKEY_SUGGEST: @@ -423,8 +430,13 @@ pmsearch_setup_request_parameters(struct client *client, } /* optional parameters - flags, result count and pagination offset */ baton->request.flags = 0; - if ((value = (sds)dictFetchValue(parameters, PARAM_LIMIT))) - baton->request.count = strtoul(value, NULL, 0); + { + dictEntry *entry; + entry = dictFind(parameters, PARAM_LIMIT); + value = entry ? (sds)dictGetVal(entry) : NULL; + if (value) + baton->request.count = strtoul(value, NULL, 0); + } break; case RESTKEY_INFO: @@ -542,7 +554,7 @@ pmsearch_servlet_setup(struct proxy *proxy) PARAM_LIMIT = sdsnew("limit"); PARAM_OFFSET = sdsnew("offset"); - pmSearchSetSlots(&pmsearch_settings.module, proxy->slots); + pmSearchSetSlots(&pmsearch_settings.module, &proxy->slotsctx->slots); pmSearchSetEventLoop(&pmsearch_settings.module, proxy->events); pmSearchSetConfiguration(&pmsearch_settings.module, proxy->config); pmSearchSetMetricRegistry(&pmsearch_settings.module, metric_registry); diff --git a/src/pmproxy/src/series.c b/src/pmproxy/src/series.c index ba9323c83c0..befb5def50e 100644 --- a/src/pmproxy/src/series.c +++ b/src/pmproxy/src/series.c @@ -1021,7 +1021,7 @@ pmseries_servlet_setup(struct proxy *proxy) PARAM_FINISH = sdsnew("finish"); PARAM_ZONE = sdsnew("zone"); - pmSeriesSetSlots(&pmseries_settings.module, proxy->slots); + pmSeriesSetSlots(&pmseries_settings.module, &proxy->slotsctx->slots); pmSeriesSetEventLoop(&pmseries_settings.module, proxy->events); pmSeriesSetConfiguration(&pmseries_settings.module, proxy->config); pmSeriesSetMetricRegistry(&pmseries_settings.module, metric_registry); diff --git a/src/pmproxy/src/server.c b/src/pmproxy/src/server.c index 4b705e1f3ef..f730782d7e3 100644 --- a/src/pmproxy/src/server.c +++ b/src/pmproxy/src/server.c @@ -46,7 +46,7 @@ proxylog(pmLogLevel level, sds message, void *arg) state = "- EPOCH - "; } else - state = 
proxy->slots ? "" : "- DISCONNECTED - "; + state = proxy->slotsctx ? "" : "- DISCONNECTED - "; switch (level) { case PMLOG_TRACE: @@ -792,8 +792,8 @@ static void close_proxy(struct proxy *proxy) { close_pcp_module(proxy); - close_http_module(proxy); close_keys_module(proxy); + close_http_module(proxy); close_secure_module(proxy); } diff --git a/src/pmproxy/src/server.h b/src/pmproxy/src/server.h index bb4a597670a..7c96ce92d2f 100644 --- a/src/pmproxy/src/server.h +++ b/src/pmproxy/src/server.h @@ -171,7 +171,7 @@ typedef struct proxy { SSL_CTX *ssl; __pmSecureConfig tls; #endif - keySlots *slots; /* mapping of key names to servers */ + keySlotsContext *slotsctx; /* key server context */ struct servlet *servlets; /* linked list of http URL handlers */ mmv_registry_t *metrics[NUM_REGISTRY]; /* performance metrics */ pmAtomValue *values[NUM_VALUES]; /* local metric values */ diff --git a/src/pmproxy/src/webapi.c b/src/pmproxy/src/webapi.c index e97fed4f1d7..c698418c992 100644 --- a/src/pmproxy/src/webapi.c +++ b/src/pmproxy/src/webapi.c @@ -593,13 +593,13 @@ add_dict_attribute(void *arg, const struct dictEntry *entry) { sds *out = (sds *)arg; - *out = add_sds_attribute(*out, entry->key, entry->v.val); + *out = add_sds_attribute(*out, dictGetKey(entry), dictGetVal(entry)); } static sds add_metrics_resource_attributes(pmWebGroupBaton *baton, sds result) { - unsigned long cursor = 0, count = 0; + unsigned long count = 0; assert(sdslen(result) == 0); result = sdscatlen(result, "{\"resourceMetrics\":[", 20); @@ -608,10 +608,15 @@ add_metrics_resource_attributes(pmWebGroupBaton *baton, sds result) result = sdscatlen(result, "{\"resource\":{\"attributes\":[", 27); baton->suffix = json_push_suffix(baton->suffix, JSON_FLAG_OBJECT); - do { - cursor = dictScan(baton->labels, cursor, add_dict_attribute, NULL, &result); - count++; - } while (cursor); + { + dictIterator iter; + dictEntry *entry; + dictInitIterator(&iter, baton->labels); + while ((entry = dictNext(&iter)) != NULL) { 
+ add_dict_attribute(&result, entry); + count++; + } + } if (count) result = chop_final_character(result); return sdscatlen(result, "]},", 3); /* end attributes, resources */ @@ -1066,7 +1071,7 @@ pmwebapi_request_body(struct client *client, const char *content, size_t length) if (baton->restkey == RESTKEY_DERIVE && client->u.http.parser.method == HTTP_POST) { if (client->u.http.parameters == NULL) - client->u.http.parameters = dictCreate(&sdsOwnDictCallBacks, NULL); + client->u.http.parameters = dictCreate(&sdsOwnDictCallBacks); dictAdd(client->u.http.parameters, sdsnewlen(PARAM_EXPR, sdslen(PARAM_EXPR)), sdsnewlen(content, length)); @@ -1074,7 +1079,7 @@ pmwebapi_request_body(struct client *client, const char *content, size_t length) if (baton->restkey == RESTKEY_STORE && client->u.http.parser.method == HTTP_POST) { if (client->u.http.parameters == NULL) - client->u.http.parameters = dictCreate(&sdsOwnDictCallBacks, NULL); + client->u.http.parameters = dictCreate(&sdsOwnDictCallBacks); dictAdd(client->u.http.parameters, sdsnewlen(PARAM_VALUE, sdslen(PARAM_VALUE)), sdsnewlen(content, length)); diff --git a/vendor/github.com/Nordix/hiredis-cluster/.clang-format b/vendor/github.com/Nordix/hiredis-cluster/.clang-format deleted file mode 100644 index 1d563276591..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/.clang-format +++ /dev/null @@ -1,4 +0,0 @@ -BasedOnStyle: LLVM -IndentWidth: 4 -BreakBeforeTernaryOperators: false -ReflowComments: false \ No newline at end of file diff --git a/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/ci.yml b/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/ci.yml deleted file mode 100644 index b901d4e3969..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/ci.yml +++ /dev/null @@ -1,137 +0,0 @@ -name: CI - -on: [push, pull_request] - -jobs: - checkers: - name: Run static checkers - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Run clang-format style 
check (.c and .h) - uses: jidicula/clang-format-action@v4.6.2 - - ubuntu: - name: ${{ matrix.cmake-build-type }}-build [${{ matrix.compiler }}, cmake-${{ matrix.cmake-version }} sanitizer="${{ matrix.sanitizer }}"] - runs-on: ubuntu-18.04 - strategy: - fail-fast: false - matrix: - compiler: [gcc-9, clang-10] - cmake-version: [3.19] - cmake-build-type: [Release, RelWithDebInfo] - sanitizer: ["", thread, undefined, leak, address] - include: - - compiler: gcc-4.8 - cmake-version: 3.11 - cmake-build-type: Release - sanitizer: "" - - compiler: gcc-5 - cmake-version: 3.12 - cmake-build-type: Release - sanitizer: "" - - compiler: gcc-6 - cmake-version: 3.13 - cmake-build-type: Release - sanitizer: "" - - compiler: gcc-7 - cmake-version: 3.14 - cmake-build-type: Release - sanitizer: "" - - compiler: gcc-8 - cmake-version: 3.15 - cmake-build-type: Release - sanitizer: "" - - compiler: clang-3.9 - cmake-version: 3.16 - cmake-build-type: Release - sanitizer: "" - - compiler: clang-7 - cmake-version: 3.17 - cmake-build-type: Release - sanitizer: "" - - compiler: clang-9 - cmake-version: 3.18 - cmake-build-type: Release - sanitizer: "" - - steps: - - name: Prepare - run: sudo apt install libevent-dev libuv1-dev libev-dev libglib2.0-dev ${{ matrix.compiler }} - - name: Setup cmake - # Temporary disabled due to actions-setup-cmake/issues/21 - # uses: jwlawson/actions-setup-cmake@v1.6 - # with: - # cmake-version: ${{ matrix.cmake-version }} - run: | - wget https://cmake.org/files/v${{ matrix.cmake-version }}/cmake-${{ matrix.cmake-version }}.0-Linux-x86_64.sh -O /tmp/cmake.sh - sudo sh /tmp/cmake.sh --prefix=/usr/local/ --exclude-subdir - # Make sure we use correct version - cmake --version | grep -c ${{ matrix.cmake-version }}.0 - - uses: actions/checkout@v3 - - name: Create build folder - run: cmake -E make_directory build - - name: Generate makefiles - shell: bash - env: - CC: ${{ matrix.compiler }} - working-directory: build - run: cmake $GITHUB_WORKSPACE 
-DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_SSL=ON -DUSE_SANITIZER=${{ matrix.sanitizer }} .. - - name: Build - shell: bash - working-directory: build - run: VERBOSE=1 make - - name: Setup clusters - shell: bash - working-directory: build - run: make start - - name: Wait for clusters to start.. - uses: kibertoad/wait-action@1.0.1 - with: - time: '20s' - - name: Run tests - shell: bash - working-directory: build - run: make CTEST_OUTPUT_ON_FAILURE=1 test - - name: Teardown clusters - working-directory: build - shell: bash - run: make stop - - name: Build examples - shell: bash - env: - CC: ${{ matrix.compiler }} - run: | - examples/using_cmake_externalproject/build.sh - examples/using_cmake_separate/build.sh - examples/using_make/build.sh - - macos: - name: macOS - runs-on: macos-latest - steps: - - name: Prepare - run: | - brew install cmake ninja openssl - - uses: actions/checkout@v3 - - name: Build - run: | - mkdir build && cd build - cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_SSL=ON - ninja -v - - windows: - name: Windows - runs-on: windows-latest - steps: - - uses: actions/checkout@v3 - - uses: ilammy/msvc-dev-cmd@v1 - - name: Prepare - run: | - choco install -y ninja - vcpkg install --triplet x64-windows libevent - - name: Build - run: | - mkdir build && cd build - cmake .. 
-G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=C:\vcpkg\scripts\buildsystems\vcpkg.cmake - ninja -v diff --git a/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/coverity.yml b/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/coverity.yml deleted file mode 100644 index 1315c3e55ae..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/coverity.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: "Coverity" -on: - schedule: - - cron: '0 1 * * 0' - -jobs: - analyze: - name: Analyze - if: github.repository == 'Nordix/hiredis-cluster' - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - with: - fetch-depth: 1 - - - name: Download Coverity - run: | - cd .. - wget -q https://scan.coverity.com/download/linux64 --post-data "token=${COVERITY_TOKEN}&project=hiredis-cluster" -O coverity_tool.tgz - mkdir coverity - tar xzf coverity_tool.tgz --strip 1 -C coverity - echo "$(pwd)/coverity/bin" >> $GITHUB_PATH - env: - COVERITY_TOKEN: ${{ secrets.COVERITY_TOKEN }} - - - name: Prepare - run: | - sudo apt-get install -y libevent-dev gcc cmake - mkdir build; cd build - cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_SSL=ON .. 
- - - name: Build with Coverity - run: cov-build --dir cov-int make - working-directory: build - - - name: Submit the result to Coverity - run: | - tar czvf hiredis_cluster.tgz cov-int - curl \ - --form token=${COVERITY_TOKEN} \ - --form email=bjorn.a.svensson@est.tech \ - --form file=@hiredis_cluster.tgz \ - --form version=${GITHUB_SHA} \ - https://scan.coverity.com/builds?project=hiredis-cluster - working-directory: build - env: - COVERITY_TOKEN: ${{ secrets.COVERITY_TOKEN }} diff --git a/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/redis_compability.yml b/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/redis_compability.yml deleted file mode 100644 index 6f2c93a8b7f..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/.github/workflows/redis_compability.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Redis compatibility testing - -on: [push, pull_request] - -jobs: - redis-comp: - name: Redis ${{ matrix.redis-version }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - include: - - redis-version: 7.0.0 - - redis-version: 6.2.7 - - redis-version: 6.0.16 - - redis-version: 5.0.14 - steps: - - name: Prepare - run: sudo apt install libevent-dev - - - uses: actions/checkout@v3 - - - name: Create build folder - run: cmake -E make_directory build - - - name: Generate makefiles - shell: bash - working-directory: build - run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release -DTEST_WITH_REDIS_VERSION=${{ matrix.redis-version }} .. - - - name: Build - shell: bash - working-directory: build - run: VERBOSE=1 make - - - name: Setup clusters - shell: bash - working-directory: build - run: make start - - - name: Wait for clusters to start.. 
- uses: kibertoad/wait-action@1.0.1 - with: - time: '40s' - - - name: Run tests - shell: bash - working-directory: build - run: make CTEST_OUTPUT_ON_FAILURE=1 test - - - name: Teardown clusters - working-directory: build - shell: bash - run: make stop diff --git a/vendor/github.com/Nordix/hiredis-cluster/.gitignore b/vendor/github.com/Nordix/hiredis-cluster/.gitignore deleted file mode 100644 index 796b96d1c40..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/build diff --git a/vendor/github.com/Nordix/hiredis-cluster/CHANGELOG.md b/vendor/github.com/Nordix/hiredis-cluster/CHANGELOG.md deleted file mode 100644 index e3eace37b7a..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/CHANGELOG.md +++ /dev/null @@ -1,73 +0,0 @@ -### 0.8.1 - Aug 31, 2022 - -* Fixed crash and use-after-free in the asynchronous API. -* Use identical warning flags in CMake and Makefile. -* Corrected CROSSSLOT errors to not to be retried. - -### 0.8.0 - Jun 15, 2022 - -* Basic Redis 7.0 support. -* SSL/TLS handling in separate library. -* Command timeout corrections. -* Builds on Windows and macOS. - -### 0.7.0 - Sep 22, 2021 - -* Added support for stream commands in regular API. -* Added support for authentication using AUTH with username. -* Added adapters for event libraries libuv, libev and GLib. -* Improved memory efficiency. -* Renamed API function `redisClusterSetOptionMaxRedirect()` - to `redisClusterSetOptionMaxRetry()`. - -### 0.6.0 - Feb 09, 2021 - -* Minimum required version of CMake changed to 3.11 (from 3.14) -* Re-added the Makefile for symmetry with hiredis, which also enables - support for statically-linked libraries. -* Improved examples -* Corrected crashes and leaks in OOM scenarios -* New API for sending commands to specific node -* New API for node iteration, can be used for sending commands - to some or all nodes. 
- -### 0.5.0 - Dec 07, 2020 - -* Renamed to [hiredis-cluster](https://github.com/Nordix/hiredis-cluster) -* The C library `hiredis` is an external dependency rather than a builtin part - of the cluster client, meaning that `hiredis` v1.0.0 or later can be used. -* Support for SSL/TLS introduced in Redis 6 -* Support for IPv6 -* Support authentication using AUTH -* Handle variable number of keys in command EXISTS -* Improved CMake build -* Code style guide (using clang-format) -* Improved testing -* Memory leak corrections and allocation failure handling - -### 0.4.0 - Jan 24, 2019 - -* Updated underlying hiredis version to 0.14.0 -* Added CMake files to enable Windows and Mac builds -* Fixed bug due to CLUSTER NODES reply format change - -https://github.com/heronr/hiredis-vip - -### 0.3.0 - Dec 07, 2016 - -* Support redisClustervCommand, redisClustervAppendCommand and redisClustervAsyncCommand api. (deep011) -* Add flags HIRCLUSTER_FLAG_ADD_OPENSLOT and HIRCLUSTER_FLAG_ROUTE_USE_SLOTS. (deep011) -* Support redisClusterCommandArgv related api. (deep011) -* Fix some serious bugs. (deep011) - -https://github.com/vipshop/hiredis-vip - -### 0.2.1 - Nov 24, 2015 - -This release support redis cluster api. - -* Add hiredis 0.3.1. (deep011) -* Support cluster synchronous API. (deep011) -* Support multi-key command(mget/mset/del) for redis cluster. (deep011) -* Support cluster pipelining. (deep011) -* Support cluster asynchronous API. 
(deep011) diff --git a/vendor/github.com/Nordix/hiredis-cluster/CMakeLists.txt b/vendor/github.com/Nordix/hiredis-cluster/CMakeLists.txt deleted file mode 100644 index fecb7a71360..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/CMakeLists.txt +++ /dev/null @@ -1,238 +0,0 @@ -cmake_minimum_required(VERSION 3.11) -project(hiredis-cluster) -include(GNUInstallDirs) - -# Options -option(DOWNLOAD_HIREDIS "Download the dependency hiredis from GitHub" ON) -option(ENABLE_SSL "Enable SSL/TLS support" OFF) -option(DISABLE_TESTS "Disable compilation of test" OFF) -option(ENABLE_IPV6_TESTS "Enable IPv6 tests requiring special prerequisites" OFF) -option(ENABLE_COVERAGE "Enable test coverage reporting" OFF) - -macro(getVersionBit name) - set(VERSION_REGEX "^#define ${name} (.+)$") - file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/hircluster.h" - VERSION_BIT REGEX ${VERSION_REGEX}) - string(REGEX REPLACE ${VERSION_REGEX} "\\1" ${name} "${VERSION_BIT}") -endmacro(getVersionBit) - -# Get version information from src -getVersionBit(HIREDIS_CLUSTER_MAJOR) -getVersionBit(HIREDIS_CLUSTER_MINOR) -getVersionBit(HIREDIS_CLUSTER_PATCH) -getVersionBit(HIREDIS_CLUSTER_SONAME) -set(VERSION "${HIREDIS_CLUSTER_MAJOR}.${HIREDIS_CLUSTER_MINOR}.${HIREDIS_CLUSTER_PATCH}") -message("Detected version: ${VERSION}") - -project(hiredis-cluster - VERSION "${VERSION}" - LANGUAGES C) - -set(CMAKE_C_STANDARD 99) - -# Build using a sanitizer -if(USE_SANITIZER) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=${USE_SANITIZER}") -endif() - -if(ENABLE_COVERAGE) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --coverage -O0" ) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage" ) -endif() - -SET(hiredis_cluster_sources - adlist.c - command.c - crc16.c - dict.c - hiarray.c - hircluster.c - hiutil.c) - -if(WIN32 OR MINGW) - add_compile_definitions(_CRT_SECURE_NO_WARNINGS WIN32_LEAN_AND_MEAN) - set(hiredis_cluster_sources - ${hiredis_cluster_sources} - hiredis_cluster.def) 
-endif() - -add_library(hiredis_cluster - SHARED - ${hiredis_cluster_sources}) - -if(NOT MSVC) - target_compile_options(hiredis_cluster PRIVATE -Wall -Wextra -pedantic -Werror - -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers) - - # Add extra defines when CMAKE_BUILD_TYPE is set to Debug - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DHI_ASSERT_PANIC -DHI_HAVE_BACKTRACE") - # Alternative: -DHI_ASSERT_LOG) -endif() - -set_target_properties(hiredis_cluster - PROPERTIES - VERSION "${HIREDIS_CLUSTER_SONAME}") - -if(DOWNLOAD_HIREDIS) - message("Downloading dependency 'hiredis'..") - - include(FetchContent) - FetchContent_Declare(hiredis - GIT_REPOSITORY https://github.com/redis/hiredis - GIT_TAG v1.0.0 - SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_deps/hiredis" - ) - - # Disable tests in hiredis - set(DISABLE_TESTS_OLD ${DISABLE_TESTS}) - set(DISABLE_TESTS ON CACHE INTERNAL "") - FetchContent_GetProperties(hiredis) - if(NOT hiredis_POPULATED) - FetchContent_Populate(hiredis) - add_subdirectory(${hiredis_SOURCE_DIR} ${hiredis_BINARY_DIR}) - endif() - set(DISABLE_TESTS ${DISABLE_TESTS_OLD} CACHE INTERNAL "") - - # Create an empty *-config.cmake for find_package - # See: https://github.com/abandonware-pjz37/cmake-find-package-include/blob/master/hooks/fetch.cmake - set(stub_dir "${CMAKE_CURRENT_BINARY_DIR}/generated/pkg") - - file(WRITE "${stub_dir}/hiredis-config.cmake" "") - set(hiredis_DIR ${stub_dir}) - # Set variables normally got from hiredis-config.cmake - set(hiredis_LIBRARIES hiredis::hiredis) - set(hiredis_INCLUDE_DIRS "${CMAKE_CURRENT_BINARY_DIR}/_deps") - - if(ENABLE_SSL) - file(WRITE "${stub_dir}/hiredis_ssl-config.cmake" "") - set(hiredis_ssl_DIR ${stub_dir}) - endif() - -else() - message("Expecting to find dependencies in path..") -endif() - -find_package(hiredis REQUIRED) - -if(NOT TARGET hiredis::hiredis) - # Add target to support older hiredis releases - add_library(hiredis::hiredis ALIAS hiredis) -endif() - 
-target_include_directories(hiredis_cluster PUBLIC - $ - $ - $) - -if(WIN32 OR MINGW) - target_link_libraries(hiredis_cluster PUBLIC ws2_32 hiredis::hiredis) -else() - target_link_libraries(hiredis_cluster PUBLIC hiredis::hiredis) -endif() - -if(ENABLE_SSL) - find_package(hiredis_ssl REQUIRED) - - if(NOT TARGET hiredis::hiredis_ssl) - # Add target to support older hiredis releases - add_library(hiredis::hiredis_ssl ALIAS hiredis_ssl) - endif() - - add_library(hiredis_cluster_ssl - SHARED hircluster_ssl.c) - set_target_properties(hiredis_cluster_ssl - PROPERTIES VERSION "${HIREDIS_CLUSTER_SONAME}") - target_link_libraries(hiredis_cluster_ssl - PRIVATE hiredis_cluster - PUBLIC hiredis::hiredis_ssl) -endif() - -if(NOT DISABLE_TESTS) - include(CTest) - add_subdirectory(tests) -endif() - -# Code formatting target -find_program(CLANG_FORMAT "clang-format") -file(GLOB_RECURSE FILES_TO_FORMAT - ${PROJECT_SOURCE_DIR}/*.[ch] -) -add_custom_target(format - COMMAND ${CLANG_FORMAT} -i ${FILES_TO_FORMAT} -) - -# Code coverage target -if(ENABLE_COVERAGE) - find_program(GCOVR "gcovr") - - add_custom_command(OUTPUT _run_gcovr - POST_BUILD - COMMAND ${GCOVR} -r ${CMAKE_SOURCE_DIR} --object-dir=${CMAKE_BINARY_DIR} --html-details coverage.html - COMMAND echo "Coverage report generated: ${CMAKE_BINARY_DIR}/coverage.html" - WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) - add_custom_target (coverage DEPENDS _run_gcovr) -endif() - -configure_file(hiredis_cluster.pc.in hiredis_cluster.pc @ONLY) - -install(TARGETS hiredis_cluster - EXPORT hiredis_cluster-targets - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) - -install(FILES hircluster.h adlist.h hiarray.h dict.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis_cluster) - -install(DIRECTORY adapters - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis_cluster) - -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster.pc - DESTINATION 
${CMAKE_INSTALL_LIBDIR}/pkgconfig) - -export(EXPORT hiredis_cluster-targets - FILE ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster-targets.cmake - NAMESPACE hiredis_cluster::) - -set(CMAKE_CONF_INSTALL_DIR share/hiredis_cluster) -set(INCLUDE_INSTALL_DIR include) -include(CMakePackageConfigHelpers) -configure_package_config_file(hiredis_cluster-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster-config.cmake - INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR} - PATH_VARS INCLUDE_INSTALL_DIR) - -install(EXPORT hiredis_cluster-targets - FILE hiredis_cluster-targets.cmake - NAMESPACE hiredis_cluster:: - DESTINATION ${CMAKE_CONF_INSTALL_DIR}) - -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster-config.cmake - DESTINATION ${CMAKE_CONF_INSTALL_DIR}) - -# Install target for hiredis_cluster_ssl -if(ENABLE_SSL) - configure_file(hiredis_cluster_ssl.pc.in hiredis_cluster_ssl.pc @ONLY) - - install(TARGETS hiredis_cluster_ssl - EXPORT hiredis_cluster_ssl-targets - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) - install(FILES hircluster_ssl.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis_cluster) - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster_ssl.pc - DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) - export(EXPORT hiredis_cluster_ssl-targets - FILE ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster_ssl-targets.cmake - NAMESPACE hiredis_cluster::) - set(CMAKE_CONF_INSTALL_DIR share/hiredis_cluster_ssl) - configure_package_config_file(hiredis_cluster_ssl-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster_ssl-config.cmake - INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR} - PATH_VARS INCLUDE_INSTALL_DIR) - install(EXPORT hiredis_cluster_ssl-targets - FILE hiredis_cluster_ssl-targets.cmake - NAMESPACE hiredis_cluster:: - DESTINATION ${CMAKE_CONF_INSTALL_DIR}) - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_cluster_ssl-config.cmake - DESTINATION 
${CMAKE_CONF_INSTALL_DIR}) -endif() diff --git a/vendor/github.com/Nordix/hiredis-cluster/CONTRIBUTING.md b/vendor/github.com/Nordix/hiredis-cluster/CONTRIBUTING.md deleted file mode 100644 index 8a2b10b2f81..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributing - -:tada:Thanks for taking the time to contribute!:tada: - -The following is a set of guidelines for contributing to hiredis-cluster. - -The basics about setting up the project, building and testing is covered in -the [README](README.md). - -## Coding conventions - -### Code style - -Adhere to the existing coding style and make sure to mimic best possible. - -### Code formatting - -To have a common look-and-feel [clang-format](https://clang.llvm.org/docs/ClangFormat.html) -is used for code formatting. The formatting rules can be applied to the -source code by running the following make target in your build directory: - -```sh -$ make format -``` - -### Test coverage - -Make sure changes are covered by tests. -Code coverage instrumentation can be enabled using a build option and -a detailed html report can be viewed using following example: - -```sh -$ mkdir -p build; cd build -$ cmake -DENABLE_COVERAGE=ON .. -$ make all test coverage -$ xdg-open ./coverage.html -``` - -The report generation requires that [gcovr](https://gcovr.com/en/stable/index.html) -is installed in your path. Any reporting tool of choice can be used, as long as -it reads .gcda and .gcno files created during the test run. - -## Submitting changes - -* Run the formatter before committing when contributing to this project (`make format`). -* Cover new behaviour with tests when possible. 
- -## Links - -* [clang-format](https://apt.llvm.org/) for code formatting diff --git a/vendor/github.com/Nordix/hiredis-cluster/Makefile b/vendor/github.com/Nordix/hiredis-cluster/Makefile deleted file mode 100644 index 5aaeefd89c2..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/Makefile +++ /dev/null @@ -1,187 +0,0 @@ -# Hiredis-cluster Makefile, based on the Makefile in hiredis. -# -# Copyright (C) 2021 Bjorn Svensson -# Copyright (C) 2010-2011 Salvatore Sanfilippo -# Copyright (C) 2010-2011 Pieter Noordhuis -# This file is released under the BSD license, see the COPYING file - -OBJ=adlist.o command.o crc16.o dict.o hiarray.o hircluster.o hiutil.o -EXAMPLES=hiredis-cluster-example -LIBNAME=libhiredis_cluster -PKGCONFNAME=hiredis_cluster.pc - -HIREDIS_CLUSTER_MAJOR=$(shell grep HIREDIS_CLUSTER_MAJOR hircluster.h | awk '{print $$3}') -HIREDIS_CLUSTER_MINOR=$(shell grep HIREDIS_CLUSTER_MINOR hircluster.h | awk '{print $$3}') -HIREDIS_CLUSTER_PATCH=$(shell grep HIREDIS_CLUSTER_PATCH hircluster.h | awk '{print $$3}') -HIREDIS_CLUSTER_SONAME=$(shell grep HIREDIS_CLUSTER_SONAME hircluster.h | awk '{print $$3}') - -# Installation related variables and target -PREFIX?=/usr/local -INCLUDE_PATH?=include/hiredis_cluster -LIBRARY_PATH?=lib -PKGCONF_PATH?=pkgconfig -INSTALL_INCLUDE_PATH= $(DESTDIR)$(PREFIX)/$(INCLUDE_PATH) -INSTALL_LIBRARY_PATH= $(DESTDIR)$(PREFIX)/$(LIBRARY_PATH) -INSTALL_PKGCONF_PATH= $(INSTALL_LIBRARY_PATH)/$(PKGCONF_PATH) - -# Fallback to gcc when $CC is not in $PATH. 
-CC:=$(shell sh -c 'type $${CC%% *} >/dev/null 2>/dev/null && echo $(CC) || echo gcc') -OPTIMIZATION?=-O3 -WARNINGS=-Wall -Wextra -pedantic -Werror -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -DEBUG_FLAGS?= -g -ggdb -REAL_CFLAGS=$(OPTIMIZATION) -std=c99 -fPIC $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS) -REAL_LDFLAGS=$(LDFLAGS) - -DYLIBSUFFIX=so -STLIBSUFFIX=a -DYLIB_MINOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_CLUSTER_SONAME) -DYLIB_MAJOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_CLUSTER_MAJOR) -DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX) - -DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(DYLIB_MINOR_NAME) -STLIBNAME=$(LIBNAME).$(STLIBSUFFIX) -STLIB_MAKE_CMD=$(AR) rcs - -SSL_OBJ=hircluster_ssl.o -SSL_LIBNAME=libhiredis_cluster_ssl -SSL_PKGCONFNAME=hiredis_cluster_ssl.pc -SSL_INSTALLNAME=install-ssl -SSL_DYLIB_MINOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME) -SSL_DYLIB_MAJOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR) -SSL_DYLIBNAME=$(SSL_LIBNAME).$(DYLIBSUFFIX) -SSL_STLIBNAME=$(SSL_LIBNAME).$(STLIBSUFFIX) -SSL_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(SSL_DYLIB_MINOR_NAME) - -USE_SSL?=0 -ifeq ($(USE_SSL),1) - SSL_STLIB=$(SSL_STLIBNAME) - SSL_DYLIB=$(SSL_DYLIBNAME) - SSL_PKGCONF=$(SSL_PKGCONFNAME) - SSL_INSTALL=$(SSL_INSTALLNAME) - EXAMPLES+=hiredis-cluster-example-tls -endif - -# Platform-specific overrides -uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') - -ifeq ($(USE_SSL),1) -ifeq ($(uname_S),Linux) - REAL_LDFLAGS+=-lssl -lcrypto -endif -endif - -all: $(DYLIBNAME) $(SSL_DYLIBNAME) $(STLIBNAME) $(SSL_STLIBNAME) $(PKGCONFNAME) $(SSL_PKGCONFNAME) - -# Deps (use `USE_SSL=1 make dep` to generate this) -adlist.o: adlist.c adlist.h hiutil.h -command.o: command.c command.h adlist.h hiarray.h hiutil.h win32.h -crc16.o: crc16.c hiutil.h -dict.o: dict.c dict.h -hiarray.o: hiarray.c hiarray.h hiutil.h -hircluster.o: hircluster.c adlist.h command.h dict.h hiarray.h \ - hircluster.h hiutil.h win32.h -hiutil.o: hiutil.c hiutil.h win32.h 
-hircluster_ssl.o: hircluster_ssl.c hircluster_ssl.h hircluster.h dict.h - -$(DYLIBNAME): $(OBJ) - $(DYLIB_MAKE_CMD) -o $(DYLIBNAME) $(OBJ) $(REAL_LDFLAGS) - -$(STLIBNAME): $(OBJ) - $(STLIB_MAKE_CMD) $(STLIBNAME) $(OBJ) - -$(SSL_DYLIBNAME): $(SSL_OBJ) - $(SSL_DYLIB_MAKE_CMD) $(DYLIB_PLUGIN) -o $(SSL_DYLIBNAME) $(SSL_OBJ) $(REAL_LDFLAGS) $(SSL_LDFLAGS) - -$(SSL_STLIBNAME): $(SSL_OBJ) - $(STLIB_MAKE_CMD) $(SSL_STLIBNAME) $(SSL_OBJ) - -$(SSL_OBJ): hircluster_ssl.c - -dynamic: $(DYLIBNAME) $(SSL_DYLIB) -static: $(STLIBNAME) $(SSL_STLIB) - -# Binaries: -hiredis-cluster-example: examples/src/example.c - $(CC) -o examples/$@ $(REAL_CFLAGS) $< $(REAL_LDFLAGS) -hiredis-cluster-example-tls: examples/src/example_tls.c - $(CC) -o examples/$@ $(REAL_CFLAGS) $< $(REAL_LDFLAGS) - -examples: $(EXAMPLES) - -.c.o: - $(CC) -c $(REAL_CFLAGS) $< - -clean: - rm -rf $(DYLIBNAME) $(STLIBNAME) $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(PKGCONFNAME) $(SSL_PKGCONFNAME) examples/hiredis-cluster-example* *.o *.gcda *.gcno *.gcov - -dep: - $(CC) $(CPPFLAGS) $(CFLAGS) -MM *.c - -INSTALL?= cp -pPR - -$(PKGCONFNAME): hircluster.h - @echo "Generating $@ for pkgconfig..." - @echo prefix=$(PREFIX) > $@ - @echo exec_prefix=\$${prefix} >> $@ - @echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@ - @echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@ - @echo >> $@ - @echo Name: hiredis-cluster >> $@ - @echo Description: Minimalistic C client library for Redis Cluster. >> $@ - @echo Version: $(HIREDIS_CLUSTER_MAJOR).$(HIREDIS_CLUSTER_MINOR).$(HIREDIS_CLUSTER_PATCH) >> $@ - @echo Libs: -L\$${libdir} -lhiredis_cluster >> $@ - @echo Cflags: -I\$${includedir} -D_FILE_OFFSET_BITS=64 >> $@ - -$(SSL_PKGCONFNAME): hircluster_ssl.h - @echo "Generating $@ for pkgconfig..." 
- @echo prefix=$(PREFIX) > $@ - @echo exec_prefix=\$${prefix} >> $@ - @echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@ - @echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@ - @echo >> $@ - @echo Name: hiredis-cluster-ssl >> $@ - @echo Description: SSL support for hiredis-cluster. >> $@ - @echo Version: $(HIREDIS_CLUSTER_MAJOR).$(HIREDIS_CLUSTER_MINOR).$(HIREDIS_CLUSTER_PATCH) >> $@ - @echo Requires: hiredis >> $@ - @echo Libs: -L\$${libdir} -lhiredis_cluster_ssl >> $@ - @echo Libs.private: -lhiredis_ssl -lssl -lcrypto >> $@ - -install: $(DYLIBNAME) $(STLIBNAME) $(PKGCONFNAME) $(SSL_INSTALL) - mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_INCLUDE_PATH)/adapters $(INSTALL_LIBRARY_PATH) - $(INSTALL) adlist.h dict.h hiarray.h hircluster.h hiutil.h win32.h $(INSTALL_INCLUDE_PATH) - $(INSTALL) adapters/*.h $(INSTALL_INCLUDE_PATH)/adapters - $(INSTALL) $(DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(DYLIB_MINOR_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME) - $(INSTALL) $(STLIBNAME) $(INSTALL_LIBRARY_PATH) - mkdir -p $(INSTALL_PKGCONF_PATH) - $(INSTALL) $(PKGCONFNAME) $(INSTALL_PKGCONF_PATH) - -install-ssl: $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(SSL_PKGCONFNAME) - mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH) - $(INSTALL) hircluster_ssl.h $(INSTALL_INCLUDE_PATH) - $(INSTALL) $(SSL_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME) - $(INSTALL) $(SSL_STLIBNAME) $(INSTALL_LIBRARY_PATH) - mkdir -p $(INSTALL_PKGCONF_PATH) - $(INSTALL) $(SSL_PKGCONFNAME) $(INSTALL_PKGCONF_PATH) - -32bit: - @echo "" - @echo "WARNING: if this fails under Linux you probably need to install libc6-dev-i386" - @echo "" - $(MAKE) CFLAGS="-m32" LDFLAGS="-m32" - -32bit-vars: - $(eval CFLAGS=-m32) - $(eval LDFLAGS=-m32) - -gprof: - $(MAKE) CFLAGS="-pg" LDFLAGS="-pg" - -gcov: - $(MAKE) CFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS="-fprofile-arcs" - -noopt: - $(MAKE) OPTIMIZATION="" - -.PHONY: 
all clean dep install 32bit 32bit-vars gprof gcov noopt diff --git a/vendor/github.com/Nordix/hiredis-cluster/README.md b/vendor/github.com/Nordix/hiredis-cluster/README.md deleted file mode 100644 index ed2fb870c05..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/README.md +++ /dev/null @@ -1,396 +0,0 @@ -# Hiredis-cluster - -Hiredis-cluster is a C client library for cluster deployments of the -[Redis](http://redis.io/) database. - -Hiredis-cluster is using [Hiredis](https://github.com/redis/hiredis) for the -connections to each Redis node. - -Hiredis-cluster is a fork of Hiredis-vip, with the following improvements: - -* The C library `hiredis` is an external dependency rather than a builtin part - of the cluster client, meaning that the latest `hiredis` can be used. -* Support for SSL/TLS introduced in Redis 6 -* Support for IPv6 -* Support authentication using AUTH -* Uses CMake (3.11+) as the primary build system, but optionally Make can be used directly -* Code style guide (using clang-format) -* Improved testing -* Memory leak corrections and allocation failure handling -* Low-level API for sending commands to specific node - -## Features - -* Redis Cluster - * Connect to a Redis cluster and run commands. - -* Multi-key commands - * Support `MSET`, `MGET` and `DEL`. - * Multi-key commands will be processed and sent to slot owning nodes. - (This breaks the atomicity of the commands if the keys reside on different - nodes so if atomicity is important, use these only with keys in the same - cluster slot.) - -* Pipelining - * Send multiple commands at once to speed up queries. - * Supports multi-key commands described in above bullet. - -* Asynchronous API - * Send commands asynchronously and let a callback handle the response. - * Needs an external event loop system that can be attached using an adapter. 
- -* SSL/TLS - * Connect to Redis nodes using SSL/TLS (supported from Redis 6) - -* IPv6 - * Handles clusters on IPv6 networks - -## Build instructions - -Prerequisites: - -* A C compiler (GCC or Clang) -* CMake and GNU Make (but see [Alternative build using Makefile - directly](#alternative-build-using-makefile-directly) below for how to build - without CMake) -* [hiredis](https://github.com/redis/hiredis); downloaded automatically by - default, but see build options below -* [libevent](https://libevent.org/) (`libevent-dev` in Debian); can be avoided - if building without tests (DISABLE_TESTS=ON) -* OpenSSL (`libssl-dev` in Debian) if building with TLS support - -Hiredis-cluster will be built as a shared library `libhiredis_cluster.so` and -it depends on the hiredis shared library `libhiredis.so`. - -When SSL/TLS support is enabled an extra library `libhiredis_cluster_ssl.so` -is built, which depends on the hiredis SSL support library `libhiredis_ssl.a`. - -A user project that needs SSL/TLS support should link to both `libhiredis_cluster.so` -and `libhiredis_cluster_ssl.so` to enable the SSL/TLS configuration API. - -```sh -$ mkdir build; cd build -$ cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_SSL=ON .. -$ make -``` - -### Build options - -The following CMake options are available: - -* `DOWNLOAD_HIREDIS` - * `OFF` CMake will search for an already installed hiredis (for example the - the Debian package `libhiredis-dev`) for header files and linkage. - * `ON` (default) hiredis will be downloaded from - [Github](https://github.com/redis/hiredis), built and installed locally in - the build folder. -* `ENABLE_SSL` - * `OFF` (default) - * `ON` Enable SSL/TLS support and build its tests (also affect hiredis when - `DOWNLOAD_HIREDIS=ON`). -* `DISABLE_TESTS` - * `OFF` (default) - * `ON` Disable compilation of tests (also affect hiredis when - `DOWNLOAD_HIREDIS=ON`). -* `ENABLE_IPV6_TESTS` - * `OFF` (default) - * `ON` Enable IPv6 tests. 
Requires that IPv6 is - [setup](https://docs.docker.com/config/daemon/ipv6/) in Docker. -* `ENABLE_COVERAGE` - * `OFF` (default) - * `ON` Compile using build flags that enables the GNU coverage tool `gcov` - to provide test coverage information. This CMake option also enables a new - build target `coverage` to generate a test coverage report using - [gcovr](https://gcovr.com/en/stable/index.html). -* `USE_SANITIZER` - Compile using a specific sanitizer that detect issues. The value of this - option specifies which sanitizer to activate, but it depends on support in the - [compiler](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#index-fsanitize_003daddress). - Common option values are: `address`, `thread`, `undefined`, `leak` - -Options needs to be set with the `-D` flag when generating makefiles, e.g. - -`cmake -DENABLE_SSL=ON -DUSE_SANITIZER=address ..` - -### Build details - -The build uses CMake's [find_package](https://cmake.org/cmake/help/latest/command/find_package.html#search-procedure) -to search for a `hiredis` installation. CMake will search for a `hiredis` -installation in the default paths, searching for a file called `hiredis-config.cmake`. -The default search path can be altered via `CMAKE_PREFIX_PATH` or -as described in the CMake docs; a specific path can be set using a flag like: -`-Dhiredis_DIR:PATH=${MY_DIR}/hiredis/share/hiredis` - -See `examples/using_cmake_separate/build.sh` or -`examples/using_cmake_externalproject/build.sh` for alternative CMake builds. - -### Alternative build using Makefile directly - -When a simpler build setup is preferred a provided Makefile can be used directly -when building. A benefit of this, instead of using CMake, is that it also provides -a static library, a similar limitation exists in the CMake files in hiredis v1.0.0. 
- -The only option that exists in the Makefile is to enable SSL/TLS support via `USE_SSL=1` - -See [`examples/using_make/build.sh`](examples/using_make/build.sh) for an -example build. - -### Running the tests - -Some tests needs a Redis cluster and that can be setup by the make targets -`start`/`stop`. The clusters will be setup using Docker and it may take a while -for them to be ready and accepting requests. Run `make start` to start the -clusters and then wait a few seconds before running `make test`. -To stop the running cluster containers run `make stop`. - -```sh -$ make start -$ make test -$ make stop -``` - -If you want to set up the Redis clusters manually they should run on localhost -using following access ports: - -| Cluster type | Access port | -| ---------------------------------- | -------: | -| IPv4 | 7000 | -| IPv4, authentication needed, password: `secretword` | 7100 | -| IPv6 | 7200 | -| IPv4, using TLS/SSL | 7300 | - -## Quick usage - -## Cluster synchronous API - -### Connecting - -The function `redisClusterContextInit` is used to create a `redisClusterContext`. -The function `redisClusterSetOptionAddNodes` is used to add one or many Redis Cluster addresses. -The function `redisClusterConnect2` is used to connect to the Redis Cluster. -The context is where the state for connections is kept. -The `redisClusterContext`struct has an integer `err` field that is non-zero when the connection is -in an error state. The field `errstr` will contain a string with a description of the error. 
-After trying to connect to Redis using `redisClusterContext` you should check the `err` field to see -if establishing the connection was successful: -```c -redisClusterContext *cc = redisClusterContextInit(); -redisClusterSetOptionAddNodes(cc, "127.0.0.1:6379,127.0.0.1:6380"); -redisClusterConnect2(cc); -if (cc != NULL && cc->err) { - printf("Error: %s\n", cc->errstr); - // handle error -} -``` - -### Sending commands - -The function `redisClusterCommand` takes a format similar to printf. -In the simplest form it is used like: -```c -reply = redisClusterCommand(clustercontext, "SET foo bar"); -``` - -The specifier `%s` interpolates a string in the command, and uses `strlen` to -determine the length of the string: -```c -reply = redisClusterCommand(clustercontext, "SET foo %s", value); -``` -Internally, hiredis-cluster splits the command in different arguments and will -convert it to the protocol used to communicate with Redis. -One or more spaces separates arguments, so you can use the specifiers -anywhere in an argument: -```c -reply = redisClusterCommand(clustercontext, "SET key:%s %s", myid, value); -``` - -### Sending multi-key commands - -Hiredis-cluster supports mget/mset/del multi-key commands. -The command will be splitted per slot and sent to correct Redis nodes. - -Example: -```c -reply = redisClusterCommand(clustercontext, "mget %s %s %s %s", key1, key2, key3, key4); -``` - -### Sending commands to a specific node - -When there is a need to send commands to a specific node, the following low-level API can be used. - -```c -reply = redisClusterCommandToNode(clustercontext, node, "DBSIZE"); -``` - -The function handles printf like arguments similar to `redisClusterCommand()`, but will -only attempt to send the command to the given node and will not perform redirects or retries. 
- -### Teardown - -To disconnect and free the context the following function can be used: -```c -void redisClusterFree(redisClusterContext *cc); -``` -This function closes the sockets and deallocates the context. - -### Cluster pipelining - -The function `redisClusterGetReply` is exported as part of the Hiredis API and can be used -when a reply is expected on the socket. To pipeline commands, the only things that needs -to be done is filling up the output buffer. For this cause, the following commands can be used that -are identical to the `redisClusterCommand` family, apart from not returning a reply: -```c -int redisClusterAppendCommand(redisClusterContext *cc, const char *format, ...); -int redisClusterAppendCommandArgv(redisClusterContext *cc, int argc, const char **argv); - -/* Send a command to a specific cluster node */ -int redisClusterAppendCommandToNode(redisClusterContext *cc, cluster_node *node, - const char *format, ...); -``` -After calling either function one or more times, `redisClusterGetReply` can be used to receive the -subsequent replies. The return value for this function is either `REDIS_OK` or `REDIS_ERR`, where -the latter means an error occurred while reading a reply. Just as with the other commands, -the `err` field in the context can be used to find out what the cause of this error is. -```c -void redisClusterReset(redisClusterContext *cc); -``` -Warning: You must call `redisClusterReset` function after one pipelining anyway. - -Warning: Calling `redisClusterReset` without pipelining first will reset all Redis connections. 
- -The following examples shows a simple cluster pipeline: -```c -redisReply *reply; -redisClusterAppendCommand(clusterContext,"SET foo bar"); -redisClusterAppendCommand(clusterContext,"GET foo"); -redisClusterGetReply(clusterContext,&reply); // reply for SET -freeReplyObject(reply); -redisClusterGetReply(clusterContext,&reply); // reply for GET -freeReplyObject(reply); -redisClusterReset(clusterContext); -``` - -## Cluster asynchronous API - -Hiredis-cluster comes with an asynchronous cluster API that works with many event systems. -Currently there are adapters that enables support for `libevent`, `libev`, `libuv`, `glib` -and Redis Event Library (`ae`). For usage examples, see the test programs with the different -event libraries `tests/ct_async_{libev,libuv,glib}.c`. - -The hiredis library has adapters for additional event systems that easily can be adapted -for hiredis-cluster as well. - -### Connecting - -The function `redisAsyncConnect` can be used to establish a non-blocking connection to -Redis. It returns a pointer to the newly created `redisAsyncContext` struct. The `err` field -should be checked after creation to see if there were errors creating the connection. -Because the connection that will be created is non-blocking, the kernel is not able to -instantly return if the specified host and port is able to accept a connection. -```c -redisClusterAsyncContext *acc = redisClusterAsyncConnect("127.0.0.1:6379", HIRCLUSTER_FLAG_NULL); -if (acc->err) { - printf("Error: %s\n", acc->errstr); - // handle error -} -``` - -The cluster asynchronous context can hold a disconnect callback function that is called when the -connection is disconnected (either because of an error or per user request). 
This function should -have the following prototype: -```c -void(const redisAsyncContext *c, int status); -``` -On a disconnect, the `status` argument is set to `REDIS_OK` when disconnection was initiated by the -user, or `REDIS_ERR` when the disconnection was caused by an error. When it is `REDIS_ERR`, the `err` -field in the context can be accessed to find out the cause of the error. - -You dont need to reconnect in the disconnect callback, hiredis-cluster will reconnect by itself when next command for this Redis node is handled. - -Setting the disconnect callback can only be done once per context. For subsequent calls it will -return `REDIS_ERR`. The function to set the disconnect callback has the following prototype: -```c -int redisClusterAsyncSetDisconnectCallback(redisClusterAsyncContext *acc, redisDisconnectCallback *fn); -``` - -### Sending commands and their callbacks - -In an asynchronous cluster context, commands are automatically pipelined due to the nature of an event loop. -Therefore, unlike the synchronous cluster API, there is only a single way to send commands. -Because commands are sent to Redis Cluster asynchronously, issuing a command requires a callback function -that is called when the reply is received. Reply callbacks should have the following prototype: -```c -void(redisClusterAsyncContext *acc, void *reply, void *privdata); -``` -The `privdata` argument can be used to carry arbitrary data to the callback from the point where -the command is initially queued for execution. 
- -The functions that can be used to issue commands in an asynchronous context are: -```c -int redisClusterAsyncCommand(redisClusterAsyncContext *acc, - redisClusterCallbackFn *fn, - void *privdata, const char *format, ...); -int redisClusterAsyncCommandToNode(redisClusterAsyncContext *acc, - cluster_node *node, - redisClusterCallbackFn *fn, void *privdata, - const char *format, ...); -int redisClusterAsyncFormattedCommandToNode(redisClusterAsyncContext *acc, - cluster_node *node, - redisClusterCallbackFn *fn, - void *privdata, char *cmd, int len); -``` -These functions works like their blocking counterparts. The return value is `REDIS_OK` when the command -was successfully added to the output buffer and `REDIS_ERR` otherwise. Example: when the connection -is being disconnected per user-request, no new commands may be added to the output buffer and `REDIS_ERR` is -returned on calls to the `redisClusterAsyncCommand` family. - -If the reply for a command with a `NULL` callback is read, it is immediately freed. When the callback -for a command is non-`NULL`, the memory is freed immediately following the callback: the reply is only -valid for the duration of the callback. - -All pending callbacks are called with a `NULL` reply when the context encountered an error. - -### Disconnecting - -Asynchronous cluster connections can be terminated using: -```c -void redisClusterAsyncDisconnect(redisClusterAsyncContext *acc); -``` -When this function is called, connections are **not** immediately terminated. Instead, new -commands are no longer accepted and connections are only terminated when all pending commands -have been written to a socket, their respective replies have been read and their respective -callbacks have been executed. After this, the disconnection callback is executed with the -`REDIS_OK` status and the context object is freed. - -### Using event library *X* - -There are a few hooks that need to be set on the cluster context object after it is created. 
-See the `adapters/` directory for bindings to *libevent* and a range of other event libraries. - -### Allocator injection - -Hiredis-cluster uses hiredis allocation structure with configurable allocation and deallocation functions. By default they just point to libc (`malloc`, `calloc`, `realloc`, etc). - -#### Overriding - -If you have your own allocator or if you expect an abort in out-of-memory cases, -you can configure the used functions in the following way: - -```c -hiredisAllocFuncs myfuncs = { - .mallocFn = my_malloc, - .callocFn = my_calloc, - .reallocFn = my_realloc, - .strdupFn = my_strdup, - .freeFn = my_free, -}; - -// Override allocators (function returns current allocators if needed) -hiredisAllocFuncs orig = hiredisSetAllocators(&myfuncs); -``` - -To reset the allocators to their default libc functions simply call: - -```c -hiredisResetAllocators(); -``` diff --git a/vendor/github.com/Nordix/hiredis-cluster/adapters/glib.h b/vendor/github.com/Nordix/hiredis-cluster/adapters/glib.h deleted file mode 100644 index c3718a21af8..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/adapters/glib.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2021, Björn Svensson - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __HIREDIS_CLUSTER_GLIB_H__ -#define __HIREDIS_CLUSTER_GLIB_H__ - -#include "../hircluster.h" -#include - -typedef struct redisClusterGlibAdapter { - GMainContext *context; -} redisClusterGlibAdapter; - -static int redisGlibAttach_link(redisAsyncContext *ac, void *adapter) { - GMainContext *context = ((redisClusterGlibAdapter *)adapter)->context; - if (g_source_attach(redis_source_new(ac), context) > 0) { - return REDIS_OK; - } - return REDIS_ERR; -} - -static int redisClusterGlibAttach(redisClusterAsyncContext *acc, - redisClusterGlibAdapter *adapter) { - if (acc == NULL || adapter == NULL) { - return REDIS_ERR; - } - - acc->adapter = adapter; - acc->attach_fn = redisGlibAttach_link; - - return REDIS_OK; -} - -#endif diff --git a/vendor/github.com/Nordix/hiredis-cluster/adapters/libev.h b/vendor/github.com/Nordix/hiredis-cluster/adapters/libev.h deleted file mode 100644 index 084fd34fe2d..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/adapters/libev.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2021, Björn Svensson - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __HIREDIS_CLUSTER_LIBEV_H__ -#define __HIREDIS_CLUSTER_LIBEV_H__ - -#include "../hircluster.h" -#include - -static int redisLibevAttach_link(redisAsyncContext *ac, void *loop) { - return redisLibevAttach((struct ev_loop *)loop, ac); -} - -static int redisClusterLibevAttach(redisClusterAsyncContext *acc, - struct ev_loop *loop) { - if (loop == NULL || acc == NULL) { - return REDIS_ERR; - } - - acc->adapter = loop; - acc->attach_fn = redisLibevAttach_link; - - return REDIS_OK; -} - -#endif diff --git a/vendor/github.com/Nordix/hiredis-cluster/adapters/libevent.h b/vendor/github.com/Nordix/hiredis-cluster/adapters/libevent.h deleted file mode 100644 index 87d312f08d2..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/adapters/libevent.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2010-2011, Pieter Noordhuis - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __HIREDIS_CLUSTER_LIBEVENT_H__ -#define __HIREDIS_CLUSTER_LIBEVENT_H__ - -#include "../hircluster.h" -#include - -static int redisLibeventAttach_link(redisAsyncContext *ac, void *base) { - return redisLibeventAttach(ac, (struct event_base *)base); -} - -static int redisClusterLibeventAttach(redisClusterAsyncContext *acc, - struct event_base *base) { - - if (acc == NULL || base == NULL) { - return REDIS_ERR; - } - - acc->adapter = base; - acc->attach_fn = redisLibeventAttach_link; - - return REDIS_OK; -} - -#endif diff --git a/vendor/github.com/Nordix/hiredis-cluster/command.c b/vendor/github.com/Nordix/hiredis-cluster/command.c deleted file mode 100644 index 55e155e9dae..00000000000 --- a/vendor/github.com/Nordix/hiredis-cluster/command.c +++ /dev/null @@ -1,1350 +0,0 @@ -/* - * Copyright (c) 2015-2017, Ieshen Zheng - * Copyright (c) 2020, Nick - * Copyright (c) 2020-2021, Bjorn Svensson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include -#include -#ifndef _WIN32 -#include -#endif - -#include "command.h" -#include "hiarray.h" -#include "hiutil.h" -#include "win32.h" - -#define LF (uint8_t)10 -#define CR (uint8_t)13 - -static uint64_t cmd_id = 0; /* command id counter */ - -/* - * Return true, if the redis command take no key, otherwise - * return false - * Format: command - */ -static int redis_argz(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_PING: - case CMD_REQ_REDIS_QUIT: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command accepts no arguments, otherwise - * return false - * Format: command key - */ -static int redis_arg0(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_PERSIST: - case CMD_REQ_REDIS_PTTL: - case CMD_REQ_REDIS_SORT: - case CMD_REQ_REDIS_TTL: - case CMD_REQ_REDIS_TYPE: - case CMD_REQ_REDIS_DUMP: - - case CMD_REQ_REDIS_DECR: - case CMD_REQ_REDIS_GET: - case CMD_REQ_REDIS_INCR: - case CMD_REQ_REDIS_STRLEN: - - case 
CMD_REQ_REDIS_HGETALL: - case CMD_REQ_REDIS_HKEYS: - case CMD_REQ_REDIS_HLEN: - case CMD_REQ_REDIS_HVALS: - - case CMD_REQ_REDIS_LLEN: - case CMD_REQ_REDIS_LPOP: - case CMD_REQ_REDIS_RPOP: - - case CMD_REQ_REDIS_SCARD: - case CMD_REQ_REDIS_SMEMBERS: - case CMD_REQ_REDIS_SPOP: - - case CMD_REQ_REDIS_XLEN: - case CMD_REQ_REDIS_ZCARD: - case CMD_REQ_REDIS_PFCOUNT: - case CMD_REQ_REDIS_AUTH: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command accepts exactly 1 argument, otherwise - * return false - * Format: command key arg - */ -static int redis_arg1(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_EXPIRE: - case CMD_REQ_REDIS_EXPIREAT: - case CMD_REQ_REDIS_PEXPIRE: - case CMD_REQ_REDIS_PEXPIREAT: - - case CMD_REQ_REDIS_APPEND: - case CMD_REQ_REDIS_DECRBY: - case CMD_REQ_REDIS_GETBIT: - case CMD_REQ_REDIS_GETSET: - case CMD_REQ_REDIS_INCRBY: - case CMD_REQ_REDIS_INCRBYFLOAT: - case CMD_REQ_REDIS_SETNX: - - case CMD_REQ_REDIS_HEXISTS: - case CMD_REQ_REDIS_HGET: - - case CMD_REQ_REDIS_LINDEX: - case CMD_REQ_REDIS_LPUSHX: - case CMD_REQ_REDIS_RPOPLPUSH: - case CMD_REQ_REDIS_RPUSHX: - - case CMD_REQ_REDIS_PUBLISH: - - case CMD_REQ_REDIS_SISMEMBER: - - case CMD_REQ_REDIS_ZRANK: - case CMD_REQ_REDIS_ZREVRANK: - case CMD_REQ_REDIS_ZSCORE: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command accepts exactly 2 arguments, otherwise - * return false - * Format: command key arg1 arg2 - */ -static int redis_arg2(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_GETRANGE: - case CMD_REQ_REDIS_PSETEX: - case CMD_REQ_REDIS_SETBIT: - case CMD_REQ_REDIS_SETEX: - case CMD_REQ_REDIS_SETRANGE: - - case CMD_REQ_REDIS_HINCRBY: - case CMD_REQ_REDIS_HINCRBYFLOAT: - case CMD_REQ_REDIS_HSETNX: - - case CMD_REQ_REDIS_LRANGE: - case CMD_REQ_REDIS_LREM: - case CMD_REQ_REDIS_LSET: - case CMD_REQ_REDIS_LTRIM: - - case CMD_REQ_REDIS_SMOVE: - - case CMD_REQ_REDIS_ZCOUNT: - case 
CMD_REQ_REDIS_ZLEXCOUNT: - case CMD_REQ_REDIS_ZINCRBY: - case CMD_REQ_REDIS_ZREMRANGEBYLEX: - case CMD_REQ_REDIS_ZREMRANGEBYRANK: - case CMD_REQ_REDIS_ZREMRANGEBYSCORE: - - case CMD_REQ_REDIS_RESTORE: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command accepts exactly 3 arguments, otherwise - * return false - * Format: command key arg1 arg2 arg3 - */ -static int redis_arg3(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_LINSERT: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command accepts 0 or more arguments, otherwise - * return false - * Format: command key [ arg ... ] - */ -static int redis_argn(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_BITCOUNT: - case CMD_REQ_REDIS_BITFIELD: - case CMD_REQ_REDIS_BITFIELD_RO: - - case CMD_REQ_REDIS_GEOADD: - - case CMD_REQ_REDIS_SET: - case CMD_REQ_REDIS_HDEL: - case CMD_REQ_REDIS_HMGET: - case CMD_REQ_REDIS_HMSET: - case CMD_REQ_REDIS_HSCAN: - case CMD_REQ_REDIS_HSET: - - case CMD_REQ_REDIS_LPUSH: - case CMD_REQ_REDIS_RPUSH: - - case CMD_REQ_REDIS_SADD: - case CMD_REQ_REDIS_SDIFF: - case CMD_REQ_REDIS_SDIFFSTORE: - case CMD_REQ_REDIS_SINTER: - case CMD_REQ_REDIS_SINTERSTORE: - case CMD_REQ_REDIS_SREM: - case CMD_REQ_REDIS_SUNION: - case CMD_REQ_REDIS_SUNIONSTORE: - case CMD_REQ_REDIS_SRANDMEMBER: - case CMD_REQ_REDIS_SSCAN: - - case CMD_REQ_REDIS_PFADD: - case CMD_REQ_REDIS_PFMERGE: - case CMD_REQ_REDIS_XACK: - case CMD_REQ_REDIS_XADD: - case CMD_REQ_REDIS_XAUTOCLAIM: - case CMD_REQ_REDIS_XCLAIM: - case CMD_REQ_REDIS_XDEL: - case CMD_REQ_REDIS_XPENDING: - case CMD_REQ_REDIS_XRANGE: - case CMD_REQ_REDIS_XREVRANGE: - case CMD_REQ_REDIS_XTRIM: - case CMD_REQ_REDIS_ZADD: - case CMD_REQ_REDIS_ZINTERSTORE: - case CMD_REQ_REDIS_ZRANGE: - case CMD_REQ_REDIS_ZRANGEBYSCORE: - case CMD_REQ_REDIS_ZREM: - case CMD_REQ_REDIS_ZREVRANGE: - case CMD_REQ_REDIS_ZRANGEBYLEX: - case CMD_REQ_REDIS_ZREVRANGEBYSCORE: - case 
CMD_REQ_REDIS_ZUNIONSTORE: - case CMD_REQ_REDIS_ZSCAN: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command is a vector command accepting one or - * more keys, otherwise return false - * Format: command key [ key ... ] - */ -static int redis_argx(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_EXISTS: - case CMD_REQ_REDIS_MGET: - case CMD_REQ_REDIS_DEL: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command is a vector command accepting one or - * more key-value pairs, otherwise return false - * Format: command key value [ key value ... ] - */ -static int redis_argkvx(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_MSET: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Check if command type expects a sub-command before the key - * Format: command subcommand key [ arg ... ] - */ -static int redis_argsub(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_XGROUP: - case CMD_REQ_REDIS_XINFO: - return 1; - - default: - break; - } - - return 0; -} - -/* - * Return true, if the redis command is either EVAL or EVALSHA. These commands - * have a special format with exactly 2 arguments, followed by one or more keys, - * followed by zero or more arguments (the documentation online seems to suggest - * that at least one argument is required, but that shouldn't be the case). - */ -static int redis_argeval(struct cmd *r) { - switch (r->type) { - case CMD_REQ_REDIS_EVAL: - case CMD_REQ_REDIS_EVALSHA: - return 1; - - default: - break; - } - - return 0; -} - -static inline cmd_type_t redis_parse_cmd_verb(const char *m, int len) { - // clang-format off - switch (len) { - case 3: - return !strncasecmp(m, "get", 3) ? CMD_REQ_REDIS_GET : - !strncasecmp(m, "set", 3) ? CMD_REQ_REDIS_SET : - !strncasecmp(m, "ttl", 3) ? CMD_REQ_REDIS_TTL : - !strncasecmp(m, "del", 3) ? CMD_REQ_REDIS_DEL : - CMD_UNKNOWN; - case 4: - return !strncasecmp(m, "pttl", 4) ? 
CMD_REQ_REDIS_PTTL : - !strncasecmp(m, "decr", 4) ? CMD_REQ_REDIS_DECR : - !strncasecmp(m, "dump", 4) ? CMD_REQ_REDIS_DUMP : - !strncasecmp(m, "hdel", 4) ? CMD_REQ_REDIS_HDEL : - !strncasecmp(m, "hget", 4) ? CMD_REQ_REDIS_HGET : - !strncasecmp(m, "hlen", 4) ? CMD_REQ_REDIS_HLEN : - !strncasecmp(m, "hset", 4) ? CMD_REQ_REDIS_HSET : - !strncasecmp(m, "incr", 4) ? CMD_REQ_REDIS_INCR : - !strncasecmp(m, "llen", 4) ? CMD_REQ_REDIS_LLEN : - !strncasecmp(m, "lpop", 4) ? CMD_REQ_REDIS_LPOP : - !strncasecmp(m, "lrem", 4) ? CMD_REQ_REDIS_LREM : - !strncasecmp(m, "lset", 4) ? CMD_REQ_REDIS_LSET : - !strncasecmp(m, "rpop", 4) ? CMD_REQ_REDIS_RPOP : - !strncasecmp(m, "sadd", 4) ? CMD_REQ_REDIS_SADD : - !strncasecmp(m, "spop", 4) ? CMD_REQ_REDIS_SPOP : - !strncasecmp(m, "srem", 4) ? CMD_REQ_REDIS_SREM : - !strncasecmp(m, "type", 4) ? CMD_REQ_REDIS_TYPE : - !strncasecmp(m, "mget", 4) ? CMD_REQ_REDIS_MGET : - !strncasecmp(m, "mset", 4) ? CMD_REQ_REDIS_MSET : - !strncasecmp(m, "xack", 4) ? CMD_REQ_REDIS_XACK : - !strncasecmp(m, "xadd", 4) ? CMD_REQ_REDIS_XADD : - !strncasecmp(m, "xdel", 4) ? CMD_REQ_REDIS_XDEL : - !strncasecmp(m, "xlen", 4) ? CMD_REQ_REDIS_XLEN : - !strncasecmp(m, "zadd", 4) ? CMD_REQ_REDIS_ZADD : - !strncasecmp(m, "zrem", 4) ? CMD_REQ_REDIS_ZREM : - !strncasecmp(m, "eval", 4) ? CMD_REQ_REDIS_EVAL : - !strncasecmp(m, "sort", 4) ? CMD_REQ_REDIS_SORT : - !strncasecmp(m, "ping", 4) ? CMD_REQ_REDIS_PING : - !strncasecmp(m, "quit", 4) ? CMD_REQ_REDIS_QUIT : - !strncasecmp(m, "auth", 4) ? CMD_REQ_REDIS_AUTH : - CMD_UNKNOWN; - case 5: - return !strncasecmp(m, "hkeys", 5) ? CMD_REQ_REDIS_HKEYS : - !strncasecmp(m, "hmget", 5) ? CMD_REQ_REDIS_HMGET : - !strncasecmp(m, "hmset", 5) ? CMD_REQ_REDIS_HMSET : - !strncasecmp(m, "hvals", 5) ? CMD_REQ_REDIS_HVALS : - !strncasecmp(m, "hscan", 5) ? CMD_REQ_REDIS_HSCAN : - !strncasecmp(m, "lpush", 5) ? CMD_REQ_REDIS_LPUSH : - !strncasecmp(m, "ltrim", 5) ? CMD_REQ_REDIS_LTRIM : - !strncasecmp(m, "rpush", 5) ? 
CMD_REQ_REDIS_RPUSH : - !strncasecmp(m, "scard", 5) ? CMD_REQ_REDIS_SCARD : - !strncasecmp(m, "sdiff", 5) ? CMD_REQ_REDIS_SDIFF : - !strncasecmp(m, "setex", 5) ? CMD_REQ_REDIS_SETEX : - !strncasecmp(m, "setnx", 5) ? CMD_REQ_REDIS_SETNX : - !strncasecmp(m, "smove", 5) ? CMD_REQ_REDIS_SMOVE : - !strncasecmp(m, "sscan", 5) ? CMD_REQ_REDIS_SSCAN : - !strncasecmp(m, "xinfo", 5) ? CMD_REQ_REDIS_XINFO : - !strncasecmp(m, "xtrim", 5) ? CMD_REQ_REDIS_XTRIM : - !strncasecmp(m, "zcard", 5) ? CMD_REQ_REDIS_ZCARD : - !strncasecmp(m, "zrank", 5) ? CMD_REQ_REDIS_ZRANK : - !strncasecmp(m, "zscan", 5) ? CMD_REQ_REDIS_ZSCAN : - !strncasecmp(m, "pfadd", 5) ? CMD_REQ_REDIS_PFADD : - CMD_UNKNOWN; - case 6: - return !strncasecmp(m, "append", 6) ? CMD_REQ_REDIS_APPEND : - !strncasecmp(m, "decrby", 6) ? CMD_REQ_REDIS_DECRBY : - !strncasecmp(m, "exists", 6) ? CMD_REQ_REDIS_EXISTS : - !strncasecmp(m, "expire", 6) ? CMD_REQ_REDIS_EXPIRE : - !strncasecmp(m, "geoadd", 6) ? CMD_REQ_REDIS_GEOADD : - !strncasecmp(m, "getbit", 6) ? CMD_REQ_REDIS_GETBIT : - !strncasecmp(m, "getset", 6) ? CMD_REQ_REDIS_GETSET : - !strncasecmp(m, "psetex", 6) ? CMD_REQ_REDIS_PSETEX : - !strncasecmp(m, "hsetnx", 6) ? CMD_REQ_REDIS_HSETNX : - !strncasecmp(m, "incrby", 6) ? CMD_REQ_REDIS_INCRBY : - !strncasecmp(m, "lindex", 6) ? CMD_REQ_REDIS_LINDEX : - !strncasecmp(m, "lpushx", 6) ? CMD_REQ_REDIS_LPUSHX : - !strncasecmp(m, "lrange", 6) ? CMD_REQ_REDIS_LRANGE : - !strncasecmp(m, "rpushx", 6) ? CMD_REQ_REDIS_RPUSHX : - !strncasecmp(m, "setbit", 6) ? CMD_REQ_REDIS_SETBIT : - !strncasecmp(m, "sinter", 6) ? CMD_REQ_REDIS_SINTER : - !strncasecmp(m, "strlen", 6) ? CMD_REQ_REDIS_STRLEN : - !strncasecmp(m, "sunion", 6) ? CMD_REQ_REDIS_SUNION : - !strncasecmp(m, "xclaim", 6) ? CMD_REQ_REDIS_XCLAIM : - !strncasecmp(m, "xgroup", 6) ? CMD_REQ_REDIS_XGROUP : - !strncasecmp(m, "xrange", 6) ? CMD_REQ_REDIS_XRANGE : - !strncasecmp(m, "zcount", 6) ? CMD_REQ_REDIS_ZCOUNT : - !strncasecmp(m, "zrange", 6) ? 
CMD_REQ_REDIS_ZRANGE : - !strncasecmp(m, "zscore", 6) ? CMD_REQ_REDIS_ZSCORE : - CMD_UNKNOWN; - case 7: - return !strncasecmp(m, "persist", 7) ? CMD_REQ_REDIS_PERSIST : - !strncasecmp(m, "pexpire", 7) ? CMD_REQ_REDIS_PEXPIRE : - !strncasecmp(m, "hexists", 7) ? CMD_REQ_REDIS_HEXISTS : - !strncasecmp(m, "hgetall", 7) ? CMD_REQ_REDIS_HGETALL : - !strncasecmp(m, "hincrby", 7) ? CMD_REQ_REDIS_HINCRBY : - !strncasecmp(m, "linsert", 7) ? CMD_REQ_REDIS_LINSERT : - !strncasecmp(m, "zincrby", 7) ? CMD_REQ_REDIS_ZINCRBY : - !strncasecmp(m, "evalsha", 7) ? CMD_REQ_REDIS_EVALSHA : - !strncasecmp(m, "restore", 7) ? CMD_REQ_REDIS_RESTORE : - !strncasecmp(m, "pfcount", 7) ? CMD_REQ_REDIS_PFCOUNT : - !strncasecmp(m, "pfmerge", 7) ? CMD_REQ_REDIS_PFMERGE : - !strncasecmp(m, "publish", 7) ? CMD_REQ_REDIS_PUBLISH : - CMD_UNKNOWN; - case 8: - return !strncasecmp(m, "expireat", 8) ? CMD_REQ_REDIS_EXPIREAT : - !strncasecmp(m, "bitcount", 8) ? CMD_REQ_REDIS_BITCOUNT : - !strncasecmp(m, "bitfield", 8) ? CMD_REQ_REDIS_BITFIELD : - !strncasecmp(m, "getrange", 8) ? CMD_REQ_REDIS_GETRANGE : - !strncasecmp(m, "setrange", 8) ? CMD_REQ_REDIS_SETRANGE : - !strncasecmp(m, "smembers", 8) ? CMD_REQ_REDIS_SMEMBERS : - !strncasecmp(m, "xpending", 8) ? CMD_REQ_REDIS_XPENDING : - !strncasecmp(m, "zrevrank", 8) ? CMD_REQ_REDIS_ZREVRANK : - CMD_UNKNOWN; - case 9: - return !strncasecmp(m, "pexpireat", 9) ? CMD_REQ_REDIS_PEXPIREAT : - !strncasecmp(m, "rpoplpush", 9) ? CMD_REQ_REDIS_RPOPLPUSH : - !strncasecmp(m, "sismember", 9) ? CMD_REQ_REDIS_SISMEMBER : - !strncasecmp(m, "xrevrange", 9) ? CMD_REQ_REDIS_XREVRANGE : - !strncasecmp(m, "zrevrange", 9) ? CMD_REQ_REDIS_ZREVRANGE : - !strncasecmp(m, "zlexcount", 9) ? CMD_REQ_REDIS_ZLEXCOUNT : - CMD_UNKNOWN; - case 10: - return !strncasecmp(m, "sdiffstore", 10) ? CMD_REQ_REDIS_SDIFFSTORE : - !strncasecmp(m, "xautoclaim", 10) ? CMD_REQ_REDIS_XAUTOCLAIM : - CMD_UNKNOWN; - case 11: - return !strncasecmp(m, "bitfield_ro", 11) ? 
CMD_REQ_REDIS_BITFIELD_RO : - !strncasecmp(m, "incrbyfloat", 11) ? CMD_REQ_REDIS_INCRBYFLOAT : - !strncasecmp(m, "sinterstore", 11) ? CMD_REQ_REDIS_SINTERSTORE : - !strncasecmp(m, "srandmember", 11) ? CMD_REQ_REDIS_SRANDMEMBER : - !strncasecmp(m, "sunionstore", 11) ? CMD_REQ_REDIS_SUNIONSTORE : - !strncasecmp(m, "zinterstore", 11) ? CMD_REQ_REDIS_ZINTERSTORE : - !strncasecmp(m, "zunionstore", 11) ? CMD_REQ_REDIS_ZUNIONSTORE : - !strncasecmp(m, "zrangebylex", 11) ? CMD_REQ_REDIS_ZRANGEBYLEX : - CMD_UNKNOWN; - case 12: - return !strncasecmp(m, "hincrbyfloat", 12) ? - CMD_REQ_REDIS_HINCRBYFLOAT : - CMD_UNKNOWN; - case 13: - return !strncasecmp(m, "zrangebyscore", 13) ? - CMD_REQ_REDIS_ZRANGEBYSCORE : - CMD_UNKNOWN; - case 14: - return !strncasecmp(m, "zremrangebylex", 14) ? - CMD_REQ_REDIS_ZREMRANGEBYLEX : - CMD_UNKNOWN; - case 15: - return !strncasecmp(m, "zremrangebyrank", 15) ? - CMD_REQ_REDIS_ZREMRANGEBYRANK : - CMD_UNKNOWN; - case 16: - return !strncasecmp(m, "zremrangebyscore", 16) ? - CMD_REQ_REDIS_ZREMRANGEBYSCORE : - !strncasecmp(m, "zrevrangebyscore", 16) ? - CMD_REQ_REDIS_ZREVRANGEBYSCORE : - CMD_UNKNOWN; - default: - return CMD_UNKNOWN; - } - // clang-format on -} - -/* - * Reference: http://redis.io/topics/protocol - * - * Redis >= 1.2 uses the unified protocol to send requests to the Redis - * server. In the unified protocol all the arguments sent to the server - * are binary safe and every request has the following general form: - * - * * CR LF - * $ CR LF - * CR LF - * ... - * $ CR LF - * CR LF - * - * Before the unified request protocol, redis protocol for requests supported - * the following commands - * 1). Inline commands: simple commands where arguments are just space - * separated strings. No binary safeness is possible. - * 2). Bulk commands: bulk commands are exactly like inline commands, but - * the last argument is handled in a special way in order to allow for - * a binary-safe last argument. 
- * - * only supports the Redis unified protocol for requests. - */ -void redis_parse_cmd(struct cmd *r) { - int len; - char *p, *m, *token = NULL; - char *cmd_end; - char ch; - uint32_t rlen = 0; /* running length in parsing fsa */ - uint32_t rnarg = 0; /* running # arg used by parsing fsa */ - enum { - SW_START, - SW_NARG, - SW_NARG_LF, - SW_CMD_TYPE_LEN, - SW_CMD_TYPE_LEN_LF, - SW_CMD_TYPE, - SW_CMD_TYPE_LF, - SW_KEY_LEN, - SW_KEY_LEN_LF, - SW_KEY, - SW_KEY_LF, - SW_ARG1_LEN, - SW_ARG1_LEN_LF, - SW_ARG1, - SW_ARG1_LF, - SW_ARG2_LEN, - SW_ARG2_LEN_LF, - SW_ARG2, - SW_ARG2_LF, - SW_ARG3_LEN, - SW_ARG3_LEN_LF, - SW_ARG3, - SW_ARG3_LF, - SW_ARGN_LEN, - SW_ARGN_LEN_LF, - SW_ARGN, - SW_ARGN_LF, - SW_SENTINEL - } state; - - state = SW_START; - cmd_end = r->cmd + r->clen; - - ASSERT(state >= SW_START && state < SW_SENTINEL); - ASSERT(r->cmd != NULL && r->clen > 0); - - for (p = r->cmd; p < cmd_end; p++) { - ch = *p; - - switch (state) { - - case SW_START: - case SW_NARG: - if (token == NULL) { - if (ch != '*') { - goto error; - } - token = p; - r->narg_start = p; - rnarg = 0; - state = SW_NARG; - } else if (isdigit(ch)) { - rnarg = rnarg * 10 + (uint32_t)(ch - '0'); - } else if (ch == CR) { - if (rnarg == 0) { - goto error; - } - r->narg = rnarg; - r->narg_end = p; - token = NULL; - state = SW_NARG_LF; - } else { - goto error; - } - - break; - - case SW_NARG_LF: - switch (ch) { - case LF: - state = SW_CMD_TYPE_LEN; - break; - - default: - goto error; - } - - break; - - case SW_CMD_TYPE_LEN: - if (token == NULL) { - if (ch != '$') { - goto error; - } - token = p; - rlen = 0; - } else if (isdigit(ch)) { - rlen = rlen * 10 + (uint32_t)(ch - '0'); - } else if (ch == CR) { - if (rlen == 0 || rnarg == 0) { - goto error; - } - rnarg--; - token = NULL; - state = SW_CMD_TYPE_LEN_LF; - } else { - goto error; - } - - break; - - case SW_CMD_TYPE_LEN_LF: - switch (ch) { - case LF: - state = SW_CMD_TYPE; - break; - - default: - goto error; - } - - break; - - case SW_CMD_TYPE: - if 
(token == NULL) { - token = p; - } - - m = token + rlen; - if (m >= cmd_end) { - goto error; - } - - if (*m != CR) { - goto error; - } - - p = m; /* move forward by rlen bytes */ - rlen = 0; - m = token; - token = NULL; - r->type = redis_parse_cmd_verb(m, p - m); - if (r->type == CMD_UNKNOWN) { - goto error; - } - - state = SW_CMD_TYPE_LF; - break; - - case SW_CMD_TYPE_LF: - switch (ch) { - case LF: - if (redis_argz(r)) { - goto done; - } else if (redis_argeval(r) || redis_argsub(r)) { - state = SW_ARG1_LEN; - } else { - state = SW_KEY_LEN; - } - break; - - default: - goto error; - } - - break; - - case SW_KEY_LEN: - if (token == NULL) { - if (ch != '$') { - goto error; - } - token = p; - rlen = 0; - } else if (isdigit(ch)) { - rlen = rlen * 10 + (uint32_t)(ch - '0'); - } else if (ch == CR) { - - if (rnarg == 0) { - goto error; - } - rnarg--; - token = NULL; - state = SW_KEY_LEN_LF; - } else { - goto error; - } - - break; - - case SW_KEY_LEN_LF: - switch (ch) { - case LF: - state = SW_KEY; - break; - - default: - goto error; - } - - break; - - case SW_KEY: - if (token == NULL) { - token = p; - } - - m = token + rlen; - if (m >= cmd_end) { - goto error; - } - - if (*m != CR) { - goto error; - } else { /* got a key */ - struct keypos *kpos; - - p = m; /* move forward by rlen bytes */ - rlen = 0; - m = token; - token = NULL; - - kpos = hiarray_push(r->keys); - if (kpos == NULL) { - goto oom; - } - kpos->start = m; - kpos->end = p; - - state = SW_KEY_LF; - } - - break; - - case SW_KEY_LF: - switch (ch) { - case LF: - if (redis_arg0(r)) { - if (rnarg != 0) { - goto error; - } - goto done; - } else if (redis_arg1(r)) { - if (rnarg != 1) { - goto error; - } - state = SW_ARG1_LEN; - } else if (redis_arg2(r)) { - if (rnarg != 2) { - goto error; - } - state = SW_ARG1_LEN; - } else if (redis_arg3(r)) { - if (rnarg != 3) { - goto error; - } - state = SW_ARG1_LEN; - } else if (redis_argn(r)) { - if (rnarg == 0) { - goto done; - } - state = SW_ARG1_LEN; - } else if 
(redis_argx(r)) { - if (rnarg == 0) { - goto done; - } - state = SW_KEY_LEN; - } else if (redis_argkvx(r)) { - if (rnarg == 0) { - goto done; - } - if (r->narg % 2 == 0) { - goto error; - } - state = SW_ARG1_LEN; - } else if (redis_argeval(r) || redis_argsub(r)) { - if (rnarg == 0) { - goto done; - } - state = SW_ARGN_LEN; - } else { - goto error; - } - - break; - - default: - goto error; - } - - break; - - case SW_ARG1_LEN: - if (token == NULL) { - if (ch != '$') { - goto error; - } - rlen = 0; - token = p; - } else if (isdigit(ch)) { - rlen = rlen * 10 + (uint32_t)(ch - '0'); - } else if (ch == CR) { - if ((p - token) <= 1 || rnarg == 0) { - goto error; - } - rnarg--; - token = NULL; - - /* - //for mset value length - if(redis_argkvx(r)) - { - struct keypos *kpos; - uint32_t array_len = array_n(r->keys); - if(array_len == 0) - { - goto error; - } - - kpos = array_n(r->keys, array_len-1); - if (kpos == NULL || kpos->v_len != 0) { - goto error; - } - - kpos->v_len = rlen; - } - */ - state = SW_ARG1_LEN_LF; - } else { - goto error; - } - - break; - - case SW_ARG1_LEN_LF: - switch (ch) { - case LF: - state = SW_ARG1; - break; - - default: - goto error; - } - - break; - - case SW_ARG1: - m = p + rlen; - if (m >= cmd_end) { - // Moving past the end, not good.. 
- goto error; - } - - if (*m != CR) { - goto error; - } - - p = m; /* move forward by rlen bytes */ - rlen = 0; - - state = SW_ARG1_LF; - - break; - - case SW_ARG1_LF: - // Check that the command parser has enough - // arguments left to be acceptable - // rnarg is the number of arguments after the first argument - switch (ch) { - case LF: - if (redis_arg1(r)) { - if (rnarg != 0) { - goto error; - } - goto done; - } else if (redis_arg2(r)) { - if (rnarg != 1) { - goto error; - } - state = SW_ARG2_LEN; - } else if (redis_arg3(r)) { - if (rnarg != 2) { - goto error; - } - state = SW_ARG2_LEN; - } else if (redis_argn(r)) { - if (rnarg == 0) { - goto done; - } - state = SW_ARGN_LEN; - } else if (redis_argeval(r)) { - // EVAL command layout: - // eval