diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 2bf4759d..9c8d8953 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -13,17 +13,20 @@ jobs:
     runs-on: ubuntu-latest
     steps:
 
-    - name: Set up Go 1.x
-      uses: actions/setup-go@v2
-      with:
-        go-version: ^1.22
-      id: go
+    - name: Check out code
+      uses: actions/checkout@v4
 
-    - name: Check out code into the Go module directory
-      uses: actions/checkout@v2
+    - name: Set up Go
+      uses: actions/setup-go@v5
+      with:
+        go-version: ^1.24
+        cache: true
 
     - name: Build
-      run: go build -v -ldflags "-s -w -X main.version=$GITHUB_REF -X main.commit=${GITHUB_SHA::8}" ./cmd/kaf
+      run: go build -v -ldflags "-s -w -X main.version=${{ github.ref_name }} -X main.commit=${GITHUB_SHA::8}" ./cmd/kaf
+
+    - name: Vet
+      run: go vet ./...
 
     - name: Test
-      run: go test -v ./...
+      run: go test -v -short ./...
diff --git a/.gitignore b/.gitignore
index 9708ffee..b5802c04 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,6 @@
 /dist
 /kaf*
 .idea
 
+REVIEW.md
+WORK.md
diff --git a/cmd/kaf/completion.go b/cmd/kaf/completion.go
deleted file mode 100644
index 199dd019..00000000
--- a/cmd/kaf/completion.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package main
-
-import (
-	"github.com/spf13/cobra"
-)
-
-func init() {
-	rootCmd.AddCommand(completionCmd)
-}
-
-var completionCmd = &cobra.Command{
-	Use:   "completion [SHELL]",
-	Short: "Generate completion script for bash, zsh, fish or powershell",
-	Long: `To load completions:
-
-Bash:
-
-$ source <(kaf completion bash)
-
-# To load completions for each session, execute once:
-Linux:
-  $ kaf completion bash > /etc/bash_completion.d/kaf
-MacOS:
-  $ kaf completion bash > /usr/local/etc/bash_completion.d/kaf
-
-Zsh:
-
-# To load completions for each session, execute once:
-$ kaf completion zsh > "${fpath[1]}/_kaf"
-
-# You will need to start a new shell for this setup to take effect.
- -Fish: - -$ kaf completion fish | source - -# To load completions for each session, execute once: -$ kaf completion fish > ~/.config/fish/completions/kaf.fish -`, - DisableFlagsInUseLine: true, - Args: cobra.ExactValidArgs(1), - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - err := rootCmd.GenBashCompletion(outWriter) - if err != nil { - errorExit("Failed to generate bash completion: %w", err) - } - case "zsh": - if err := rootCmd.GenZshCompletion(outWriter); err != nil { - errorExit("Failed to generate zsh completion: %w", err) - } - case "fish": - if err := rootCmd.GenFishCompletion(outWriter, true); err != nil { - errorExit("Failed to generate fish completion: %w", err) - } - case "powershell": - err := rootCmd.GenPowerShellCompletion(outWriter) - if err != nil { - errorExit("Failed to generate powershell completion: %w", err) - } - } - }, -} diff --git a/cmd/kaf/config.go b/cmd/kaf/config.go deleted file mode 100644 index d788f628..00000000 --- a/cmd/kaf/config.go +++ /dev/null @@ -1,271 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "regexp" - - "github.com/IBM/sarama" - "github.com/birdayz/kaf/pkg/config" - "github.com/manifoldco/promptui" - "github.com/spf13/cobra" -) - -var ( - flagEhConnString string - flagBrokerVersion string -) - -func init() { - configCmd.AddCommand(configImportCmd) - configCmd.AddCommand(configUseCmd) - configCmd.AddCommand(configLsCmd) - configCmd.AddCommand(configAddClusterCmd) - configCmd.AddCommand(configRemoveClusterCmd) - configCmd.AddCommand(configSelectCluster) - configCmd.AddCommand(configCurrentContext) - configCmd.AddCommand(configAddEventhub) - rootCmd.AddCommand(configCmd) - - configLsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") - configAddEventhub.Flags().StringVar(&flagEhConnString, "eh-connstring", "", "EventHub ConnectionString") - configAddClusterCmd.Flags().StringVar(&flagBrokerVersion, "broker-version", "", fmt.Sprintf("Broker Version. 
Available Versions: %v", sarama.SupportedVersions)) -} - -var configCmd = &cobra.Command{ - Use: "config", - Short: "Handle kaf configuration", -} - -var configCurrentContext = &cobra.Command{ - Use: "current-context", - Short: "Displays the current context", - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println(cfg.CurrentCluster) - }, -} - -var configUseCmd = &cobra.Command{ - Use: "use-cluster [NAME]", - Short: "Sets the current cluster in the configuration", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validConfigArgs, - Run: func(cmd *cobra.Command, args []string) { - name := args[0] - if err := cfg.SetCurrentCluster(name); err != nil { - fmt.Printf("Cluster with name %v not found\n", name) - } else { - fmt.Printf("Switched to cluster \"%v\".\n", name) - } - }, -} - -var configLsCmd = &cobra.Command{ - Use: "get-clusters", - Short: "Display clusters in the configuration file", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - if !noHeaderFlag { - fmt.Println("NAME") - } - for _, cluster := range cfg.Clusters { - fmt.Println(cluster.Name) - } - }, -} - -var configAddEventhub = &cobra.Command{ - Use: "add-eventhub [NAME]", - Example: "esp config add-eventhub my-eventhub --eh-connstring 'Endpoint=sb://......AccessKey=....'", - Short: "Add Azure EventHub", - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - name := args[0] - for _, cluster := range cfg.Clusters { - if cluster.Name == name { - errorExit("Could not add cluster: cluster with name '%v' exists already.", name) - } - } - - // Parse hub name from ConnString - r, _ := regexp.Compile(`^Endpoint=sb://(.*)\.servicebus.*$`) - hubName := r.FindStringSubmatch(flagEhConnString) - if len(hubName) != 2 { - errorExit("Failed to determine EventHub name from Connection String. 
Check your ConnectionString") - } - - cfg.Clusters = append(cfg.Clusters, &config.Cluster{ - Name: name, - Brokers: []string{hubName[1] + ".servicebus.windows.net:9093"}, - SchemaRegistryURL: schemaRegistryURL, - SASL: &config.SASL{ - Mechanism: "PLAIN", - Username: "$ConnectionString", - Password: flagEhConnString, - }, - SecurityProtocol: "SASL_SSL", - }) - err := cfg.Write() - if err != nil { - errorExit("Unable to write config: %v\n", err) - } - fmt.Println("Added EventHub.") - }, -} - -var configSelectCluster = &cobra.Command{ - Use: "select-cluster", - Aliases: []string{"ls"}, - Short: "Interactively select a cluster", - Run: func(cmd *cobra.Command, args []string) { - var clusterNames []string - var pos = 0 - for k, cluster := range cfg.Clusters { - clusterNames = append(clusterNames, cluster.Name) - if cluster.Name == cfg.CurrentCluster { - pos = k - } - } - - searcher := func(input string, index int) bool { - cluster := clusterNames[index] - name := strings.Replace(strings.ToLower(cluster), " ", "", -1) - input = strings.Replace(strings.ToLower(input), " ", "", -1) - return strings.Contains(name, input) - } - - p := promptui.Select{ - Label: "Select cluster", - Items: clusterNames, - Searcher: searcher, - Size: 10, - CursorPos: pos, - } - - _, selected, err := p.Run() - if err != nil { - os.Exit(0) - } - - // TODO copy pasta - if err := cfg.SetCurrentCluster(selected); err != nil { - fmt.Printf("Cluster with selected %v not found\n", selected) - } - }, -} - -var configAddClusterCmd = &cobra.Command{ - Use: "add-cluster [NAME]", - Short: "Add cluster", - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - name := args[0] - for _, cluster := range cfg.Clusters { - if cluster.Name == name { - errorExit("Could not add cluster: cluster with name '%v' exists already.", name) - } - } - - cfg.Clusters = append(cfg.Clusters, &config.Cluster{ - Name: name, - Brokers: brokersFlag, - SchemaRegistryURL: schemaRegistryURL, - Version: flagBrokerVersion, - }) - err := cfg.Write() - if err != nil { - errorExit("Unable to write config: %v\n", err) - } - fmt.Println("Added cluster.") - }, -} - -var configRemoveClusterCmd = &cobra.Command{ - Use: "remove-cluster [NAME]", - Short: "remove cluster", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validConfigArgs, - Run: func(cmd *cobra.Command, args []string) { - name := args[0] - - var pos = -1 - for i, cluster := range cfg.Clusters { - if cluster.Name == name { - pos = i - break - } - } - - if pos == -1 { - errorExit("Could not delete cluster: cluster with name '%v' not exists.", name) - } - - cfg.Clusters = append(cfg.Clusters[:pos], cfg.Clusters[pos+1:]...) 
- - err := cfg.Write() - if err != nil { - errorExit("Unable to write config: %v\n", err) - } - fmt.Println("Removed cluster.") - }, -} - -var configImportCmd = &cobra.Command{ - Use: "import [ccloud]", - Short: "Import configurations into the $HOME/.kaf/config file", - Run: func(cmd *cobra.Command, args []string) { - if path, err := config.TryFindCcloudConfigFile(); err == nil { - fmt.Printf("Detected Confluent Cloud config in file %v\n", path) - if username, password, broker, err := config.ParseConfluentCloudConfig(path); err == nil { - - newCluster := &config.Cluster{ - Name: "ccloud", - Brokers: []string{broker}, - SASL: &config.SASL{ - Username: username, - Password: password, - Mechanism: "PLAIN", - }, - SecurityProtocol: "SASL_SSL", - } - - var found bool - for i, newCluster := range cfg.Clusters { - if newCluster.Name == "confluent cloud" { - found = true - cfg.Clusters[i] = newCluster - break - } - } - - if !found { - fmt.Println("Wrote new entry to config file") - cfg.Clusters = append(cfg.Clusters, newCluster) - } - - if cfg.CurrentCluster == "" { - cfg.CurrentCluster = newCluster.Name - } - err = cfg.Write() - if err != nil { - errorExit("Failed to write config: %w", err) - } - - } - } - }, - ValidArgs: []string{"ccloud"}, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.OnlyValidArgs(cmd, args); err != nil { - return err - } - - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return err - } - return nil - }, -} diff --git a/cmd/kaf/consume.go b/cmd/kaf/consume.go deleted file mode 100644 index c0a4f684..00000000 --- a/cmd/kaf/consume.go +++ /dev/null @@ -1,518 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "strconv" - "strings" - "sync" - "text/tabwriter" - - "github.com/IBM/sarama" - "github.com/birdayz/kaf/pkg/avro" - "github.com/birdayz/kaf/pkg/proto" - "github.com/golang/protobuf/jsonpb" - "github.com/hokaccha/go-prettyjson" - "github.com/spf13/cobra" - "github.com/vmihailenco/msgpack/v5" -) - -var ( - offsetFlag string - groupFlag string - groupCommitFlag bool - outputFormat = OutputFormatDefault - // Deprecated: Use outputFormat instead. - raw bool - follow bool - tail int32 - schemaCache *avro.SchemaCache - keyfmt *prettyjson.Formatter - - protoType string - keyProtoType string - - flagPartitions []int32 - - limitMessagesFlag int64 - - reg *proto.DescriptorRegistry - - headerFilterFlag []string - headerFilter = make(map[string]string) -) - -func init() { - rootCmd.AddCommand(consumeCmd) - consumeCmd.Flags().StringVar(&offsetFlag, "offset", "oldest", "Offset to start consuming. 
Possible values: oldest, newest, or integer.") - consumeCmd.Flags().BoolVar(&raw, "raw", false, "Print raw output of messages, without key or prettified JSON") - consumeCmd.Flags().Var(&outputFormat, "output", "Set output format messages: default, raw (without key or prettified JSON), hex (without key or prettified JSON), json, json-each-row") - consumeCmd.Flags().BoolVarP(&follow, "follow", "f", false, "Continue to consume messages until program execution is interrupted/terminated") - consumeCmd.Flags().Int32VarP(&tail, "tail", "n", 0, "Print last n messages per partition") - consumeCmd.Flags().StringSliceVar(&protoFiles, "proto-include", []string{}, "Path to proto files") - consumeCmd.Flags().StringSliceVar(&protoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") - consumeCmd.Flags().BoolVar(&decodeMsgPack, "decode-msgpack", false, "Enable deserializing msgpack") - consumeCmd.Flags().StringVar(&protoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") - consumeCmd.Flags().StringVar(&keyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. Example: com.test.SampleMessage") - consumeCmd.Flags().Int32SliceVarP(&flagPartitions, "partitions", "p", []int32{}, "Partitions to consume from") - consumeCmd.Flags().Int64VarP(&limitMessagesFlag, "limit-messages", "l", 0, "Limit messages per partition") - consumeCmd.Flags().StringVarP(&groupFlag, "group", "g", "", "Consumer Group to use for consume") - consumeCmd.Flags().BoolVar(&groupCommitFlag, "commit", false, "Commit Group offset after receiving messages. Works only if consuming as Consumer Group") - consumeCmd.Flags().StringSliceVar(&headerFilterFlag, "header", []string{}, "Filter messages by header. Format: key:value. Multiple filters can be specified") - - if err := consumeCmd.RegisterFlagCompletionFunc("output", completeOutputFormat); err != nil { - errorExit("Failed to register flag completion: %v", err) - } - - if err := consumeCmd.Flags().MarkDeprecated("raw", "use --output raw instead"); err != nil { - errorExit("Failed to mark flag as deprecated: %v", err) - } - - keyfmt = prettyjson.NewFormatter() - keyfmt.Newline = " " // Replace newline with space to avoid condensed output. - keyfmt.Indent = 0 -} - -type offsets struct { - newest int64 - oldest int64 -} - -func getOffsets(client sarama.Client, topic string, partition int32) (*offsets, error) { - newest, err := client.GetOffset(topic, partition, sarama.OffsetNewest) - if err != nil { - return nil, err - } - - oldest, err := client.GetOffset(topic, partition, sarama.OffsetOldest) - if err != nil { - return nil, err - } - - return &offsets{ - newest: newest, - oldest: oldest, - }, nil -} - -var consumeCmd = &cobra.Command{ - Use: "consume TOPIC", - Short: "Consume messages", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validTopicArgs, - PreRun: setupProtoDescriptorRegistry, - Run: func(cmd *cobra.Command, args []string) { - var offset int64 - cfg := getConfig() - topic := args[0] - client := getClientFromConfig(cfg) - - // Allow deprecated flag to override when outputFormat is not specified, or default. 
- if outputFormat == OutputFormatDefault && raw { - outputFormat = OutputFormatRaw - } - - switch offsetFlag { - case "oldest": - offset = sarama.OffsetOldest - cfg.Consumer.Offsets.Initial = sarama.OffsetOldest - case "newest": - offset = sarama.OffsetNewest - cfg.Consumer.Offsets.Initial = sarama.OffsetNewest - default: - o, err := strconv.ParseInt(offsetFlag, 10, 64) - if err != nil { - errorExit("Could not parse '%s' to int64: %w", offsetFlag, err) - } - offset = o - } - - for _, f := range headerFilterFlag { - parts := strings.SplitN(f, ":", 2) - if len(parts) != 2 { - errorExit("Invalid header filter format: %s. Expected format: key:value", f) - } - headerFilter[parts[0]] = parts[1] - } - - if groupFlag != "" { - withConsumerGroup(cmd.Context(), client, topic, groupFlag) - } else { - withoutConsumerGroup(cmd.Context(), client, topic, offset) - } - - }, -} - -type g struct{} - -func (g *g) Setup(s sarama.ConsumerGroupSession) error { - return nil -} - -func (g *g) Cleanup(s sarama.ConsumerGroupSession) error { - return nil -} - -func (g *g) ConsumeClaim(s sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - - mu := sync.Mutex{} // Synchronizes stderr and stdout. - for msg := range claim.Messages() { - handleMessage(msg, &mu) - if groupCommitFlag { - s.MarkMessage(msg, "") - } - } - return nil -} - -func withConsumerGroup(ctx context.Context, client sarama.Client, topic, group string) { - cg, err := sarama.NewConsumerGroupFromClient(group, client) - if err != nil { - errorExit("Failed to create consumer group: %v", err) - } - - schemaCache = getSchemaCache() - - err = cg.Consume(ctx, []string{topic}, &g{}) - if err != nil { - errorExit("Error on consume: %v", err) - } -} - -func withoutConsumerGroup(ctx context.Context, client sarama.Client, topic string, offset int64) { - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - errorExit("Unable to create consumer from client: %v\n", err) - } - - var partitions []int32 - if len(flagPartitions) == 0 { - partitions, err = consumer.Partitions(topic) - if err != nil { - errorExit("Unable to get partitions: %v\n", err) - } - } else { - partitions = flagPartitions - } - - schemaCache = getSchemaCache() - - wg := sync.WaitGroup{} - mu := sync.Mutex{} // Synchronizes stderr and stdout. 
- for _, partition := range partitions { - wg.Add(1) - - go func(partition int32, offset int64) { - defer wg.Done() - - offsets, err := getOffsets(client, topic, partition) - if err != nil { - errorExit("Failed to get %s offsets for partition %d: %w", topic, partition, err) - } - - if tail != 0 { - offset = offsets.newest - int64(tail) - if offset < offsets.oldest { - offset = offsets.oldest - } - } - - // Already at end of partition, return early - if !follow && offsets.newest == offsets.oldest { - return - } - - pc, err := consumer.ConsumePartition(topic, partition, offset) - if err != nil { - errorExit("Unable to consume partition: %v %v %v %v\n", topic, partition, offset, err) - } - - var count int64 = 0 - for { - select { - case <-ctx.Done(): - return - case msg := <-pc.Messages(): - handleMessage(msg, &mu) - count++ - if limitMessagesFlag > 0 && count >= limitMessagesFlag { - return - } - if !follow && msg.Offset+1 >= pc.HighWaterMarkOffset() { - return - } - } - } - }(partition, offset) - } - wg.Wait() -} - -func checkHeaders(headers []*sarama.RecordHeader, filter map[string]string) bool { - if len(filter) == 0 { - return true - } - - matchCount := 0 - for _, h := range headers { - hdrStr := parseHeader(h.Value) - if val, ok := filter[string(h.Key)]; ok && hdrStr == val { - matchCount++ - } - } - - return len(headers) > 0 && matchCount >= len(headers) -} - -func handleMessage(msg *sarama.ConsumerMessage, mu *sync.Mutex) { - if !checkHeaders(msg.Headers, headerFilter) { - return - } - - var stderr bytes.Buffer - - var dataToDisplay []byte - var keyToDisplay []byte - var err error - - if protoType != "" { - dataToDisplay, err = protoDecode(reg, msg.Value, protoType) - if err != nil { - fmt.Fprintf(&stderr, "failed to decode proto. falling back to binary outputla. Error: %v\n", err) - } - } else { - dataToDisplay, err = avroDecode(msg.Value) - if err != nil { - fmt.Fprintf(&stderr, "could not decode Avro data: %v\n", err) - } - } - - if keyProtoType != "" { - keyToDisplay, err = protoDecode(reg, msg.Key, keyProtoType) - if err != nil { - fmt.Fprintf(&stderr, "failed to decode proto key. falling back to binary outputla. 
Error: %v\n", err) - } - } else { - keyToDisplay, err = avroDecode(msg.Key) - if err != nil { - fmt.Fprintf(&stderr, "could not decode Avro data: %v\n", err) - } - } - - if decodeMsgPack { - var obj interface{} - err = msgpack.Unmarshal(msg.Value, &obj) - if err != nil { - fmt.Fprintf(&stderr, "could not decode msgpack data: %v\n", err) - } - - dataToDisplay, err = json.Marshal(obj) - if err != nil { - fmt.Fprintf(&stderr, "could not decode msgpack data: %v\n", err) - } - } - - dataToDisplay = formatMessage(msg, dataToDisplay, keyToDisplay, &stderr) - - mu.Lock() - stderr.WriteTo(errWriter) - _, _ = colorableOut.Write(dataToDisplay) - fmt.Fprintln(outWriter) - mu.Unlock() -} - -func parseHeader(hdrBytes []byte) (hdrStr string) { - // Try to detect azure eventhub-specific encoding - if len(hdrBytes) > 0 { - switch hdrBytes[0] { - case 161: - hdrStr = string(hdrBytes[2 : 2+hdrBytes[1]]) - case 131: - hdrStr = strconv.FormatUint(binary.BigEndian.Uint64(hdrBytes[1:9]), 10) - default: - hdrStr = string(hdrBytes) - } - } - - return hdrStr -} - -func formatMessage(msg *sarama.ConsumerMessage, rawMessage []byte, keyToDisplay []byte, stderr *bytes.Buffer) []byte { - switch outputFormat { - case OutputFormatRaw: - return rawMessage - case OutputFormatJSON: - jsonMessage := make(map[string]interface{}) - - jsonMessage["partition"] = msg.Partition - jsonMessage["offset"] = msg.Offset - jsonMessage["timestamp"] = msg.Timestamp - - if len(msg.Headers) > 0 { - jsonMessage["headers"] = msg.Headers - } - - jsonMessage["key"] = formatJSON(keyToDisplay) - jsonMessage["payload"] = formatJSON(rawMessage) - - jsonToDisplay, err := json.Marshal(jsonMessage) - if err != nil { - fmt.Fprintf(stderr, "could not decode JSON data: %v", err) - } - - return jsonToDisplay - case OutputFormatJSONEachRow: - jsonMessage := JSONEachRowMessage{} - jsonMessage.Topic = msg.Topic - jsonMessage.Partition = msg.Partition - jsonMessage.Offset = msg.Offset - jsonMessage.Timestamp = msg.Timestamp - jsonMessage.Headers = make([]MessageHeader, len(msg.Headers)) - for i, hdr := range msg.Headers { - hdrStr := parseHeader(hdr.Value) - jsonMessage.Headers[i] = MessageHeader{ - Key: string(hdr.Key), - Value: hdrStr, - } - } - jsonMessage.Key = string(keyToDisplay) - jsonMessage.Payload = string(rawMessage) - - jsonToDisplay, err := json.Marshal(jsonMessage) - if err != nil { - fmt.Fprintf(stderr, "could not decode JSON data: %v", err) - } - - return jsonToDisplay - case OutputFormatHex: - return []byte(hex.EncodeToString(rawMessage)) - case OutputFormatDefault: - fallthrough - default: - if isJSON(rawMessage) { - rawMessage = formatValue(rawMessage) - } - - if isJSON(keyToDisplay) { - keyToDisplay = formatKey(keyToDisplay) - } - - w := tabwriter.NewWriter(stderr, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - - if len(msg.Headers) > 0 { - fmt.Fprintf(w, "Headers:\n") - } - - for _, hdr := range msg.Headers { - hdrStr := parseHeader(hdr.Value) - fmt.Fprintf(w, "\tKey: %v\tValue: %v\n", string(hdr.Key), hdrStr) - } - - if len(msg.Key) > 0 { - fmt.Fprintf(w, "Key:\t%v\n", string(keyToDisplay)) - } - fmt.Fprintf(w, "Partition:\t%v\nOffset:\t%v\nTimestamp:\t%v\n", msg.Partition, msg.Offset, msg.Timestamp) - w.Flush() - - return rawMessage - } -} - -// proto to JSON -func protoDecode(reg *proto.DescriptorRegistry, b []byte, _type string) ([]byte, error) { - dynamicMessage := reg.MessageForType(_type) - if dynamicMessage == nil { - return b, nil - } - - err := dynamicMessage.Unmarshal(b) - if err != nil { 
- return nil, err - } - - var m jsonpb.Marshaler - var w bytes.Buffer - - err = m.Marshal(&w, dynamicMessage) - if err != nil { - return nil, err - } - return w.Bytes(), nil - -} - -func avroDecode(b []byte) ([]byte, error) { - if schemaCache != nil { - return schemaCache.DecodeMessage(b) - } - return b, nil -} - -func formatKey(key []byte) []byte { - if b, err := keyfmt.Format(key); err == nil { - return b - } - return key - -} - -func formatValue(key []byte) []byte { - if b, err := prettyjson.Format(key); err == nil { - return b - } - return key -} - -func formatJSON(data []byte) interface{} { - var i interface{} - if err := json.Unmarshal(data, &i); err != nil { - return string(data) - } - - return i -} - -func isJSON(data []byte) bool { - var i interface{} - if err := json.Unmarshal(data, &i); err == nil { - return true - } - return false -} - -type OutputFormat string - -const ( - OutputFormatDefault OutputFormat = "default" - OutputFormatRaw OutputFormat = "raw" - OutputFormatJSON OutputFormat = "json" - OutputFormatJSONEachRow OutputFormat = "json-each-row" - OutputFormatHex OutputFormat = "hex" -) - -func (e *OutputFormat) String() string { - return string(*e) -} - -func (e *OutputFormat) Set(v string) error { - switch v { - case "default", "raw", "json", "json-each-row", "hex": - *e = OutputFormat(v) - return nil - default: - return fmt.Errorf("must be one of: default, raw, json, json-each-row, hex") - } -} - -func (e *OutputFormat) Type() string { - return "OutputFormat" -} - -func completeOutputFormat(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"default", "raw", "json", "json-each-row", "hex"}, cobra.ShellCompDirectiveNoFileComp -} diff --git a/cmd/kaf/consume_test.go b/cmd/kaf/consume_test.go deleted file mode 100644 index b14a5f9a..00000000 --- a/cmd/kaf/consume_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package main - -import ( - "bytes" - "testing" - - "github.com/IBM/sarama" -) - -func TestCheckHeaders(t *testing.T) { - tests := []struct { - name string - headerFilter map[string]string - headers []*sarama.RecordHeader - want bool - }{ - { - name: "no filter", - headerFilter: map[string]string{}, - headers: []*sarama.RecordHeader{ - {Key: []byte("a"), Value: []byte("b")}, - }, - want: true, - }, - { - name: "matching header", - headerFilter: map[string]string{ - "a": "b", - }, - headers: []*sarama.RecordHeader{ - {Key: []byte("a"), Value: []byte("b")}, - }, - want: true, - }, - { - name: "non-matching header value", - headerFilter: map[string]string{ - "a": "c", - }, - headers: []*sarama.RecordHeader{ - {Key: []byte("a"), Value: []byte("b")}, - }, - want: false, - }, - { - name: "non-matching header key", - headerFilter: map[string]string{ - "c": "b", - }, - headers: []*sarama.RecordHeader{ - {Key: []byte("a"), Value: []byte("b")}, - }, - want: false, - }, - { - name: "multiple filters match", - headerFilter: map[string]string{ - "a": "b", - "c": "d", - }, - headers: []*sarama.RecordHeader{ - {Key: []byte("a"), Value: []byte("b")}, - {Key: []byte("c"), Value: []byte("d")}, - }, - want: true, - }, - { - name: "multiple filters one mismatch", - headerFilter: map[string]string{ - "a": "b", - "c": "e", - }, - headers: []*sarama.RecordHeader{ - {Key: []byte("a"), Value: []byte("b")}, - {Key: []byte("c"), Value: []byte("d")}, - }, - want: false, - }, - { - name: "no headers with filter", - headerFilter: map[string]string{"a": "b"}, - headers: []*sarama.RecordHeader{}, - want: false, - }, - } - - for _, tt := range 
tests { - t.Run(tt.name, func(t *testing.T) { - if got := checkHeaders(tt.headers, tt.headerFilter); got != tt.want { - t.Errorf("checkHeaders() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestFormatMessage(t *testing.T) { - tests := []struct { - outputFormat OutputFormat - name string - kafkaMsg sarama.ConsumerMessage - rawMessage []byte - keyToDisplay []byte - stderr bytes.Buffer - want []byte - }{ - { - outputFormat: OutputFormatHex, - name: "hex format string", - kafkaMsg: sarama.ConsumerMessage{Topic: "test"}, - rawMessage: []byte("hello world"), - keyToDisplay: []byte("key1"), - want: []byte("68656c6c6f20776f726c64"), - }, - } - - for _, tt := range tests { - // NOTE: this test mutates global state (outputFormat), must not run in parallel. - t.Run(tt.name, func(t *testing.T) { - outputFormat = tt.outputFormat - if got := formatMessage(&tt.kafkaMsg, tt.rawMessage, tt.keyToDisplay, &tt.stderr); !bytes.Equal(got, tt.want) { - t.Errorf("formatMessage() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/cmd/kaf/group.go b/cmd/kaf/group.go deleted file mode 100644 index 02e8bbd5..00000000 --- a/cmd/kaf/group.go +++ /dev/null @@ -1,691 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "slices" - "sort" - "unicode" - - "text/tabwriter" - - "encoding/base64" - "encoding/hex" - - "sync" - - "github.com/IBM/sarama" - "github.com/birdayz/kaf/pkg/streams" - "github.com/manifoldco/promptui" - "github.com/spf13/cobra" - - "strconv" - - "time" -) - -var ( - flagPeekPartitions []int32 - flagPeekBefore int64 - flagPeekAfter int64 - flagPeekTopics []string - - flagNoMembers bool - flagDescribeTopics []string -) - -func init() { - rootCmd.AddCommand(groupCmd) - rootCmd.AddCommand(groupsCmd) - groupCmd.AddCommand(groupDescribeCmd) - groupCmd.AddCommand(groupLsCmd) - groupCmd.AddCommand(groupDeleteCmd) - groupCmd.AddCommand(groupPeekCmd) - groupCmd.AddCommand(createGroupCommitOffsetCmd()) - - groupLsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") - groupsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") - - groupPeekCmd.Flags().StringSliceVarP(&flagPeekTopics, "topics", "t", []string{}, "Topics to peek from") - groupPeekCmd.Flags().Int32SliceVarP(&flagPeekPartitions, "partitions", "p", []int32{}, "Partitions to peek from") - groupPeekCmd.Flags().Int64VarP(&flagPeekBefore, "before", "B", 0, "Number of messages to peek before current offset") - groupPeekCmd.Flags().Int64VarP(&flagPeekAfter, "after", "A", 0, "Number of messages to peek after current offset") - - groupDescribeCmd.Flags().BoolVar(&flagNoMembers, "no-members", false, "Hide members section of the output") - groupDescribeCmd.Flags().StringSliceVarP(&flagDescribeTopics, "topic", "t", []string{}, "topics to display for the group. 
defaults to all topics.") -} - -const ( - tabwriterMinWidth = 6 - tabwriterMinWidthNested = 2 - tabwriterWidth = 4 - tabwriterPadding = 3 - tabwriterPadChar = ' ' - tabwriterFlags = 0 -) - -var groupCmd = &cobra.Command{ - Use: "group", - Short: "Display information about consumer groups.", -} - -var groupsCmd = &cobra.Command{ - Use: "groups", - Short: "List groups", - Run: groupLsCmd.Run, -} - -var groupDeleteCmd = &cobra.Command{ - Use: "delete", - Short: "Delete group", - Args: cobra.MaximumNArgs(1), - ValidArgsFunction: validGroupArgs, - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - var group string - if len(args) == 1 { - group = args[0] - } - err := admin.DeleteConsumerGroup(group) - if err != nil { - errorExit("Could not delete consumer group %v: %v\n", group, err.Error()) - } else { - fmt.Printf("Deleted consumer group %v.\n", group) - } - - }, -} - -type resetHandler struct { - topic string - partitionOffsets map[int32]int64 - offset int64 - client sarama.Client - group string -} - -func (r *resetHandler) Setup(s sarama.ConsumerGroupSession) error { - req := &sarama.OffsetCommitRequest{ - Version: 1, - ConsumerGroup: r.group, - ConsumerGroupGeneration: s.GenerationID(), - ConsumerID: s.MemberID(), - } - - for p, o := range r.partitionOffsets { - req.AddBlock(r.topic, p, o, 0, "") - } - br, err := r.client.Coordinator(r.group) - if err != nil { - return err - } - _ = br.Open(getConfig()) - _, err = br.CommitOffset(req) - if err != nil { - return err - } - return nil -} - -func (r *resetHandler) Cleanup(s sarama.ConsumerGroupSession) error { - return nil -} - -func (r *resetHandler) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error { - return nil -} - -func createGroupCommitOffsetCmd() *cobra.Command { - var topic string - var offset string - var partitionFlag int32 - var allPartitions bool - var offsetMap string - var noconfirm bool - res := &cobra.Command{ - Use: "commit GROUP", - Short: "Set offset for given consumer group", - Long: "Set offset for a given consumer group, creates one if it does not exist. Offsets cannot be set on a consumer group with active consumers.", - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - client := getClient() - - group := args[0] - partitionOffsets := make(map[int32]int64) - - if offsetMap != "" { - if err := json.Unmarshal([]byte(offsetMap), &partitionOffsets); err != nil { - errorExit("Wrong --offset-map format. 
Use JSON with keys as partition numbers and values as offsets.\nExample: --offset-map '{\"0\":123, \"1\":135, \"2\":120}'\n") - } - } else { - var partitions []int32 - if allPartitions { - // Determine partitions - admin := getClusterAdmin() - topicDetails, err := admin.DescribeTopics([]string{topic}) - if err != nil { - errorExit("Unable to determine partitions of topic: %v\n", err) - } - - detail := topicDetails[0] - - for _, p := range detail.Partitions { - partitions = append(partitions, p.ID) - } - } else if partitionFlag != -1 { - partitions = []int32{partitionFlag} - } else { - errorExit("Either --partition, --all-partitions or --offset-map flag must be provided") - } - - sort.Slice(partitions, func(i int, j int) bool { return partitions[i] < partitions[j] }) - - type Assignment struct { - partition int32 - offset int64 - } - assignments := make(chan Assignment, len(partitions)) - - // TODO offset must be calced per partition - var wg sync.WaitGroup - for _, partition := range partitions { - wg.Add(1) - go func(partition int32) { - defer wg.Done() - i, err := strconv.ParseInt(offset, 10, 64) - if err != nil { - // Try oldest/newest/.. - if offset == "oldest" || offset == "earliest" { - i = sarama.OffsetOldest - } else if offset == "newest" || offset == "latest" { - i = sarama.OffsetNewest - } else { - // Try timestamp - t, err := time.Parse(time.RFC3339, offset) - if err != nil { - errorExit("offset is neither offset nor timestamp", nil) - } - i = t.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) - } - - o, err := client.GetOffset(topic, partition, i) - if err != nil { - errorExit("Failed to determine offset for timestamp: %v", err) - } - - if o == -1 { - fmt.Printf("Partition %v: could not determine offset from timestamp. Skipping.\n", partition) - return - //errorExit("Determined offset -1 from timestamp. 
Skipping.", o) - } - - assignments <- Assignment{partition: partition, offset: o} - - fmt.Printf("Partition %v: determined offset %v from timestamp.\n", partition, o) - } else { - assignments <- Assignment{partition: partition, offset: i} - } - }(partition) - } - wg.Wait() - close(assignments) - - for assign := range assignments { - partitionOffsets[assign.partition] = assign.offset - } - } - - // Verify the Consumer Group is Empty - admin := getClusterAdmin() - groupDescs, err := admin.DescribeConsumerGroups([]string{args[0]}) - if err != nil { - errorExit("Unable to describe consumer groups: %v\n", err) - } - for _, detail := range groupDescs { - state := detail.State - if !slices.Contains([]string{"Empty", "Dead"}, state) { - errorExit("Consumer group %s has active consumers in it, cannot set offset\n", group) - } - } - - fmt.Printf("Resetting offsets to: %v\n", partitionOffsets) - - if !noconfirm { - prompt := promptui.Prompt{ - Label: "Reset offsets as described", - IsConfirm: true, - } - - _, err := prompt.Run() - if err != nil { - errorExit("Aborted, exiting.\n") - return - } - } - - g, err := sarama.NewConsumerGroupFromClient(group, client) - if err != nil { - errorExit("Failed to create consumer group: %v\n", err) - } - - err = g.Consume(context.Background(), []string{topic}, &resetHandler{ - topic: topic, - partitionOffsets: partitionOffsets, - client: client, - group: group, - }) - if err != nil { - errorExit("Failed to commit offset: %v\n", err) - } - - fmt.Printf("Successfully committed offsets to %v.\n", partitionOffsets) - - closeErr := g.Close() - if closeErr != nil { - fmt.Printf("Warning: Failed to close consumer group: %v\n", closeErr) - } - }, - } - res.Flags().StringVarP(&topic, "topic", "t", "", "topic") - res.Flags().StringVarP(&offset, "offset", "o", "", "offset to commit") - res.Flags().Int32VarP(&partitionFlag, "partition", "p", 0, "partition") - res.Flags().BoolVar(&allPartitions, "all-partitions", false, "apply to all partitions") - res.Flags().StringVar(&offsetMap, "offset-map", "", "set different offsets per different partitions in JSON format, e.g. 
{\"0\": 123, \"1\": 42}") - res.Flags().BoolVar(&noconfirm, "noconfirm", false, "Do not prompt for confirmation") - return res -} - -var groupLsCmd = &cobra.Command{ - Use: "ls", - Short: "List groups", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - groups, err := admin.ListConsumerGroups() - if err != nil { - errorExit("Unable to list consumer groups: %v\n", err) - } - - groupList := make([]string, 0, len(groups)) - for grp := range groups { - groupList = append(groupList, grp) - } - - sort.Slice(groupList, func(i int, j int) bool { - return groupList[i] < groupList[j] - }) - - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - - groupDescs, err := admin.DescribeConsumerGroups(groupList) - if err != nil { - // if we can retrieve list of consumer group, but unable to describe consumer groups - // fallback to only list group name without state - if !noHeaderFlag { - fmt.Fprintf(w, "NAME\n") - } - - for _, group := range groupList { - fmt.Fprintf(w, "%v\n", group) - } - } else { - // return consumer group information with state - if !noHeaderFlag { - fmt.Fprintf(w, "NAME\tSTATE\tCONSUMERS\t\n") - } - - for _, detail := range groupDescs { - state := detail.State - consumers := len(detail.Members) - fmt.Fprintf(w, "%v\t%v\t%v\t\n", detail.GroupId, state, consumers) - } - } - w.Flush() - }, -} - -var groupPeekCmd = &cobra.Command{ - Use: "peek", - Short: "Peek messages from consumer group offset", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validGroupArgs, - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - groups, err := admin.DescribeConsumerGroups([]string{args[0]}) - if err != nil { - errorExit("Unable to describe consumer groups: %v\n", err) - } - - if len(groups) == 0 { - errorExit("Did not receive expected describe consumergroup result\n") - } - group := groups[0] - - if group.State == "Dead" { - fmt.Printf("Group %v not found.\n", args[0]) - return - } - - peekPartitions := make(map[int32]struct{}) - for _, partition := range flagPeekPartitions { - peekPartitions[partition] = struct{}{} - } - - var topicPartitions map[string][]int32 - if len(flagPeekTopics) > 0 { - topicPartitions = make(map[string][]int32, len(flagPeekTopics)) - } - for _, topic := range flagPeekTopics { - topicDetails, err := admin.DescribeTopics([]string{topic}) - if err != nil { - errorExit("Unable to describe topics: %v\n", err) - } - - detail := topicDetails[0] - if detail.Err == sarama.ErrUnknownTopicOrPartition { - fmt.Printf("Topic %v not found.\n", topic) - return - } - - if len(flagPeekPartitions) > 0 { - topicPartitions[topic] = flagPeekPartitions - } else { - partitions := make([]int32, 0, len(detail.Partitions)) - for _, partition := range detail.Partitions { - partitions = append(partitions, partition.ID) - } - topicPartitions[topic] = partitions - } - } - - offsetAndMetadata, err := admin.ListConsumerGroupOffsets(args[0], topicPartitions) - if err != nil { - errorExit("Failed to fetch group offsets: %v\n", err) - } - - cfg := getConfig() - client := getClientFromConfig(cfg) - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - errorExit("Unable to create consumer from client: %v\n", err) - } - - mu := &sync.Mutex{} - wg := &sync.WaitGroup{} - - for topic, partitions := range offsetAndMetadata.Blocks { - for partition, offset := range partitions { - if len(peekPartitions) > 0 { - _, ok := peekPartitions[partition] - if 
!ok { - continue - } - } - - wg.Add(1) - go func(topic string, partition int32, offset int64) { - defer wg.Done() - var start int64 - if offset > flagPeekBefore { - start = offset - flagPeekBefore - } - - pc, err := consumer.ConsumePartition(topic, partition, start) - if err != nil { - errorExit("Unable to consume partition: %v %v %v %v\n", topic, partition, offset, err) - } - - for { - select { - case <-cmd.Context().Done(): - return - case msg := <-pc.Messages(): - handleMessage(msg, mu) - if msg.Offset >= offset+flagPeekAfter { - return - } - } - } - }(topic, partition, offset.Offset) - } - } - wg.Wait() - }, -} - -var groupDescribeCmd = &cobra.Command{ - Use: "describe", - Short: "Describe consumer group", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validGroupArgs, - Run: func(cmd *cobra.Command, args []string) { - // TODO List: This API can be used to find the current groups managed by a broker. To get a list of all groups in the cluster, you must send ListGroup to all brokers. - // same goes probably for topics - admin := getClusterAdmin() - - groups, err := admin.DescribeConsumerGroups([]string{args[0]}) - if err != nil { - errorExit("Unable to describe consumer groups: %v\n", err) - } - - if len(groups) == 0 { - errorExit("Did not receive expected describe consumergroup result\n") - } - group := groups[0] - - if group.State == "Dead" { - fmt.Printf("Group %v not found.\n", args[0]) - return - } - - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - fmt.Fprintf(w, "Group ID:\t%v\n", group.GroupId) - fmt.Fprintf(w, "State:\t%v\n", group.State) - fmt.Fprintf(w, "Protocol:\t%v\n", group.Protocol) - fmt.Fprintf(w, "Protocol Type:\t%v\n", group.ProtocolType) - - fmt.Fprintf(w, "Offsets:\t\n") - - w.Flush() - w.Init(outWriter, tabwriterMinWidthNested, 4, 2, tabwriterPadChar, tabwriterFlags) - - offsetAndMetadata, err := admin.ListConsumerGroupOffsets(args[0], nil) - if err != nil { - errorExit("Failed to fetch group offsets: %v\n", err) - } - - topics := make([]string, 0, len(offsetAndMetadata.Blocks)) - for k := range offsetAndMetadata.Blocks { - topics = append(topics, k) - } - sort.Strings(topics) - - for _, topic := range topics { - partitions := offsetAndMetadata.Blocks[topic] - if len(flagDescribeTopics) > 0 { - var found bool - for _, topicToShow := range flagDescribeTopics { - if topic == topicToShow { - found = true - } - } - - if !found { - continue - } - } - fmt.Fprintf(w, "\t%v:\n", topic) - fmt.Fprintf(w, "\t\tPartition\tGroup Offset\tHigh Watermark\tLag\tMetadata\t\n") - fmt.Fprintf(w, "\t\t---------\t------------\t--------------\t---\t--------\n") - - var p []int32 - - for partition := range partitions { - p = append(p, partition) - } - - sort.Slice(p, func(i, j int) bool { - return p[i] < p[j] - }) - - wms := getHighWatermarks(topic, p) - - lagSum := 0 - offsetSum := 0 - for _, partition := range p { - lag := (wms[partition] - partitions[partition].Offset) - lagSum += int(lag) - offset := partitions[partition].Offset - offsetSum += int(offset) - fmt.Fprintf(w, "\t\t%v\t%v\t%v\t%v\t%v\n", partition, partitions[partition].Offset, wms[partition], (wms[partition] - partitions[partition].Offset), partitions[partition].Metadata) - } - - fmt.Fprintf(w, "\t\tTotal\t%d\t\t%d\t\n", offsetSum, lagSum) - } - - if !flagNoMembers { - - fmt.Fprintf(w, "Members:\t") - - w.Flush() - w.Init(outWriter, tabwriterMinWidthNested, 4, 2, tabwriterPadChar, tabwriterFlags) - - fmt.Fprintln(w) - for _, member := range 
group.Members { - fmt.Fprintf(w, "\t%v:\n", member.ClientId) - fmt.Fprintf(w, "\t\tHost:\t%v\n", member.ClientHost) - - assignment, err := member.GetMemberAssignment() - if err != nil || assignment == nil { - continue - } - - fmt.Fprintf(w, "\t\tAssignments:\n") - - fmt.Fprintf(w, "\t\t Topic\tPartitions\t\n") - fmt.Fprintf(w, "\t\t -----\t----------\t") - - for topic, partitions := range assignment.Topics { - fmt.Fprintf(w, "\n\t\t %v\t%v\t", topic, partitions) - } - - metadata, err := member.GetMemberMetadata() - if err != nil { - fmt.Fprintf(w, "\n") - continue - } - - decodedUserData, err := tryDecodeUserData(group.Protocol, metadata.UserData) - if err != nil { - if IsASCIIPrintable(string(metadata.UserData)) { - fmt.Fprintf(w, "\f\t\tMetadata:\t%v\n", string(metadata.UserData)) - } else { - - fmt.Fprintf(w, "\f\t\tMetadata:\t%v\n", base64.StdEncoding.EncodeToString(metadata.UserData)) - } - } else { - switch d := decodedUserData.(type) { - case streams.SubscriptionInfo: - fmt.Fprintf(w, "\f\t\tMetadata:\t\n") - fmt.Fprintf(w, "\t\t UUID:\t0x%v\n", hex.EncodeToString(d.UUID)) - fmt.Fprintf(w, "\t\t UserEndpoint:\t%v\n", d.UserEndpoint) - } - } - - fmt.Fprintf(w, "\n") - - } - } - - w.Flush() - - }, -} - -func getHighWatermarks(topic string, partitions []int32) (watermarks map[int32]int64) { - client := getClient() - leaders := make(map[*sarama.Broker][]int32) - - for _, partition := range partitions { - leader, err := client.Leader(topic, partition) - if err != nil { - errorExit("Unable to get available offsets for partition without leader. Topic %s Partition %d, Error: %s ", topic, partition, err) - } - leaders[leader] = append(leaders[leader], partition) - } - wg := sync.WaitGroup{} - wg.Add(len(leaders)) - - results := make(chan map[int32]int64, len(leaders)) - - for leader, partitions := range leaders { - req := &sarama.OffsetRequest{ - Version: int16(1), - } - - for _, partition := range partitions { - req.AddBlock(topic, partition, int64(-1), int32(0)) - } - - // Query distinct brokers in parallel - go func(leader *sarama.Broker, req *sarama.OffsetRequest) { - resp, err := leader.GetAvailableOffsets(req) - if err != nil { - errorExit("Unable to get available offsets: %v\n", err) - } - - watermarksFromLeader := make(map[int32]int64) - for partition, block := range resp.Blocks[topic] { - watermarksFromLeader[partition] = block.Offset - } - - results <- watermarksFromLeader - wg.Done() - - }(leader, req) - - } - - wg.Wait() - close(results) - - watermarks = make(map[int32]int64) - for resultMap := range results { - for partition, offset := range resultMap { - watermarks[partition] = offset - } - } - - return -} - -// IsASCIIPrintable returns true if the string is ASCII printable. 
-func IsASCIIPrintable(s string) bool { - for _, r := range s { - if r > unicode.MaxASCII || !unicode.IsPrint(r) { - return false - } - } - return true -} - -func tryDecodeUserData(protocol string, raw []byte) (data interface{}, err error) { - // Interpret userdata here - decoder := streams.NewDecoder(raw) - - switch protocol { - case "stream": - subscriptionInfo := streams.SubscriptionInfo{} - err = subscriptionInfo.Decode(decoder) - if err != nil { - return nil, err - } - return subscriptionInfo, nil - default: - return nil, errors.New("unknown protocol") - } -} diff --git a/cmd/kaf/kaf.go b/cmd/kaf/kaf.go deleted file mode 100644 index e435d538..00000000 --- a/cmd/kaf/kaf.go +++ /dev/null @@ -1,265 +0,0 @@ -package main - -import ( - "fmt" - "io" - - "crypto/tls" - "crypto/x509" - "log" - "os" - - "github.com/IBM/sarama" - "github.com/mattn/go-colorable" - "github.com/spf13/cobra" - - "github.com/birdayz/kaf/pkg/avro" - "github.com/birdayz/kaf/pkg/config" - "github.com/birdayz/kaf/pkg/proto" -) - -var cfgFile string - -func getConfig() (saramaConfig *sarama.Config) { - saramaConfig = sarama.NewConfig() - saramaConfig.Version = sarama.V1_1_0_0 - saramaConfig.Producer.Return.Successes = true - - cluster := currentCluster - if cluster.Version != "" { - parsedVersion, err := sarama.ParseKafkaVersion(cluster.Version) - if err != nil { - errorExit("Unable to parse Kafka version: %v\n", err) - } - saramaConfig.Version = parsedVersion - } - if cluster.SASL != nil { - saramaConfig.Net.SASL.Enable = true - if cluster.SASL.Mechanism != "OAUTHBEARER" { - saramaConfig.Net.SASL.User = cluster.SASL.Username - saramaConfig.Net.SASL.Password = cluster.SASL.Password - } - saramaConfig.Net.SASL.Version = cluster.SASL.Version - } - if cluster.TLS != nil && cluster.SecurityProtocol != "SASL_SSL" { - saramaConfig.Net.TLS.Enable = true - tlsConfig := &tls.Config{ - InsecureSkipVerify: cluster.TLS.Insecure, - } - - if cluster.TLS.Cafile != "" { - caCert, err := os.ReadFile(cluster.TLS.Cafile) - if err != nil { - errorExit("Unable to read Cafile :%v\n", err) - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caCertPool - } - - if cluster.TLS.Clientfile != "" && cluster.TLS.Clientkeyfile != "" { - clientCert, err := os.ReadFile(cluster.TLS.Clientfile) - if err != nil { - errorExit("Unable to read Clientfile :%v\n", err) - } - clientKey, err := os.ReadFile(cluster.TLS.Clientkeyfile) - if err != nil { - errorExit("Unable to read Clientkeyfile :%v\n", err) - } - - cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) - if err != nil { - errorExit("Unable to create KeyPair: %v\n", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // nolint - tlsConfig.BuildNameToCertificate() - } - saramaConfig.Net.TLS.Config = tlsConfig - } - if cluster.SecurityProtocol == "SASL_SSL" { - saramaConfig.Net.TLS.Enable = true - if cluster.TLS != nil { - tlsConfig := &tls.Config{ - InsecureSkipVerify: cluster.TLS.Insecure, - } - if cluster.TLS.Cafile != "" { - caCert, err := os.ReadFile(cluster.TLS.Cafile) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caCertPool - } - saramaConfig.Net.TLS.Config = tlsConfig - - } else { - saramaConfig.Net.TLS.Config = &tls.Config{InsecureSkipVerify: false} - } - } - if cluster.SecurityProtocol == "SASL_SSL" || cluster.SecurityProtocol == "SASL_PLAINTEXT" { - if cluster.SASL.Mechanism == "SCRAM-SHA-512" { - 
saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } - saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) - } else if cluster.SASL.Mechanism == "SCRAM-SHA-256" { - saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } - saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) - } else if cluster.SASL.Mechanism == "OAUTHBEARER" || cluster.SASL.Mechanism == "AWS_MSK_IAM" { - //Here setup get token function - saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeOAuth) - saramaConfig.Net.SASL.TokenProvider = newTokenProvider() - } - } - return saramaConfig -} - -var ( - outWriter io.Writer = os.Stdout - errWriter io.Writer = os.Stderr - inReader io.Reader = os.Stdin - - colorableOut io.Writer = colorable.NewColorableStdout() -) - -// Will be replaced by GitHub action and by goreleaser -// see https://goreleaser.com/customization/build/ -var commit string = "HEAD" -var version string = "latest" - -var rootCmd = &cobra.Command{ - Use: "kaf", - Short: "Kafka Command Line utility for cluster management", - Version: fmt.Sprintf("%s (%s)", version, commit), - PersistentPreRun: func(cmd *cobra.Command, args []string) { - outWriter = cmd.OutOrStdout() - errWriter = cmd.ErrOrStderr() - inReader = cmd.InOrStdin() - - if outWriter != os.Stdout { - colorableOut = outWriter - } - }, -} - -func main() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -var cfg config.Config -var currentCluster *config.Cluster - -var ( - brokersFlag []string - schemaRegistryURL string - protoFiles []string - protoExclude []string - decodeMsgPack bool - verbose bool - clusterOverride string -) - -func init() { - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.kaf/config)") - rootCmd.PersistentFlags().StringSliceVarP(&brokersFlag, "brokers", "b", nil, "Comma separated list of broker ip:port pairs") - rootCmd.PersistentFlags().StringVar(&schemaRegistryURL, "schema-registry", "", "URL to a Confluent schema registry. 
Used for attempting to decode Avro-encoded messages") - rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Whether to turn on sarama logging") - rootCmd.PersistentFlags().StringVarP(&clusterOverride, "cluster", "c", "", "set a temporary current cluster") - cobra.OnInitialize(onInit) -} - -var setupProtoDescriptorRegistry = func(cmd *cobra.Command, args []string) { - if protoType != "" { - r, err := proto.NewDescriptorRegistry(protoFiles, protoExclude) - if err != nil { - errorExit("Failed to load protobuf files: %v\n", err) - } - reg = r - } -} - -func onInit() { - var err error - cfg, err = config.ReadConfig(cfgFile) - if err != nil { - errorExit("Invalid config: %v", err) - } - - cfg.ClusterOverride = clusterOverride - - cluster := cfg.ActiveCluster() - if cluster != nil { - // Use active cluster from config - currentCluster = cluster - } else { - // Create sane default if not configured - currentCluster = &config.Cluster{ - Brokers: []string{"localhost:9092"}, - } - } - - // Any set flags override the configuration - if schemaRegistryURL != "" { - currentCluster.SchemaRegistryURL = schemaRegistryURL - currentCluster.SchemaRegistryCredentials = nil - } - - if brokersFlag != nil { - currentCluster.Brokers = brokersFlag - } - - if verbose { - sarama.Logger = log.New(errWriter, "[sarama] ", log.Lshortfile|log.LstdFlags) - } -} - -func getClusterAdmin() (admin sarama.ClusterAdmin) { - clusterAdmin, err := sarama.NewClusterAdmin(currentCluster.Brokers, getConfig()) - if err != nil { - errorExit("Unable to get cluster admin: %v\n", err) - } - - return clusterAdmin -} - -func getClient() (client sarama.Client) { - client, err := sarama.NewClient(currentCluster.Brokers, getConfig()) - if err != nil { - errorExit("Unable to get client: %v\n", err) - } - return client -} - -func getClientFromConfig(config *sarama.Config) (client sarama.Client) { - client, err := sarama.NewClient(currentCluster.Brokers, config) - if err != nil { - errorExit("Unable to get client: %v\n", err) - } - return client -} - -func getSchemaCache() (cache *avro.SchemaCache) { - if currentCluster.SchemaRegistryURL == "" { - return nil - } - var username, password string - if creds := currentCluster.SchemaRegistryCredentials; creds != nil { - username = creds.Username - password = creds.Password - } - cache, err := avro.NewSchemaCache(currentCluster.SchemaRegistryURL, username, password) - if err != nil { - errorExit("Unable to get schema cache :%v\n", err) - } - return cache -} - -func errorExit(format string, a ...interface{}) { - fmt.Fprintf(errWriter, format+"\n", a...) - os.Exit(1) -} diff --git a/cmd/kaf/main.go b/cmd/kaf/main.go new file mode 100644 index 00000000..744b9506 --- /dev/null +++ b/cmd/kaf/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "os" + + "github.com/birdayz/kaf/pkg/cmd" +) + +// Set by goreleaser / GitHub Actions via ldflags. 
+var ( + commit = "HEAD" + version = "latest" +) + +func main() { + if err := cmd.Execute(version, commit); err != nil { + os.Exit(1) + } +} diff --git a/cmd/kaf/main_test.go b/cmd/kaf/main_test.go deleted file mode 100644 index a963176d..00000000 --- a/cmd/kaf/main_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package main - -import ( - "bytes" - "context" - "io" - "log" - "os" - "testing" - "time" - - "github.com/orlangure/gnomock" - "github.com/orlangure/gnomock/preset/kafka" - "github.com/stretchr/testify/require" -) - -var kafkaAddr string - -func TestMain(m *testing.M) { - os.Exit(testMain(m)) -} - -func testMain(m *testing.M) (code int) { - c, err := gnomock.Start( - kafka.Preset(kafka.WithTopics("kaf-testing", "gnomock-kafka")), - gnomock.WithContainerName("kaf-kafka"), - ) - if err != nil { - log.Println(err) - return 1 - } - - defer func() { - stopErr := gnomock.Stop(c) - if stopErr != nil { - code = 1 - } - }() - - kafkaAddr = c.Address(kafka.BrokerPort) - - return m.Run() -} - -func runCmd(t *testing.T, in io.Reader, args ...string) string { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - - b := bytes.NewBufferString("") - - rootCmd.SetArgs(args) - rootCmd.SetOut(b) - rootCmd.SetErr(b) - rootCmd.SetIn(in) - - require.NoError(t, rootCmd.ExecuteContext(ctx)) - - bs, err := io.ReadAll(b) - require.NoError(t, err) - - return string(bs) -} - -func runCmdWithBroker(t *testing.T, in io.Reader, args ...string) string { - args = append([]string{"-b", kafkaAddr}, args...) - return runCmd(t, in, args...) -} diff --git a/cmd/kaf/message.go b/cmd/kaf/message.go deleted file mode 100644 index 2fd35a54..00000000 --- a/cmd/kaf/message.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import "time" - -type MessageHeader struct { - Key string `json:"key"` - Value string `json:"value"` -} -type JSONEachRowMessage struct { - Topic string `json:"topic"` - Partition int32 `json:"partition"` - Offset int64 `json:"offset"` - Timestamp time.Time `json:"timestamp"` - Headers []MessageHeader `json:"headers"` - Key string `json:"key"` - Payload string `json:"payload"` -} diff --git a/cmd/kaf/node.go b/cmd/kaf/node.go deleted file mode 100644 index 09e6014b..00000000 --- a/cmd/kaf/node.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "text/tabwriter" - - "sort" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(nodeCommand) - rootCmd.AddCommand(nodesCommand) - nodeCommand.AddCommand(nodeLsCommand) - nodeLsCommand.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") -} - -var nodesCommand = &cobra.Command{ - Use: "nodes", - Short: "List nodes in a cluster", - Run: nodeLsCommand.Run, -} - -var nodeCommand = &cobra.Command{ - Use: "node", - Short: "Describe and List nodes", -} - -var nodeLsCommand = &cobra.Command{ - Use: "ls", - Short: "List nodes in a cluster", - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - brokers, ctlID, err := admin.DescribeCluster() - if err != nil { - errorExit("Unable to describe cluster: %v\n", err) - } - - sort.Slice(brokers, func(i, j int) bool { - return brokers[i].ID() < brokers[j].ID() - }) - - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - if !noHeaderFlag { - _, _ = fmt.Fprintf(w, "ID\tADDRESS\tCONTROLLER\t\n") - } - - for _, broker := range brokers { - _, _ = fmt.Fprintf(w, "%v\t%v\t%v\t\n", broker.ID(), broker.Addr(), broker.ID() == ctlID) - } - - w.Flush() - }, -} 
diff --git a/cmd/kaf/node_test.go b/cmd/kaf/node_test.go deleted file mode 100644 index 822c4467..00000000 --- a/cmd/kaf/node_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNode(t *testing.T) { - out := runCmdWithBroker(t, nil, "node", "ls") - require.Contains(t, out, kafkaAddr) -} diff --git a/cmd/kaf/nodes_test.go b/cmd/kaf/nodes_test.go deleted file mode 100644 index e62dd1e9..00000000 --- a/cmd/kaf/nodes_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNodes(t *testing.T) { - out := runCmdWithBroker(t, nil, "nodes") - require.Contains(t, out, kafkaAddr) -} diff --git a/cmd/kaf/oauth.go b/cmd/kaf/oauth.go deleted file mode 100644 index a5248c45..00000000 --- a/cmd/kaf/oauth.go +++ /dev/null @@ -1,134 +0,0 @@ -package main - -import ( - "context" - "net/http" - "sync" - "time" - - "github.com/IBM/sarama" - aws_signer "github.com/aws/aws-msk-iam-sasl-signer-go/signer" - aws_config "github.com/aws/aws-sdk-go-v2/config" - "golang.org/x/oauth2" - "golang.org/x/oauth2/clientcredentials" -) - -var ( - once sync.Once - tokenProv *tokenProvider - refreshBuffer time.Duration = time.Second * 20 - tokenFetchTimeout time.Duration = time.Second * 10 -) - -var _ sarama.AccessTokenProvider = &tokenProvider{} - -type tokenProvider struct { - // refreshMutex is used to ensure that tokens are not refreshed concurrently. - refreshMutex sync.Mutex - // The time at which the token expires. - expiresAt time.Time - // The time at which the token should be replaced. - replaceAt time.Time - // The currently cached token value. - currentToken string - // ctx for token fetching - ctx context.Context - // cfg for token fetching from - oauthClientCFG *clientcredentials.Config - // static token - staticToken bool -} - -// This is a singleton -func newTokenProvider() *tokenProvider { - once.Do(func() { - cluster := currentCluster - ctx := context.Background() - - // token either from tokenURL, static or AWS API - if cluster.SASL.Mechanism == "AWS_MSK_IAM" { - cfg, err := aws_config.LoadDefaultConfig(ctx) - if err != nil { - errorExit("Could not load AWS config: " + err.Error()) - } - token, _, err := aws_signer.GenerateAuthToken(ctx, cfg.Region) - if err != nil { - errorExit("Could not generate auth token: " + err.Error()) - } - tokenProv = &tokenProvider{ - oauthClientCFG: &clientcredentials.Config{}, - staticToken: true, - currentToken: token, - } - } else if len(cluster.SASL.Token) != 0 { - tokenProv = &tokenProvider{ - oauthClientCFG: &clientcredentials.Config{}, - staticToken: true, - currentToken: cluster.SASL.Token, - } - } else { - tokenProv = &tokenProvider{ - oauthClientCFG: &clientcredentials.Config{ - ClientID: cluster.SASL.ClientID, - ClientSecret: cluster.SASL.ClientSecret, - TokenURL: cluster.SASL.TokenURL, - Scopes: cluster.SASL.Scopes, - }, - staticToken: false, - } - } - if !tokenProv.staticToken { - // create context with timeout - httpClient := &http.Client{Timeout: tokenFetchTimeout} - ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) - tokenProv.ctx = ctx - - // get first token - firstToken, err := tokenProv.oauthClientCFG.Token(ctx) - if err != nil { - errorExit("Could not fetch OAUTH token: " + err.Error()) - } - tokenProv.currentToken = firstToken.AccessToken - tokenProv.expiresAt = firstToken.Expiry - tokenProv.replaceAt = firstToken.Expiry.Add(-refreshBuffer) - } - }) - return tokenProv -} - -func (tp 
*tokenProvider) Token() (*sarama.AccessToken, error) { - - if !tp.staticToken { - if time.Now().After(tp.replaceAt) { - if err := tp.refreshToken(); err != nil { - return nil, err - } - - } - } - return &sarama.AccessToken{ - Token: tp.currentToken, - Extensions: nil, - }, nil -} - -func (tp *tokenProvider) refreshToken() error { - // Get a lock on the update - tp.refreshMutex.Lock() - defer tp.refreshMutex.Unlock() - - // Check whether another call refreshed the token while waiting for the lock to be acquired here - if time.Now().Before(tp.replaceAt) { - return nil - } - - token, err := tp.oauthClientCFG.Token(tp.ctx) - if err != nil { - return err - } - // Save the token - tp.currentToken = token.AccessToken - tp.expiresAt = token.Expiry - tp.replaceAt = token.Expiry.Add(-refreshBuffer) - return nil -} diff --git a/cmd/kaf/produce.go b/cmd/kaf/produce.go deleted file mode 100644 index 2aafe5bc..00000000 --- a/cmd/kaf/produce.go +++ /dev/null @@ -1,330 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "os" - "strings" - "text/template" - - "time" - - "github.com/IBM/sarama" - "github.com/Masterminds/sprig" - "github.com/birdayz/kaf/pkg/partitioner" - pb "github.com/golang/protobuf/proto" - "github.com/spf13/cobra" -) - -var ( - keyFlag string - rawKeyFlag bool - headerFlag []string - repeatFlag int - partitionerFlag string - timestampFlag string - partitionFlag int32 - bufferSizeFlag int - inputModeFlag string - avroSchemaID int - avroKeySchemaID int - templateFlag bool - inputFormatFlag = InputFormatDefault -) - -func init() { - rootCmd.AddCommand(produceCmd) - - produceCmd.Flags().StringVarP(&keyFlag, "key", "k", "", "Key for the record. Currently only strings are supported.") - produceCmd.Flags().BoolVar(&rawKeyFlag, "raw-key", false, "Treat value of --key as base64 and use its decoded raw value as key") - produceCmd.Flags().StringArrayVarP(&headerFlag, "header", "H", []string{}, "Header in format :. May be used multiple times to add more headers.") - produceCmd.Flags().IntVarP(&repeatFlag, "repeat", "n", 1, "Repeat records to send.") - - produceCmd.Flags().StringSliceVar(&protoFiles, "proto-include", []string{}, "Path to proto files") - produceCmd.Flags().StringSliceVar(&protoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") - produceCmd.Flags().StringVar(&protoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") - - produceCmd.Flags().StringVar(&keyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. 
Example: com.test.SampleMessage") - produceCmd.Flags().StringVar(&partitionerFlag, "partitioner", "", "Select partitioner: [jvm|rand|rr|hash]") - produceCmd.Flags().StringVar(&timestampFlag, "timestamp", "", "Select timestamp for record") - produceCmd.Flags().Int32VarP(&partitionFlag, "partition", "p", -1, "Partition to produce to") - - produceCmd.Flags().IntVarP(&avroSchemaID, "avro-schema-id", "", -1, "Value schema id for avro messsage encoding") - produceCmd.Flags().IntVarP(&avroKeySchemaID, "avro-key-schema-id", "", -1, "Key schema id for avro messsage encoding") - - produceCmd.Flags().StringVarP(&inputModeFlag, "input-mode", "", "line", "Scanning input mode: [line|full]") - produceCmd.Flags().Var(&inputFormatFlag, "input", "Set input format messages: default, hex, json-each-row (json-each-row is compatible with output of kaf consume --output json-each-row)") - produceCmd.Flags().IntVarP(&bufferSizeFlag, "line-length-limit", "", 0, "line length limit in line input mode") - - produceCmd.Flags().BoolVar(&templateFlag, "template", false, "run data through go template engine") - - if err := produceCmd.RegisterFlagCompletionFunc("input", completeInputFormat); err != nil { - errorExit("Failed to register flag completion: %v", err) - } -} - -func readLines(reader io.Reader, out chan []byte) { - scanner := bufio.NewScanner(reader) - if bufferSizeFlag > 0 { - scanner.Buffer(make([]byte, bufferSizeFlag), bufferSizeFlag) - } - - for scanner.Scan() { - out <- bytes.Clone(scanner.Bytes()) - } - close(out) - - if err := scanner.Err(); err != nil { - errorExit("scanning input failed: %v\n", err) - } -} - -func readFull(reader io.Reader, out chan []byte) { - data, err := io.ReadAll(inReader) - if err != nil { - errorExit("Unable to read data\n") - } - out <- data - close(out) -} - -var produceCmd = &cobra.Command{ - Use: "produce TOPIC", - Short: "Produce record. 
Reads data from stdin.", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validTopicArgs, - PreRun: setupProtoDescriptorRegistry, - Run: func(cmd *cobra.Command, args []string) { - cfg := getConfig() - switch partitionerFlag { - case "jvm": - cfg.Producer.Partitioner = partitioner.NewJVMCompatiblePartitioner - case "rand": - cfg.Producer.Partitioner = sarama.NewRandomPartitioner - case "rr": - cfg.Producer.Partitioner = sarama.NewRoundRobinPartitioner - } - - if partitionFlag != int32(-1) { - cfg.Producer.Partitioner = sarama.NewManualPartitioner - } - - producer, err := sarama.NewSyncProducer(currentCluster.Brokers, cfg) - if err != nil { - errorExit("Unable to create new sync producer: %v\n", err) - } - - if avroSchemaID != -1 || avroKeySchemaID != -1 { - schemaCache = getSchemaCache() - if schemaCache == nil { - errorExit("Could not connect to schema registry") - } - } - - out := make(chan []byte, 1) - switch inputModeFlag { - case "full": - go readFull(inReader, out) - default: - go readLines(inReader, out) - } - - var key sarama.Encoder - if rawKeyFlag { - keyBytes, err := base64.RawStdEncoding.DecodeString(keyFlag) - if err != nil { - errorExit("--raw-key is given, but value of --key is not base64") - } - key = sarama.ByteEncoder(keyBytes) - } else { - key = sarama.StringEncoder(keyFlag) - } - if keyProtoType != "" { - if dynamicMessage := reg.MessageForType(keyProtoType); dynamicMessage != nil { - err = dynamicMessage.UnmarshalJSON([]byte(keyFlag)) - if err != nil { - errorExit("Failed to parse input JSON as proto type %v: %v", protoType, err) - } - - pb, err := pb.Marshal(dynamicMessage) - if err != nil { - errorExit("Failed to marshal proto: %v", err) - } - - key = sarama.ByteEncoder(pb) - } else { - errorExit("Failed to load key proto type") - } - - } else if avroKeySchemaID != -1 { - avroKey, err := schemaCache.EncodeMessage(avroKeySchemaID, []byte(keyFlag)) - if err != nil { - errorExit("Failed to encode avro key", err) - } - key = sarama.ByteEncoder(avroKey) - } - - var headers []sarama.RecordHeader - for _, h := range headerFlag { - v := strings.SplitN(h, ":", 2) - if len(v) == 2 { - headers = append(headers, sarama.RecordHeader{ - Key: []byte(v[0]), - Value: []byte(v[1]), - }) - } - } - - for data := range out { - - for i := 0; i < repeatFlag; i++ { - - input := data - - if templateFlag { - vars := map[string]interface{}{} - vars["i"] = i - tpl := template.New("kaf").Funcs(sprig.TxtFuncMap()) - - tpl, err = tpl.Parse(string(data)) - if err != nil { - errorExit("failed to parse go template: %v", err) - } - - buf := bytes.NewBuffer(nil) - - if err := tpl.Execute(buf, vars); err != nil { - errorExit("failed to execute go template: %v", err) - } - - input = buf.Bytes() - } - - // Encode to..something - - var marshaledInput []byte - - if protoType != "" { - if dynamicMessage := reg.MessageForType(protoType); dynamicMessage != nil { - err = dynamicMessage.UnmarshalJSON(input) - if err != nil { - errorExit("Failed to parse input JSON as proto type %v: %v", protoType, err) - } - - pb, err := pb.Marshal(dynamicMessage) - if err != nil { - errorExit("Failed to marshal proto: %v", err) - } - - marshaledInput = pb - } else { - errorExit("Failed to load payload proto type") - } - } else if avroSchemaID != -1 { - avro, err := schemaCache.EncodeMessage(avroSchemaID, data) - if err != nil { - errorExit("Failed to encode avro value", err) - } - marshaledInput = avro - } else { - marshaledInput = input - } - - var ts time.Time - t, err := time.Parse(time.RFC3339, timestampFlag) - if err != 
nil { - ts = time.Now() - } else { - ts = t - } - - msg := &sarama.ProducerMessage{ - Topic: args[0], - Timestamp: ts, - } - - if inputFormatFlag == InputFormatJSONEachRow { - jsonEachRowMsg := JSONEachRowMessage{} - if err = json.Unmarshal(marshaledInput, &jsonEachRowMsg); err == nil { - if keyFlag == "" { - // if key flag not set, use the key from stdin - key = sarama.StringEncoder(jsonEachRowMsg.Key) - } - - for _, h := range jsonEachRowMsg.Headers { - msg.Headers = append(msg.Headers, sarama.RecordHeader{ - Key: []byte(h.Key), - Value: []byte(h.Value), - }) - } - - msg.Partition = jsonEachRowMsg.Partition - marshaledInput = []byte(jsonEachRowMsg.Payload) - } - } else if inputFormatFlag == InputFormatHex { - dst := make([]byte, hex.DecodedLen(len(marshaledInput))) - if _, err := hex.Decode(dst, marshaledInput); err != nil { - fmt.Fprintf(outWriter, "Failed to decode hex input: %v.", err) - } else { - marshaledInput = dst - } - } - - msg.Key = key - msg.Value = sarama.ByteEncoder(marshaledInput) - - if len(headers) > 0 { - // override headers if they were set - msg.Headers = headers - } - - if partitionFlag != -1 { - msg.Partition = partitionFlag - } - - partition, offset, err := producer.SendMessage(msg) - if err != nil { - fmt.Fprintf(outWriter, "Failed to send record: %v.", err) - os.Exit(1) - } - - fmt.Fprintf(outWriter, "Sent record to partition %v at offset %v.\n", partition, offset) - } - } - }, -} - -type InputFormat string - -const ( - InputFormatDefault InputFormat = "default" - InputFormatJSONEachRow InputFormat = "json-each-row" - InputFormatHex InputFormat = "hex" -) - -func (e *InputFormat) String() string { - return string(*e) -} - -func (e *InputFormat) Set(v string) error { - switch v { - case "default", "json-each-row", "hex": - *e = InputFormat(v) - return nil - default: - return fmt.Errorf("must be one of: default, json-each-row, hex") - } -} - -func (e *InputFormat) Type() string { - return "InputFormat" -} - -func completeInputFormat(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"default", "json-each-row", "hex"}, cobra.ShellCompDirectiveNoFileComp -} diff --git a/cmd/kaf/produce_consume_test.go b/cmd/kaf/produce_consume_test.go deleted file mode 100644 index f5777fd8..00000000 --- a/cmd/kaf/produce_consume_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package main - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestProduceConsume(t *testing.T) { - msg := "this is a test" - - t.Run("produce a message", func(t *testing.T) { - buf := bytes.NewBufferString(msg) - - out := runCmdWithBroker(t, buf, "produce", "gnomock-kafka") - require.Contains(t, out, "Sent record") - }) - - t.Run("consume a message", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "consume", "gnomock-kafka") - require.Contains(t, out, msg) - }) -} diff --git a/cmd/kaf/query.go b/cmd/kaf/query.go deleted file mode 100644 index bfe46435..00000000 --- a/cmd/kaf/query.go +++ /dev/null @@ -1,116 +0,0 @@ -package main - -import ( - "fmt" - "sync" - - "strings" - - "github.com/IBM/sarama" - "github.com/spf13/cobra" -) - -var grepValue string - -func init() { - rootCmd.AddCommand(queryCmd) - - queryCmd.Flags().StringVarP(&keyFlag, "key", "k", "", "Key to search for") - queryCmd.Flags().StringSliceVar(&protoFiles, "proto-include", []string{}, "Path to proto files") - queryCmd.Flags().StringSliceVar(&protoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") - 
queryCmd.Flags().StringVar(&protoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") - queryCmd.Flags().StringVar(&keyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. Example: com.test.SampleMessage") - - queryCmd.Flags().StringVar(&grepValue, "grep", "", "Grep for value") - -} - -var queryCmd = &cobra.Command{ - Use: "query TOPIC", - Short: "Query topic by key", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validTopicArgs, - PreRun: setupProtoDescriptorRegistry, - Run: func(cmd *cobra.Command, args []string) { - topic := args[0] - client := getClient() - - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - errorExit("Unable to create consumer from client: %v\n", err) - } - - partitions, err := consumer.Partitions(topic) - if err != nil { - errorExit("Unable to get partitions: %v\n", err) - } - - schemaCache = getSchemaCache() - - wg := sync.WaitGroup{} - - for _, partition := range partitions { - wg.Add(1) - go func(partition int32) { - defer wg.Done() - highWatermark, err := client.GetOffset(topic, partition, sarama.OffsetNewest) - if err != nil { - errorExit("Failed to get high watermark: %w", err) - } - - if highWatermark == 0 { - return - } - - pc, err := consumer.ConsumePartition(topic, partition, sarama.OffsetOldest) - if err != nil { - errorExit("Unable to consume partition: %v\n", err) - } - - for msg := range pc.Messages() { - if string(msg.Key) == keyFlag { - var keyTextRaw string - var valueTextRaw string - if protoType != "" { - d, err := protoDecode(reg, msg.Value, protoType) - if err != nil { - fmt.Println("Failed proto decode") - } - valueTextRaw = string(d) - } else { - valueTextRaw = string(msg.Value) - } - - if keyProtoType != "" { - d, err := protoDecode(reg, msg.Key, keyProtoType) - if err != nil { - fmt.Println("Failed proto decode") - } - keyTextRaw = string(d) - } else { - keyTextRaw = string(msg.Key) - } - - match := true - if grepValue != "" { - if !strings.Contains(valueTextRaw, grepValue) { - match = false - } - } - - if match { - fmt.Printf("Key: %v\n", keyTextRaw) - fmt.Printf("Value: %v\n", valueTextRaw) - } - - if msg.Offset == pc.HighWaterMarkOffset()-1 { - break - } - } - } - }(partition) - } - - wg.Wait() - }, -} diff --git a/cmd/kaf/scram_client.go b/cmd/kaf/scram_client.go deleted file mode 100644 index 6f622817..00000000 --- a/cmd/kaf/scram_client.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "crypto/sha256" - "crypto/sha512" - "hash" - - "github.com/xdg/scram" -) - -var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } -var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } - -type XDGSCRAMClient struct { - *scram.Client - *scram.ClientConversation - scram.HashGeneratorFcn -} - -func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { - x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) - if err != nil { - return err - } - x.ClientConversation = x.Client.NewConversation() - return nil -} - -func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { - response, err = x.ClientConversation.Step(challenge) - return -} - -func (x *XDGSCRAMClient) Done() bool { - return x.ClientConversation.Done() -} diff --git a/cmd/kaf/topic.go b/cmd/kaf/topic.go deleted file mode 100644 index d845cec6..00000000 --- a/cmd/kaf/topic.go +++ /dev/null @@ -1,471 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "slices" - 
"sort" - "strings" - "text/tabwriter" - - "github.com/IBM/sarama" - "github.com/spf13/cobra" -) - -var ( - partitionsFlag int32 - partitionAssignmentsFlag string - replicasFlag int16 - noHeaderFlag bool - compactFlag bool -) - -func init() { - rootCmd.AddCommand(topicCmd) - rootCmd.AddCommand(topicsCmd) - topicCmd.AddCommand(createTopicCmd) - topicCmd.AddCommand(deleteTopicCmd) - topicCmd.AddCommand(lsTopicsCmd) - topicCmd.AddCommand(describeTopicCmd) - topicCmd.AddCommand(addConfigCmd) - topicCmd.AddCommand(removeConfigCmd) - topicCmd.AddCommand(topicSetConfig) - topicCmd.AddCommand(updateTopicCmd) - topicCmd.AddCommand(lagCmd) - - createTopicCmd.Flags().Int32VarP(&partitionsFlag, "partitions", "p", int32(1), "Number of partitions") - createTopicCmd.Flags().Int16VarP(&replicasFlag, "replicas", "r", int16(1), "Number of replicas") - createTopicCmd.Flags().BoolVar(&compactFlag, "compact", false, "Enable topic compaction") - - lsTopicsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") - topicsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") - updateTopicCmd.Flags().Int32VarP(&partitionsFlag, "partitions", "p", int32(-1), "Number of partitions") - updateTopicCmd.Flags().StringVar(&partitionAssignmentsFlag, "partition-assignments", "", "Partition Assignments. Optional. If set in combination with -p, an assignment must be provided for each new partition. Example: '[[1,2,3],[1,2,3]]' (JSON Array syntax) assigns two new partitions to brokers 1,2,3. If used by itself, a reassignment must be provided for all partitions.") -} - -var topicCmd = &cobra.Command{ - Use: "topic", - Short: "Create and describe topics.", -} - -var topicsCmd = &cobra.Command{ - Use: "topics", - Short: "List topics", - Run: lsTopicsCmd.Run, -} - -var topicSetConfig = &cobra.Command{ - Use: "set-config", - Short: "set topic config. 
requires Kafka >=2.3.0 on broker side and kaf cluster config.", - Example: "kaf topic set-config topic.name \"cleanup.policy=delete\"", - Args: cobra.ExactArgs(2), - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topic := args[0] - - splt := strings.Split(args[1], ",") - configs := make(map[string]sarama.IncrementalAlterConfigsEntry) - - for _, kv := range splt { - s := strings.Split(kv, "=") - - if len(s) != 2 { - continue - } - - key := s[0] - value := s[1] - configs[key] = sarama.IncrementalAlterConfigsEntry{ - Operation: sarama.IncrementalAlterConfigsOperationSet, - Value: &value, - } - } - - if len(configs) < 1 { - errorExit("No valid configs found") - } - - err := admin.IncrementalAlterConfig(sarama.TopicResource, topic, configs, false) - if err != nil { - errorExit("Unable to alter topic config: %v\n", err) - } - fmt.Printf("\xE2\x9C\x85 Updated config.") - }, -} - -var updateTopicCmd = &cobra.Command{ - Use: "update", - Short: "Update topic", - Example: "kaf topic update -p 5 --partition-assignments '[[1,2,3],[1,2,3]]'", - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - if partitionsFlag == -1 && partitionAssignmentsFlag == "" { - errorExit("Number of partitions and/or partition assigments must be given") - } - - var assignments [][]int32 - if partitionAssignmentsFlag != "" { - if err := json.Unmarshal([]byte(partitionAssignmentsFlag), &assignments); err != nil { - errorExit("Invalid partition assignments: %v", err) - } - } - - if partitionsFlag != int32(-1) { - err := admin.CreatePartitions(args[0], partitionsFlag, assignments, false) - if err != nil { - errorExit("Failed to create partitions: %v", err) - } - } else { - // Needs at least Kafka version 2.4.0. - err := admin.AlterPartitionReassignments(args[0], assignments) - if err != nil { - errorExit("Failed to reassign the partition assigments: %v", err) - } - } - fmt.Printf("\xE2\x9C\x85 Updated topic!\n") - }, -} - -var lsTopicsCmd = &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List topics", - Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topics, err := admin.ListTopics() - if err != nil { - errorExit("Unable to list topics: %v\n", err) - } - - sortedTopics := make( - []struct { - name string - sarama.TopicDetail - }, len(topics)) - - i := 0 - for name, topic := range topics { - sortedTopics[i].name = name - sortedTopics[i].TopicDetail = topic - i++ - } - - sort.Slice(sortedTopics, func(i int, j int) bool { - return sortedTopics[i].name < sortedTopics[j].name - }) - - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - - if !noHeaderFlag { - fmt.Fprintf(w, "NAME\tPARTITIONS\tREPLICAS\t\n") - } - - for _, topic := range sortedTopics { - fmt.Fprintf(w, "%v\t%v\t%v\t\n", topic.name, topic.NumPartitions, topic.ReplicationFactor) - } - w.Flush() - }, -} - -var describeTopicCmd = &cobra.Command{ - Use: "describe", - Short: "Describe topic", - Long: "Describe a topic. 
Default values of the configuration are omitted.", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validTopicArgs, - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topicDetails, err := admin.DescribeTopics([]string{args[0]}) - if err != nil { - errorExit("Unable to describe topics: %v\n", err) - } - - if topicDetails[0].Err == sarama.ErrUnknownTopicOrPartition { - fmt.Printf("Topic %v not found.\n", args[0]) - return - } - - cfg, err := admin.DescribeConfig(sarama.ConfigResource{ - Type: sarama.TopicResource, - Name: args[0], - }) - if err != nil { - errorExit("Unable to describe config: %v\n", err) - } - - var compacted bool - for _, e := range cfg { - if e.Name == "cleanup.policy" { - for _, setting := range strings.Split(e.Value, ",") { - if setting == "compact" { - compacted = true - } - } - } - } - - detail := topicDetails[0] - sort.Slice(detail.Partitions, func(i, j int) bool { return detail.Partitions[i].ID < detail.Partitions[j].ID }) - - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - fmt.Fprintf(w, "Name:\t%v\t\n", detail.Name) - fmt.Fprintf(w, "Internal:\t%v\t\n", detail.IsInternal) - fmt.Fprintf(w, "Compacted:\t%v\t\n", compacted) - fmt.Fprintf(w, "Partitions:\n") - - w.Flush() - w.Init(outWriter, tabwriterMinWidthNested, 4, 2, tabwriterPadChar, tabwriterFlags) - - fmt.Fprintf(w, "\tPartition\tHigh Watermark\tLeader\tReplicas\tISR\t\n") - fmt.Fprintf(w, "\t---------\t--------------\t------\t--------\t---\t\n") - - partitions := make([]int32, 0, len(detail.Partitions)) - for _, partition := range detail.Partitions { - partitions = append(partitions, partition.ID) - } - highWatermarks := getHighWatermarks(args[0], partitions) - highWatermarksSum := 0 - - for _, partition := range detail.Partitions { - sortedReplicas := partition.Replicas - sort.Slice(sortedReplicas, func(i, j int) bool { return sortedReplicas[i] < sortedReplicas[j] }) - - sortedISR := partition.Isr - sort.Slice(sortedISR, func(i, j int) bool { return sortedISR[i] < sortedISR[j] }) - - highWatermarksSum += int(highWatermarks[partition.ID]) - - fmt.Fprintf(w, "\t%v\t%v\t%v\t%v\t%v\t\n", partition.ID, highWatermarks[partition.ID], partition.Leader, sortedReplicas, sortedISR) - } - - w.Flush() - - fmt.Fprintf(w, "Summed HighWatermark:\t%d\n", highWatermarksSum) - w.Flush() - - fmt.Fprintf(w, "Config:\n") - fmt.Fprintf(w, "\tName\tValue\tReadOnly\tSensitive\t\n") - fmt.Fprintf(w, "\t----\t-----\t--------\t---------\t\n") - - for _, entry := range cfg { - if entry.Default { - continue - } - fmt.Fprintf(w, "\t%v\t%v\t%v\t%v\t\n", entry.Name, entry.Value, entry.ReadOnly, entry.Sensitive) - } - - w.Flush() - }, -} - -var createTopicCmd = &cobra.Command{ - Use: "create TOPIC", - Short: "Create a topic", - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topicName := args[0] - compact := "delete" - if compactFlag { - compact = "compact" - } - err := admin.CreateTopic(topicName, &sarama.TopicDetail{ - NumPartitions: partitionsFlag, - ReplicationFactor: replicasFlag, - ConfigEntries: map[string]*string{ - "cleanup.policy": &compact, - }, - }, false) - if err != nil { - errorExit("Could not create topic %v: %v\n", topicName, err.Error()) - } else { - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - fmt.Fprintf(w, "\xE2\x9C\x85 Created topic!\n") - fmt.Fprintln(w, "\tTopic Name:\t", 
topicName) - fmt.Fprintln(w, "\tPartitions:\t", partitionsFlag) - fmt.Fprintln(w, "\tReplication Factor:\t", replicasFlag) - fmt.Fprintln(w, "\tCleanup Policy:\t", compact) - w.Flush() - } - }, -} - -var addConfigCmd = &cobra.Command{ - Use: "add-config TOPIC KEY VALUE", - Short: "Add config key/value pair to topic", - Args: cobra.ExactArgs(3), // TODO how to unset ? support empty VALUE ? - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topic := args[0] - key := args[1] - value := args[2] - - err := admin.AlterConfig(sarama.TopicResource, topic, map[string]*string{ - key: &value, - }, false) - if err != nil { - errorExit("failed to update topic config: %v", err) - } else { - fmt.Printf("Added config %v=%v to topic %v.\n", key, value, topic) - } - }, -} - -var removeConfigCmd = &cobra.Command{ - Use: "rm-config TOPIC ATTR1,ATTR2...", - Short: "Remove attributes from topic", - Args: cobra.ExactArgs(2), - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topic := args[0] - attrsToRemove := strings.Split(args[1], ",") - - updatedTopicConfigs := make(map[string]*string) - - allTopicConfigs, err := admin.DescribeConfig(sarama.ConfigResource{ - Type: sarama.TopicResource, - Name: topic, - }) - if err != nil { - errorExit("failed to describe topic config: %v", err) - } - - for _, v := range allTopicConfigs { - if !slices.Contains(attrsToRemove, v.Name) { - updatedTopicConfigs[v.Name] = &v.Value - } - } - - err = admin.AlterConfig(sarama.TopicResource, topic, updatedTopicConfigs, false) - if err != nil { - errorExit("failed to remove attributes from topic config: %v", err) - } - fmt.Printf("Removed attributes %v from topic %v.\n", attrsToRemove, topic) - }, -} - -var deleteTopicCmd = &cobra.Command{ - Use: "delete TOPIC", - Short: "Delete a topic", - Args: cobra.ExactArgs(1), - ValidArgsFunction: validTopicArgs, - Run: func(cmd *cobra.Command, args []string) { - admin := getClusterAdmin() - - topicName := args[0] - err := admin.DeleteTopic(topicName) - if err != nil { - errorExit("Could not delete topic %v: %v\n", topicName, err.Error()) - } else { - fmt.Fprintf(outWriter, "\xE2\x9C\x85 Deleted topic %v!\n", topicName) - } - }, -} - -var lagCmd = &cobra.Command{ - Use: "lag", - Short: "Display the total lags for each consumer group", - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - topic := args[0] - admin := getClusterAdmin() - defer admin.Close() - - // Describe the topic - topicDetails, err := admin.DescribeTopics([]string{topic}) - if err != nil || len(topicDetails) == 0 { - errorExit("Unable to describe topics: %v\n", err) - } - - // Get the list of partitions for the topic - partitions := make([]int32, 0, len(topicDetails[0].Partitions)) - for _, partition := range topicDetails[0].Partitions { - partitions = append(partitions, partition.ID) - } - highWatermarks := getHighWatermarks(topic, partitions) - - // List all consumer groups - consumerGroups, err := admin.ListConsumerGroups() - if err != nil { - errorExit("Unable to list consumer groups: %v\n", err) - } - - var groups []string - for group := range consumerGroups { - groups = append(groups, group) - } - - // Describe all consumer groups - groupsInfo, err := admin.DescribeConsumerGroups(groups) - if err != nil { - errorExit("Unable to describe consumer groups: %v\n", err) - } - - // Calculate lag for each group - lagInfo := make(map[string]int64) - groupStates := make(map[string]string) // To store the state of each group - for _, group := range 
groupsInfo { - var sum int64 - show := false - for _, member := range group.Members { - assignment, err := member.GetMemberAssignment() - if err != nil || assignment == nil { - continue - } - - metadata, err := member.GetMemberMetadata() - if err != nil || metadata == nil { - continue - } - - if topicPartitions, exist := assignment.Topics[topic]; exist { - show = true - resp, err := admin.ListConsumerGroupOffsets(group.GroupId, map[string][]int32{topic: topicPartitions}) - if err != nil { - fmt.Fprintf(os.Stderr, "Error fetching offsets for group %s: %v\n", group.GroupId, err) - continue - } - - if blocks, ok := resp.Blocks[topic]; ok { - for pid, block := range blocks { - if hwm, ok := highWatermarks[pid]; ok { - if block.Offset > hwm { - fmt.Fprintf(os.Stderr, "Warning: Consumer offset (%d) is greater than high watermark (%d) for partition %d in group %s\n", block.Offset, hwm, pid, group.GroupId) - } else if block.Offset < 0 { - // Skip partitions with negative offsets - } else { - sum += hwm - block.Offset - } - } - } - } - } - } - - if show && sum >= 0 { - lagInfo[group.GroupId] = sum - groupStates[group.GroupId] = group.State // Store the state of the group - } - } - - // Print the lag information along with group state - w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) - if !noHeaderFlag { - fmt.Fprintf(w, "GROUP ID\tSTATE\tLAG\n") - } - for group, lag := range lagInfo { - fmt.Fprintf(w, "%v\t%v\t%v\n", group, groupStates[group], lag) - } - w.Flush() - }, -} diff --git a/cmd/kaf/topic_test.go b/cmd/kaf/topic_test.go deleted file mode 100644 index be0a2146..00000000 --- a/cmd/kaf/topic_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestTopic(t *testing.T) { - newTopic := fmt.Sprintf("new-topic-%d", time.Now().Unix()) - - t.Run("ls before new topic", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "topic", "ls") - require.NotContains(t, out, newTopic) - }) - - t.Run("create new topic", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "topic", "create", newTopic) - require.Contains(t, out, "Created topic!") - require.Contains(t, out, newTopic) - }) - - t.Run("ls", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "topic", "ls") - require.Contains(t, out, newTopic) - }) - - t.Run("describe", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "topic", "describe", newTopic) - require.Contains(t, out, newTopic) - }) - - t.Run("delete", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "topic", "delete", newTopic) - require.Contains(t, out, fmt.Sprintf("Deleted topic %s!", newTopic)) - }) - - t.Run("ls after deleted", func(t *testing.T) { - out := runCmdWithBroker(t, nil, "topic", "ls") - require.NotContains(t, out, newTopic) - }) -} diff --git a/cmd/kaf/topics_test.go b/cmd/kaf/topics_test.go deleted file mode 100644 index c4c7b2c6..00000000 --- a/cmd/kaf/topics_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestTopics(t *testing.T) { - out := runCmdWithBroker(t, nil, "topics") - require.Contains(t, out, "kaf-testing") - require.Contains(t, out, "gnomock-kafka") -} diff --git a/cmd/kaf/valid.go b/cmd/kaf/valid.go deleted file mode 100644 index c58d73e3..00000000 --- a/cmd/kaf/valid.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "github.com/spf13/cobra" -) - -func validConfigArgs(cmd 
*cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - clusterList := make([]string, 0, len(cfg.Clusters)) - for _, cluster := range cfg.Clusters { - clusterList = append(clusterList, cluster.Name) - } - return clusterList, cobra.ShellCompDirectiveNoFileComp -} - -func validGroupArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - admin := getClusterAdmin() - - groups, err := admin.ListConsumerGroups() - if err != nil { - errorExit("Unable to list consumer groups: %v\n", err) - } - groupList := make([]string, 0, len(groups)) - for grp := range groups { - groupList = append(groupList, grp) - } - return groupList, cobra.ShellCompDirectiveNoFileComp -} - -func validTopicArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - admin := getClusterAdmin() - - topics, err := admin.ListTopics() - if err != nil { - errorExit("Unable to list topics: %v\n", err) - } - topicList := make([]string, 0, len(topics)) - for topic := range topics { - topicList = append(topicList, topic) - } - return topicList, cobra.ShellCompDirectiveNoFileComp -} diff --git a/go.mod b/go.mod index e4ff5df9..b4cf499b 100644 --- a/go.mod +++ b/go.mod @@ -1,35 +1,35 @@ module github.com/birdayz/kaf -go 1.22 +go 1.24.0 require ( - github.com/IBM/sarama v1.43.2 - github.com/Landoop/schema-registry v0.0.0-20190327143759-50a5701c1891 - github.com/Masterminds/sprig v2.22.0+incompatible - github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 + github.com/Masterminds/sprig/v3 v3.3.0 github.com/aws/aws-sdk-go-v2/config v1.27.39 - github.com/golang/protobuf v1.5.4 + github.com/bufbuild/protocompile v0.14.1 github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e - github.com/jhump/protoreflect v1.16.0 github.com/linkedin/goavro/v2 v2.13.1 - github.com/magiconair/properties v1.8.7 + github.com/magiconair/properties v1.8.10 github.com/manifoldco/promptui v0.9.0 github.com/mattn/go-colorable v0.1.13 - github.com/mitchellh/go-homedir v1.1.0 - github.com/orlangure/gnomock v0.28.0 github.com/spf13/cobra v1.8.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.11.1 + github.com/testcontainers/testcontainers-go/modules/redpanda v0.40.0 + github.com/twmb/franz-go v1.20.6 + github.com/twmb/franz-go/pkg/kadm v1.17.2 + github.com/twmb/franz-go/pkg/kmsg v1.12.0 github.com/vmihailenco/msgpack/v5 v5.4.1 - github.com/xdg/scram v1.0.5 - golang.org/x/oauth2 v0.18.0 - gopkg.in/yaml.v2 v2.4.0 + golang.org/x/oauth2 v0.35.0 + golang.org/x/sync v0.19.0 + google.golang.org/protobuf v1.36.11 + gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + dario.cat/mergo v1.0.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.5.0 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/aws/aws-sdk-go-v2 v1.31.0 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.37 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect @@ -42,56 +42,67 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 // indirect github.com/aws/smithy-go v1.21.0 // indirect - github.com/bufbuild/protocompile v0.10.0 // indirect + 
github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v24.0.9+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect - github.com/eapache/queue v1.1.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v28.5.1+incompatible // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/ebitengine/purego v0.8.4 // indirect github.com/fatih/color v1.13.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jcmturner/aescts/v2 v2.0.0 // indirect - github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.7.6 // indirect - github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect - github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/klauspost/compress v1.18.2 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - 
github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/segmentio/kafka-go v0.4.39 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v4 v4.25.6 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/testcontainers/testcontainers-go v0.40.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/xdg/stringprep v1.0.3 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/sys v0.39.0 // indirect + google.golang.org/grpc v1.79.1 // indirect ) diff --git a/go.sum b/go.sum index 52d9f3d8..5c996d73 100644 --- a/go.sum +++ b/go.sum @@ -1,19 +1,17 @@ -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= -github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= -github.com/Landoop/schema-registry v0.0.0-20190327143759-50a5701c1891 h1:DeXNO7Cb5W1ofU/xPjDE1kg6JL21vYZGow54ywwTKBA= -github.com/Landoop/schema-registry v0.0.0-20190327143759-50a5701c1891/go.mod h1:IwIgXaypux+daBFS0gWtSfpSD38wK3mUzVBkX3Kneoo= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= 
-github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 h1:UyjtGmO0Uwl/K+zpzPwLoXzMhcN9xmnR2nrqJoBrg3c= -github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0/go.mod h1:TJAXuFs2HcMib3sN5L0gUC+Q01Qvy3DemvA55WuC+iA= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= @@ -40,96 +38,86 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 h1:VzudTFrDCIDakXtemR7l6Qzt2+JY github.com/aws/aws-sdk-go-v2/service/sts v1.31.3/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM= -github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 
h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= -github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= -github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= +github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= 
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e h1:0aewS5NTyxftZHSnFaJmWE5oCCrj4DyEXkAiMa1iZJM= github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= -github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= -github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= -github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= -github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= -github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= -github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= -github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= -github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= -github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jhump/protoreflect v1.16.0 h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg= -github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod 
h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/linkedin/goavro/v2 v2.13.1 h1:4qZ5M0QzQFDRqccsroJlgOJznqAS/TpdvXg55h429+I= github.com/linkedin/goavro/v2 v2.13.1/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -139,167 +127,148 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= +github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= 
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/orlangure/gnomock v0.28.0 h1:3xlGullCJxjWjWGjEXUzvGH1tP6nXL0HY/lHt9w8oC8= -github.com/orlangure/gnomock v0.28.0/go.mod h1:mPcZ4UaVkWrN5pdOkkNWtaWwiTA/4KMME9pH/IHg5Gc= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/segmentio/kafka-go v0.4.39 h1:75smaomhvkYRwtuOwqLsdhgCG30B82NsbdkdDfFbvrw= -github.com/segmentio/kafka-go v0.4.39/go.mod h1:T0MLgygYvmqmBvC+s8aCcbVNfJN4znVne5j0Pzowp/Q= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU= +github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= +github.com/testcontainers/testcontainers-go/modules/redpanda v0.40.0 h1:B8f4pGYc2aRlG/3aEEdn/jqLfJL3+q8xAPJypxk2ttg= +github.com/testcontainers/testcontainers-go/modules/redpanda v0.40.0/go.mod h1:PFyDDGtSHEsVmWFzqKudRh1dRBRLywmAgFqtcUatA78= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 
h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/twmb/franz-go v1.20.6 h1:TpQTt4QcixJ1cHEmQGPOERvTzo99s8jAutmS7rbSD6w= +github.com/twmb/franz-go v1.20.6/go.mod h1:u+FzH2sInp7b9HNVv2cZN8AxdXy6y/AQ1Bkptu4c0FM= +github.com/twmb/franz-go/pkg/kadm v1.17.2 h1:g5f1sAxnTkYC6G96pV5u715HWhxd66hWaDZUAQ8xHY8= +github.com/twmb/franz-go/pkg/kadm v1.17.2/go.mod h1:ST55zUB+sUS+0y+GcKY/Tf1XxgVilaFpB9I19UubLmU= +github.com/twmb/franz-go/pkg/kmsg v1.12.0 h1:CbatD7ers1KzDNgJqPbKOq0Bz/WLBdsTH75wgzeVaPc= +github.com/twmb/franz-go/pkg/kmsg v1.12.0/go.mod h1:+DPt4NC8RmI6hqb8G09+3giKObE6uD2Eya6CfqBpeJY= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= -github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= -github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= 
+go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= +golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= 
-google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002 h1:V7Da7qt0MkY3noVANIMVBk28nOnijADeOR3i5Hcvpj4= -google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= diff --git a/pkg/app/app.go b/pkg/app/app.go new file mode 100644 index 00000000..2cf928c3 --- /dev/null +++ b/pkg/app/app.go @@ -0,0 +1,217 @@ +package app + +import ( + "fmt" + "io" + "os" + "text/tabwriter" + + "github.com/hokaccha/go-prettyjson" + "github.com/mattn/go-colorable" + "github.com/spf13/cobra" + "github.com/twmb/franz-go/pkg/kgo" + + "github.com/birdayz/kaf/pkg/avro" + kafclient "github.com/birdayz/kaf/pkg/client" + "github.com/birdayz/kaf/pkg/config" + "github.com/birdayz/kaf/pkg/group" + "github.com/birdayz/kaf/pkg/proto" + "github.com/birdayz/kaf/pkg/topic" +) + +// App holds all shared mutable state for the CLI. It is created once per +// invocation and threaded into every command package. 
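+//
+// A minimal wiring sketch (rootCmd stands in for the CLI's root cobra
+// command, which is built elsewhere):
+//
+//	a := app.New()
+//	a.Root = rootCmd
+//	rootCmd.PersistentPreRunE = func(*cobra.Command, []string) error {
+//		return a.InitConfig()
+//	}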
+type App struct { + // I/O + OutWriter io.Writer + ErrWriter io.Writer + InReader io.Reader + ColorableOut io.Writer + + // Config state + Cfg config.Config + CurrentCluster *config.Cluster + CfgFile string + ClusterOverride string + BrokersFlag []string + SchemaRegistryURL string + + // Shared decode state + SchemaCache *avro.SchemaCache + Reg *proto.DescriptorRegistry + Keyfmt *prettyjson.Formatter + ProtoType string + KeyProtoType string + ProtoFiles []string + ProtoExclude []string + DecodeMsgPack bool + + // Display + NoHeaderFlag bool + + // Root command reference (for completion generation) + Root *cobra.Command +} + +// New creates an App with sane defaults. +func New() *App { + keyfmt := prettyjson.NewFormatter() + keyfmt.Newline = " " // Replace newline with space to avoid condensed output. + keyfmt.Indent = 0 + + return &App{ + OutWriter: os.Stdout, + ErrWriter: os.Stderr, + InReader: os.Stdin, + ColorableOut: colorable.NewColorableStdout(), + Keyfmt: keyfmt, + } +} + +// InitConfig reads the config file and resolves the active cluster. +// Called by PersistentPreRunE on the root command. +func (a *App) InitConfig() error { + var err error + a.Cfg, err = config.ReadConfig(a.CfgFile) + if err != nil { + return fmt.Errorf("invalid config: %w", err) + } + + a.Cfg.ClusterOverride = a.ClusterOverride + + cluster := a.Cfg.ActiveCluster() + if cluster != nil { + a.CurrentCluster = cluster + } else { + a.CurrentCluster = &config.Cluster{ + Brokers: []string{"localhost:9092"}, + } + } + + if a.SchemaRegistryURL != "" { + a.CurrentCluster.SchemaRegistryURL = a.SchemaRegistryURL + a.CurrentCluster.SchemaRegistryCredentials = nil + } + + if a.BrokersFlag != nil { + a.CurrentCluster.Brokers = a.BrokersFlag + } + + return nil +} + +// NewKafClient creates a franz-go based client from the current cluster config. +func (a *App) NewKafClient(opts ...kgo.Opt) (*kafclient.Client, error) { + cl, warnings, err := kafclient.New(a.CurrentCluster, opts...) + if err != nil { + return nil, fmt.Errorf("unable to create client: %w", err) + } + for _, w := range warnings { + fmt.Fprintf(a.ErrWriter, "WARNING: %s\n", w) + } + return cl, nil +} + +// NewSchemaCache creates a schema cache from the current cluster config. +// Returns (nil, nil) if no schema registry is configured. +func (a *App) NewSchemaCache() (*avro.SchemaCache, error) { + if a.CurrentCluster.SchemaRegistryURL == "" { + return nil, nil + } + var username, password string + if creds := a.CurrentCluster.SchemaRegistryCredentials; creds != nil { + username = creds.Username + password = creds.Password + } + cache, warnings, err := avro.NewSchemaCache(a.CurrentCluster.SchemaRegistryURL, username, password) + if err != nil { + return nil, fmt.Errorf("unable to get schema cache: %w", err) + } + for _, w := range warnings { + fmt.Fprintf(a.ErrWriter, "WARNING: %s\n", w) + } + return cache, nil +} + +// AddProtoFlags installs the shared protobuf flags on cmd. +func (a *App) AddProtoFlags(cmd *cobra.Command) { + cmd.Flags().StringSliceVar(&a.ProtoFiles, "proto-include", []string{}, "Path to proto files") + cmd.Flags().StringSliceVar(&a.ProtoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") + cmd.Flags().StringVar(&a.ProtoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") + cmd.Flags().StringVar(&a.KeyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. 
Example: com.test.SampleMessage") +} + +// SetupProtoDescriptorRegistry loads proto files if --proto-type or --key-proto-type is set. +func (a *App) SetupProtoDescriptorRegistry(cmd *cobra.Command, args []string) error { + if a.ProtoType != "" || a.KeyProtoType != "" { + r, err := proto.NewDescriptorRegistry(a.ProtoFiles, a.ProtoExclude) + if err != nil { + return fmt.Errorf("failed to load protobuf files: %w", err) + } + a.Reg = r + } + return nil +} + +// AddNoHeadersFlag installs --no-headers on cmd. +func (a *App) AddNoHeadersFlag(cmd *cobra.Command) { + cmd.Flags().BoolVar(&a.NoHeaderFlag, "no-headers", false, "Hide table headers") +} + +// ValidTopicArgs provides shell completion for topic names. +func (a *App) ValidTopicArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + cl, err := a.NewKafClient() + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + defer cl.Close() + + topics, err := topic.List(cmd.Context(), cl.Admin) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + names := make([]string, 0, len(topics)) + for _, t := range topics { + names = append(names, t.Name) + } + return names, cobra.ShellCompDirectiveNoFileComp +} + +// ValidGroupArgs provides shell completion for group names. +func (a *App) ValidGroupArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + cl, err := a.NewKafClient() + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + defer cl.Close() + + names, err := group.ListGroupNames(cmd.Context(), cl.Admin) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + return names, cobra.ShellCompDirectiveNoFileComp +} + +// ValidConfigArgs provides shell completion for cluster names. +func (a *App) ValidConfigArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + clusterList := make([]string, 0, len(a.Cfg.Clusters)) + for _, cluster := range a.Cfg.Clusters { + clusterList = append(clusterList, cluster.Name) + } + return clusterList, cobra.ShellCompDirectiveNoFileComp +} + +const ( + TabwriterMinWidth = 6 + TabwriterMinWidthNested = 2 + TabwriterWidth = 4 + TabwriterPadding = 3 + TabwriterPadChar = ' ' + TabwriterFlags = 0 +) + +// NewTabWriter creates a standard tabwriter for CLI output. +func NewTabWriter(w io.Writer) *tabwriter.Writer { + return tabwriter.NewWriter(w, TabwriterMinWidth, TabwriterWidth, TabwriterPadding, TabwriterPadChar, TabwriterFlags) +} + diff --git a/pkg/app/format.go b/pkg/app/format.go new file mode 100644 index 00000000..de370f8c --- /dev/null +++ b/pkg/app/format.go @@ -0,0 +1,99 @@ +package app + +import ( + "encoding/json" + "fmt" + + "github.com/hokaccha/go-prettyjson" + "github.com/spf13/cobra" +) + +// OutputFormat controls how consumed messages are printed. 
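+// It implements the pflag.Value interface (String, Set, Type), so a command
+// can bind it to a flag directly; Set rejects anything outside default, raw,
+// json, json-each-row and hex. A sketch of the binding (cmd is any
+// *cobra.Command; the flag name is assumed):
+//
+//	format := OutputFormatDefault
+//	cmd.Flags().Var(&format, "output", "Output format")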
+type OutputFormat string + +const ( + OutputFormatDefault OutputFormat = "default" + OutputFormatRaw OutputFormat = "raw" + OutputFormatJSON OutputFormat = "json" + OutputFormatJSONEachRow OutputFormat = "json-each-row" + OutputFormatHex OutputFormat = "hex" +) + +func (e *OutputFormat) String() string { + return string(*e) +} + +func (e *OutputFormat) Set(v string) error { + switch v { + case "default", "raw", "json", "json-each-row", "hex": + *e = OutputFormat(v) + return nil + default: + return fmt.Errorf("must be one of: default, raw, json, json-each-row, hex") + } +} + +func (e *OutputFormat) Type() string { + return "OutputFormat" +} + +// CompleteOutputFormat provides shell completion for --output. +func CompleteOutputFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"default", "raw", "json", "json-each-row", "hex"}, cobra.ShellCompDirectiveNoFileComp +} + +// InputFormat controls how produced messages are read. +type InputFormat string + +const ( + InputFormatDefault InputFormat = "default" + InputFormatJSONEachRow InputFormat = "json-each-row" + InputFormatHex InputFormat = "hex" +) + +func (e *InputFormat) String() string { + return string(*e) +} + +func (e *InputFormat) Set(v string) error { + switch v { + case "default", "json-each-row", "hex": + *e = InputFormat(v) + return nil + default: + return fmt.Errorf("must be one of: default, json-each-row, hex") + } +} + +func (e *InputFormat) Type() string { + return "InputFormat" +} + +// CompleteInputFormat provides shell completion for --input. +func CompleteInputFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"default", "json-each-row", "hex"}, cobra.ShellCompDirectiveNoFileComp +} + +// IsJSON returns true if data is valid JSON. +func IsJSON(data []byte) bool { + var i any + return json.Unmarshal(data, &i) == nil +} + +// FormatJSON unmarshals data to an interface for JSON re-encoding. +// Returns the string representation if not valid JSON. +func FormatJSON(data []byte) any { + var i any + if err := json.Unmarshal(data, &i); err != nil { + return string(data) + } + return i +} + +// FormatValue pretty-prints JSON data. +func FormatValue(data []byte) []byte { + if b, err := prettyjson.Format(data); err == nil { + return b + } + return data +} diff --git a/pkg/app/message.go b/pkg/app/message.go new file mode 100644 index 00000000..b6c71f28 --- /dev/null +++ b/pkg/app/message.go @@ -0,0 +1,266 @@ +package app + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/vmihailenco/msgpack/v5" + goproto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/birdayz/kaf/pkg/proto" +) + +// MessageHeader is a JSON-serializable record header. +type MessageHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +// JSONEachRowMessage is the wire format for --output json-each-row / --input json-each-row. +type JSONEachRowMessage struct { + Topic string `json:"topic"` + Partition int32 `json:"partition"` + Offset int64 `json:"offset"` + Timestamp time.Time `json:"timestamp"` + Headers []MessageHeader `json:"headers"` + Key string `json:"key"` + Payload string `json:"payload"` +} + +// HandleMessage decodes, formats, and prints a single consumed record. 
+// It is used by both the consume command and the group peek command. +func (a *App) HandleMessage(rec *kgo.Record, mu *sync.Mutex, outputFmt OutputFormat, headerFilter map[string]string) { + if !CheckHeaders(rec.Headers, headerFilter) { + return + } + + var stderr bytes.Buffer + + var dataToDisplay []byte + var keyToDisplay []byte + var err error + + if a.ProtoType != "" { + dataToDisplay, err = ProtoDecode(a.Reg, rec.Value, a.ProtoType) + if err != nil { + fmt.Fprintf(&stderr, "failed to decode proto. falling back to binary output. Error: %v\n", err) + } + } else { + dataToDisplay, err = a.AvroDecode(rec.Value) + if err != nil { + fmt.Fprintf(&stderr, "could not decode Avro data: %v\n", err) + } + } + + if a.KeyProtoType != "" { + keyToDisplay, err = ProtoDecode(a.Reg, rec.Key, a.KeyProtoType) + if err != nil { + fmt.Fprintf(&stderr, "failed to decode proto key. falling back to binary output. Error: %v\n", err) + } + } else { + keyToDisplay, err = a.AvroDecode(rec.Key) + if err != nil { + fmt.Fprintf(&stderr, "could not decode Avro data: %v\n", err) + } + } + + if a.DecodeMsgPack { + var obj any + err = msgpack.Unmarshal(rec.Value, &obj) + if err != nil { + fmt.Fprintf(&stderr, "could not decode msgpack data: %v\n", err) + } + + dataToDisplay, err = json.Marshal(obj) + if err != nil { + fmt.Fprintf(&stderr, "could not decode msgpack data: %v\n", err) + } + } + + dataToDisplay = a.FormatMessage(rec, dataToDisplay, keyToDisplay, &stderr, outputFmt) + + mu.Lock() + stderr.WriteTo(a.ErrWriter) + _, _ = a.ColorableOut.Write(dataToDisplay) + fmt.Fprintln(a.OutWriter) + mu.Unlock() +} + +// FormatMessage renders a record according to the output format. +func (a *App) FormatMessage(rec *kgo.Record, rawMessage []byte, keyToDisplay []byte, stderr *bytes.Buffer, outputFmt OutputFormat) []byte { + switch outputFmt { + case OutputFormatRaw: + return rawMessage + case OutputFormatJSON: + jsonMessage := make(map[string]any) + jsonMessage["partition"] = rec.Partition + jsonMessage["offset"] = rec.Offset + jsonMessage["timestamp"] = rec.Timestamp + if len(rec.Headers) > 0 { + jsonMessage["headers"] = rec.Headers + } + jsonMessage["key"] = FormatJSON(keyToDisplay) + jsonMessage["payload"] = FormatJSON(rawMessage) + + jsonToDisplay, err := json.Marshal(jsonMessage) + if err != nil { + fmt.Fprintf(stderr, "could not decode JSON data: %v", err) + } + return jsonToDisplay + case OutputFormatJSONEachRow: + jsonMessage := JSONEachRowMessage{ + Topic: rec.Topic, + Partition: rec.Partition, + Offset: rec.Offset, + Timestamp: rec.Timestamp, + Headers: make([]MessageHeader, len(rec.Headers)), + Key: string(keyToDisplay), + Payload: string(rawMessage), + } + for i, hdr := range rec.Headers { + jsonMessage.Headers[i] = MessageHeader{ + Key: hdr.Key, + Value: ParseHeader(hdr.Value), + } + } + jsonToDisplay, err := json.Marshal(jsonMessage) + if err != nil { + fmt.Fprintf(stderr, "could not decode JSON data: %v", err) + } + return jsonToDisplay + case OutputFormatHex: + return []byte(hex.EncodeToString(rawMessage)) + default: + if IsJSON(rawMessage) { + rawMessage = FormatValue(rawMessage) + } + if IsJSON(keyToDisplay) { + keyToDisplay = a.FormatKey(keyToDisplay) + } + + w := NewTabWriter(stderr) + if len(rec.Headers) > 0 { + fmt.Fprintf(w, "Headers:\n") + } + for _, hdr := range rec.Headers { + fmt.Fprintf(w, "\tKey: %v\tValue: %v\n", hdr.Key, ParseHeader(hdr.Value)) + } + if len(rec.Key) > 0 { + fmt.Fprintf(w, "Key:\t%v\n", string(keyToDisplay)) + } + fmt.Fprintf(w, "Partition:\t%v\nOffset:\t%v\nTimestamp:\t%v\n", 
rec.Partition, rec.Offset, rec.Timestamp) + w.Flush() + + return rawMessage + } +} + +// FormatKey pretty-prints a JSON key using the compact formatter. +func (a *App) FormatKey(key []byte) []byte { + if b, err := a.Keyfmt.Format(key); err == nil { + return b + } + return key +} + +// AvroDecode attempts Avro decoding if a schema cache is set. +func (a *App) AvroDecode(b []byte) ([]byte, error) { + if a.SchemaCache != nil { + return a.SchemaCache.DecodeMessage(b) + } + return b, nil +} + +// ProtoDecode decodes protobuf bytes to JSON using the given registry. +func ProtoDecode(reg *proto.DescriptorRegistry, b []byte, _type string) ([]byte, error) { + dynamicMessage := reg.MessageForType(_type) + if dynamicMessage == nil { + return b, nil + } + + if err := goproto.Unmarshal(b, dynamicMessage); err != nil { + return nil, err + } + + jsonBytes, err := protojson.Marshal(dynamicMessage) + if err != nil { + return nil, err + } + return jsonBytes, nil +} + +// ProtoEncode encodes JSON input to protobuf bytes using the given registry. +func (a *App) ProtoEncode(input []byte, msgType string) ([]byte, error) { + dynamicMessage := a.Reg.MessageForType(msgType) + if dynamicMessage == nil { + return nil, fmt.Errorf("proto type %v not found", msgType) + } + if err := protojson.Unmarshal(input, dynamicMessage); err != nil { + return nil, fmt.Errorf("parse input JSON as proto type %v: %w", msgType, err) + } + pbBytes, err := goproto.Marshal(dynamicMessage) + if err != nil { + return nil, fmt.Errorf("marshal proto: %w", err) + } + return pbBytes, nil +} + +// ParseHeader tries to decode azure eventhub-specific header encoding, +// falling back to raw string. +func ParseHeader(hdrBytes []byte) string { + if len(hdrBytes) == 0 { + return "" + } + switch hdrBytes[0] { + case 161: + if len(hdrBytes) < 2 { + return string(hdrBytes) + } + end := 2 + int(hdrBytes[1]) + if end > len(hdrBytes) { + return string(hdrBytes) + } + return string(hdrBytes[2:end]) + case 131: + if len(hdrBytes) < 9 { + return string(hdrBytes) + } + return strconv.FormatUint(binary.BigEndian.Uint64(hdrBytes[1:9]), 10) + default: + return string(hdrBytes) + } +} + +// CheckHeaders returns true if rec headers match all filter entries. +func CheckHeaders(headers []kgo.RecordHeader, filter map[string]string) bool { + if len(filter) == 0 { + return true + } + matchCount := 0 + for _, h := range headers { + if val, ok := filter[h.Key]; ok && ParseHeader(h.Value) == val { + matchCount++ + } + } + return matchCount >= len(filter) +} + +// EndOffsetMapForTopic extracts per-partition end offsets for a single topic. +func EndOffsetMapForTopic(offsets kadm.ListedOffsets, topic string) map[int32]int64 { + m := make(map[int32]int64) + offsets.Each(func(lo kadm.ListedOffset) { + if lo.Topic == topic { + m[lo.Partition] = lo.Offset + } + }) + return m +} diff --git a/pkg/avro/avro_integration_test.go b/pkg/avro/avro_integration_test.go new file mode 100644 index 00000000..841af5e5 --- /dev/null +++ b/pkg/avro/avro_integration_test.go @@ -0,0 +1,293 @@ +package avro + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/modules/redpanda" +) + +// registerSchema registers an Avro schema under the given subject and returns +// the schema ID assigned by the registry. 
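+// It does so by POSTing the schema, wrapped as a JSON string, to the
+// registry's /subjects/{subject}/versions endpoint, which is the standard
+// Confluent Schema Registry registration call (Redpanda serves the same API).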
+func registerSchema(t *testing.T, srURL, subject, schema string) int { + t.Helper() + + body := fmt.Sprintf(`{"schema": %s}`, strconv.Quote(schema)) + resp, err := http.Post( + srURL+"/subjects/"+subject+"/versions", + "application/vnd.schemaregistry.v1+json", + strings.NewReader(body), + ) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode, "schema registration failed") + + var result struct { + ID int `json:"id"` + } + require.NoError(t, json.NewDecoder(resp.Body).Decode(&result)) + require.NotZero(t, result.ID, "schema ID should not be zero") + return result.ID +} + +// startRedpanda creates a fresh Redpanda container and returns the schema +// registry URL. The container is terminated when the test completes. +func startRedpanda(t *testing.T) string { + t.Helper() + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + srURL, err := rpContainer.SchemaRegistryAddress(ctx) + require.NoError(t, err) + return srURL +} + +const simpleSchema = `{"type":"record","name":"Test","fields":[{"name":"name","type":"string"},{"name":"age","type":"int"}]}` + +func TestEncodeDecodeRoundTrip(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + schemaID := registerSchema(t, srURL, "roundtrip-value", simpleSchema) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + input := []byte(`{"name":"Alice","age":30}`) + encoded, err := cache.EncodeMessage(schemaID, input) + require.NoError(t, err) + require.True(t, len(encoded) >= 5, "encoded message must have at least 5-byte header") + assert.Equal(t, byte(0x00), encoded[0], "first byte must be the magic byte") + + decoded, err := cache.DecodeMessage(encoded) + require.NoError(t, err) + + // Compare as parsed JSON to ignore field ordering differences. + var expected, actual map[string]any + require.NoError(t, json.Unmarshal(input, &expected)) + require.NoError(t, json.Unmarshal(decoded, &actual)) + assert.Equal(t, expected, actual) +} + +func TestDecodeNonAvroMessage(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + input := []byte("this is not avro data at all") + output, err := cache.DecodeMessage(input) + require.NoError(t, err) + assert.Equal(t, input, output, "non-Avro message should pass through unchanged") +} + +func TestDecodeShortMessage(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + inputs := [][]byte{ + {}, + {0x00}, + {0x00, 0x01}, + {0x00, 0x01, 0x02}, + {0x00, 0x01, 0x02, 0x03}, + } + + for _, input := range inputs { + output, err := cache.DecodeMessage(input) + require.NoError(t, err) + assert.Equal(t, input, output, "message shorter than 5 bytes should pass through") + } +} + +func TestDecodeMissingMagicByte(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + // 6 bytes, but first byte is not the magic byte 0x00. 
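+	// (In the Confluent wire format, byte 0 must be the 0x00 magic byte and
+	// bytes 1-4 the big-endian schema ID; anything else passes through as-is.)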
+ input := []byte{0xFF, 0x00, 0x00, 0x00, 0x01, 0x42} + output, err := cache.DecodeMessage(input) + require.NoError(t, err) + assert.Equal(t, input, output, "message without magic byte should pass through") +} + +func TestDecodeInvalidSchemaID(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + // Construct a message with the magic byte but a schema ID that does not + // exist in the registry. + msg := make([]byte, 10) + msg[0] = 0x00 + binary.BigEndian.PutUint32(msg[1:5], 99999) + // Some garbage payload bytes after the header. + copy(msg[5:], []byte{0x01, 0x02, 0x03, 0x04, 0x05}) + + _, err = cache.DecodeMessage(msg) + require.Error(t, err, "decoding with non-existent schema ID should fail") +} + +func TestEncodeInvalidSchemaID(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + _, err = cache.EncodeMessage(99999, []byte(`{"name":"Bob","age":25}`)) + require.Error(t, err, "encoding with non-existent schema ID should fail") +} + +func TestEncodeInvalidJSON(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + schemaID := registerSchema(t, srURL, "invalid-json-value", simpleSchema) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + _, err = cache.EncodeMessage(schemaID, []byte(`not valid json`)) + require.Error(t, err, "encoding invalid JSON should fail") +} + +func TestSchemaCache(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + schemaID := registerSchema(t, srURL, "cache-test-value", simpleSchema) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + msg1 := []byte(`{"name":"CacheTest1","age":10}`) + msg2 := []byte(`{"name":"CacheTest2","age":20}`) + + encoded1, err := cache.EncodeMessage(schemaID, msg1) + require.NoError(t, err) + + encoded2, err := cache.EncodeMessage(schemaID, msg2) + require.NoError(t, err) + + decoded1, err := cache.DecodeMessage(encoded1) + require.NoError(t, err) + + decoded2, err := cache.DecodeMessage(encoded2) + require.NoError(t, err) + + var out1, out2 map[string]any + require.NoError(t, json.Unmarshal(decoded1, &out1)) + require.NoError(t, json.Unmarshal(decoded2, &out2)) + + assert.Equal(t, "CacheTest1", out1["name"]) + assert.Equal(t, float64(10), out1["age"]) + assert.Equal(t, "CacheTest2", out2["name"]) + assert.Equal(t, float64(20), out2["age"]) +} + +func TestComplexSchema(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + srURL := startRedpanda(t) + + complexSchema := `{ + "type": "record", + "name": "User", + "namespace": "com.example", + "fields": [ + {"name": "id", "type": "long"}, + {"name": "username", "type": "string"}, + {"name": "email", "type": ["null", "string"], "default": null}, + {"name": "active", "type": "boolean"}, + {"name": "address", "type": { + "type": "record", + "name": "Address", + "fields": [ + {"name": "street", "type": "string"}, + {"name": "city", "type": "string"}, + {"name": "zip", "type": "string"} + ] + }}, + {"name": "tags", "type": {"type": "array", "items": "string"}} + ] + }` + + schemaID := registerSchema(t, srURL, "complex-value", complexSchema) + + cache, _, err := NewSchemaCache(srURL, "", "") + require.NoError(t, err) + + input := 
[]byte(`{ + "id": 42, + "username": "torvalds", + "email": {"string": "linus@kernel.org"}, + "active": true, + "address": { + "street": "123 Kernel Lane", + "city": "Portland", + "zip": "97201" + }, + "tags": ["linux", "git"] + }`) + + encoded, err := cache.EncodeMessage(schemaID, input) + require.NoError(t, err) + + decoded, err := cache.DecodeMessage(encoded) + require.NoError(t, err) + + var result map[string]any + require.NoError(t, json.Unmarshal(decoded, &result)) + + assert.Equal(t, float64(42), result["id"]) + assert.Equal(t, "torvalds", result["username"]) + assert.Equal(t, true, result["active"]) + + addr, ok := result["address"].(map[string]any) + require.True(t, ok, "address should be a map") + assert.Equal(t, "123 Kernel Lane", addr["street"]) + assert.Equal(t, "Portland", addr["city"]) + assert.Equal(t, "97201", addr["zip"]) + + tags, ok := result["tags"].([]any) + require.True(t, ok, "tags should be an array") + assert.Equal(t, []any{"linux", "git"}, tags) +} diff --git a/pkg/avro/schema.go b/pkg/avro/schema.go index b1ac8552..ea42e46b 100644 --- a/pkg/avro/schema.go +++ b/pkg/avro/schema.go @@ -3,10 +3,14 @@ package avro import ( "encoding/base64" "encoding/binary" + "encoding/json" + "fmt" + "io" "net/http" + "strings" "sync" + "time" - schemaregistry "github.com/Landoop/schema-registry" "github.com/linkedin/goavro/v2" ) @@ -19,47 +23,87 @@ type cachedCodec struct { // SchemaCache connects to the Confluent schema registry and maintains // a cached versions of Avro schemas and codecs. type SchemaCache struct { - client *schemaregistry.Client + baseURL string + httpClient *http.Client mu sync.RWMutex codecsBySchemaID map[int]*cachedCodec } -type transport struct { +// NewSchemaCache returns a new Cache instance. +// warnings are returned as non-fatal diagnostic messages. +func NewSchemaCache(url string, username string, password string) (cache *SchemaCache, warnings []string, err error) { + var warns []string + var encodedCredentials string + if username != "" { + encodedCredentials = base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + } + if encodedCredentials != "" && strings.HasPrefix(url, "http://") { + warns = append(warns, "schema registry credentials sent over plaintext HTTP") + } + + httpClient := &http.Client{ + Timeout: 10 * time.Second, + Transport: &authTransport{ + underlyingTransport: http.DefaultTransport, + encodedCredentials: encodedCredentials, + }, + } + + c := &SchemaCache{ + baseURL: strings.TrimRight(url, "/"), + httpClient: httpClient, + codecsBySchemaID: make(map[int]*cachedCodec), + } + return c, warns, nil +} + +type authTransport struct { underlyingTransport http.RoundTripper encodedCredentials string } // RoundTrip wraps the underlying transport's RoundTripper and injects a // HTTP Basic authentication header if credentials are provided. 
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { +func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { if t.encodedCredentials != "" { - req.Header.Add("Authorization", "Basic "+t.encodedCredentials) + req.Header.Set("Authorization", "Basic "+t.encodedCredentials) } return t.underlyingTransport.RoundTrip(req) } -// NewSchemaCache returns a new Cache instance -func NewSchemaCache(url string, username string, password string) (*SchemaCache, error) { - var encodedCredentials string - if username != "" { - encodedCredentials = base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) +// getSchemaByID fetches a schema string from the Confluent Schema Registry by ID. +func (c *SchemaCache) getSchemaByID(id int) (string, error) { + url := fmt.Sprintf("%s/schemas/ids/%d", c.baseURL, id) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", fmt.Errorf("create request: %w", err) } - httpClient := &http.Client{Transport: &transport{ - underlyingTransport: http.DefaultTransport, - encodedCredentials: encodedCredentials, - }} + req.Header.Set("Accept", "application/vnd.schemaregistry.v1+json, application/json") - client, err := schemaregistry.NewClient(url, schemaregistry.UsingClient(httpClient)) + resp, err := c.httpClient.Do(req) if err != nil { - return nil, err + return "", fmt.Errorf("fetch schema id %d: %w", id, err) } + defer resp.Body.Close() - c := &SchemaCache{ - codecsBySchemaID: make(map[int]*cachedCodec), - client: client, + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("read schema response: %w", err) } - return c, nil + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("schema registry returned %d for id %d: %s", resp.StatusCode, id, body) + } + + var result struct { + Schema string `json:"schema"` + } + if err := json.Unmarshal(body, &result); err != nil { + return "", fmt.Errorf("decode schema response: %w", err) + } + + return result.Schema, nil } // getCodecForSchemaID returns a goavro codec for transforming data. @@ -89,12 +133,19 @@ func (c *SchemaCache) getCodecForSchemaID(schemaID int) (codec *goavro.Codec, er c.mu.Unlock() defer func() { + if err != nil { + // Don't cache failures -- a transient network blip should not + // permanently poison the cache for this schema ID. + c.mu.Lock() + delete(c.codecsBySchemaID, schemaID) + c.mu.Unlock() + } cc.codec = codec - cc.err = err // Any failure is permanent on a per-schema basis. + cc.err = err close(cc.done) // Promise fulfilled. }() - schema, err := c.client.GetSchemaById(schemaID) + schema, err := c.getSchemaByID(schemaID) if err != nil { return nil, err } diff --git a/pkg/avro/schema_test.go b/pkg/avro/schema_test.go new file mode 100644 index 00000000..b23f219a --- /dev/null +++ b/pkg/avro/schema_test.go @@ -0,0 +1,63 @@ +package avro + +import ( + "encoding/binary" + "testing" +) + +func TestDecodeMessage_NonAvro(t *testing.T) { + // Messages without avro magic byte should be returned as-is. + input := []byte("plain text message") + cache := &SchemaCache{codecsBySchemaID: make(map[int]*cachedCodec)} + out, err := cache.DecodeMessage(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if string(out) != string(input) { + t.Fatalf("expected passthrough, got %q", out) + } +} + +func TestDecodeMessage_TooShort(t *testing.T) { + // Less than 5 bytes should be returned as-is. 
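+	// (Five bytes is the minimum header: the 0x00 magic byte plus a 4-byte schema ID.)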
+ input := []byte{0x00, 0x01, 0x02} + cache := &SchemaCache{codecsBySchemaID: make(map[int]*cachedCodec)} + out, err := cache.DecodeMessage(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if string(out) != string(input) { + t.Fatalf("expected passthrough for short message") + } +} + +func TestDecodeMessage_NoMagicByte(t *testing.T) { + // 5+ bytes but wrong magic byte should be returned as-is. + input := []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x02} + cache := &SchemaCache{codecsBySchemaID: make(map[int]*cachedCodec)} + out, err := cache.DecodeMessage(input) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if string(out) != string(input) { + t.Fatalf("expected passthrough for non-magic byte") + } +} + +func TestWireFormatHeader(t *testing.T) { + // Verify that the Confluent wire format header is correctly structured: + // byte 0: magic byte (0x00) + // bytes 1-4: schema ID as big-endian uint32 + schemaID := uint32(42) + header := make([]byte, 5) + header[0] = 0x00 + binary.BigEndian.PutUint32(header[1:5], schemaID) + + if header[0] != 0x00 { + t.Fatal("magic byte should be 0x00") + } + gotID := binary.BigEndian.Uint32(header[1:5]) + if gotID != schemaID { + t.Fatalf("expected schema ID %d, got %d", schemaID, gotID) + } +} diff --git a/pkg/client/auth.go b/pkg/client/auth.go new file mode 100644 index 00000000..01ac3028 --- /dev/null +++ b/pkg/client/auth.go @@ -0,0 +1,96 @@ +package client + +import ( + "context" + "net/http" + "sync" + "time" + + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/birdayz/kaf/pkg/config" + "github.com/twmb/franz-go/pkg/sasl" + "github.com/twmb/franz-go/pkg/sasl/aws" + "github.com/twmb/franz-go/pkg/sasl/oauth" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +func oauthMechanism(cluster *config.Cluster) sasl.Mechanism { + s := cluster.SASL + + // Static token: no refresh needed. + if s.Token != "" { + return oauth.Oauth(func(_ context.Context) (oauth.Auth, error) { + return oauth.Auth{Token: s.Token}, nil + }) + } + + // OAuth2 client credentials flow with token caching. + tp := &tokenCache{ + cfg: &clientcredentials.Config{ + ClientID: s.ClientID, + ClientSecret: s.ClientSecret, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }, + refreshBuffer: 20 * time.Second, + } + + return oauth.Oauth(func(ctx context.Context) (oauth.Auth, error) { + tok, err := tp.token(ctx) + if err != nil { + return oauth.Auth{}, err + } + return oauth.Auth{Token: tok}, nil + }) +} + +// awsMSKMechanism uses franz-go's native AWS_MSK_IAM SASL mechanism with +// SigV4 signing. Credentials are loaded from the default AWS credential chain. +func awsMSKMechanism(_ *config.Cluster) (sasl.Mechanism, error) { + return aws.ManagedStreamingIAM(func(ctx context.Context) (aws.Auth, error) { + cfg, err := awsconfig.LoadDefaultConfig(ctx) + if err != nil { + return aws.Auth{}, err + } + creds, err := cfg.Credentials.Retrieve(ctx) + if err != nil { + return aws.Auth{}, err + } + return aws.Auth{ + AccessKey: creds.AccessKeyID, + SecretKey: creds.SecretAccessKey, + SessionToken: creds.SessionToken, + }, nil + }), nil +} + +// tokenCache provides thread-safe caching of OAuth2 tokens with refresh. 
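+// A cached token is reused until refreshBuffer before its expiry; the
+// first call after that point fetches a fresh token via the client
+// credentials config and caches it again.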
+type tokenCache struct { + mu sync.Mutex + cfg *clientcredentials.Config + cachedToken string + replaceAt time.Time + refreshBuffer time.Duration +} + +func (tc *tokenCache) token(ctx context.Context) (string, error) { + tc.mu.Lock() + defer tc.mu.Unlock() + + if tc.cachedToken != "" && time.Now().Before(tc.replaceAt) { + return tc.cachedToken, nil + } + + httpClient := &http.Client{Timeout: 10 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + tok, err := tc.cfg.Token(ctx) + if err != nil { + return "", err + } + + tc.cachedToken = tok.AccessToken + tc.replaceAt = tok.Expiry.Add(-tc.refreshBuffer) + return tc.cachedToken, nil +} diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 00000000..6eadc777 --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,171 @@ +package client + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "os" + "strings" + "time" + + "github.com/birdayz/kaf/pkg/config" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/sasl" + "github.com/twmb/franz-go/pkg/sasl/plain" + "github.com/twmb/franz-go/pkg/sasl/scram" +) + +// Client wraps a franz-go kgo.Client and kadm.Client. +type Client struct { + KGO *kgo.Client + Admin *kadm.Client +} + +// Close closes both the admin and kgo clients. +func (c *Client) Close() { + if c.Admin != nil { + c.Admin.Close() + } + if c.KGO != nil { + c.KGO.Close() + } +} + +// New creates a new Client from a Cluster config. +// warnings are returned as non-fatal diagnostic messages (e.g. insecure TLS). +func New(cluster *config.Cluster, opts ...kgo.Opt) (client *Client, warnings []string, err error) { + baseOpts := []kgo.Opt{ + kgo.SeedBrokers(cluster.Brokers...), + kgo.ClientID("kaf"), + kgo.DialTimeout(10 * time.Second), + kgo.RequestTimeoutOverhead(10 * time.Second), + kgo.ConnIdleTimeout(60 * time.Second), + } + + var warns []string + tlsCfg, tlsWarns, err := buildTLS(cluster) + if err != nil { + return nil, nil, fmt.Errorf("TLS config: %w", err) + } + warns = append(warns, tlsWarns...) + if tlsCfg != nil { + baseOpts = append(baseOpts, kgo.DialTLSConfig(tlsCfg)) + } + + saslMech, err := buildSASL(cluster) + if err != nil { + return nil, nil, fmt.Errorf("SASL config: %w", err) + } + if saslMech != nil { + if strings.EqualFold(cluster.SASL.Mechanism, "PLAIN") && tlsCfg == nil { + warns = append(warns, "SASL PLAIN without TLS sends credentials in cleartext") + } + baseOpts = append(baseOpts, kgo.SASL(saslMech)) + } + + baseOpts = append(baseOpts, opts...) + + cl, err := kgo.NewClient(baseOpts...) 
+ if err != nil { + return nil, nil, fmt.Errorf("create kgo client: %w", err) + } + + adm := kadm.NewClient(cl) + + return &Client{ + KGO: cl, + Admin: adm, + }, warns, nil +} + +func buildTLS(cluster *config.Cluster) (*tls.Config, []string, error) { + needsTLS := cluster.TLS != nil || + cluster.SecurityProtocol == "SASL_SSL" || + cluster.SecurityProtocol == "SSL" + + if !needsTLS { + return nil, nil, nil + } + + var warns []string + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if cluster.TLS != nil { + cfg.InsecureSkipVerify = cluster.TLS.Insecure + if cluster.TLS.Insecure { + warns = append(warns, "TLS certificate verification is disabled (insecure)") + } + + if cluster.TLS.Cafile != "" { + caCert, err := os.ReadFile(cluster.TLS.Cafile) + if err != nil { + return nil, nil, fmt.Errorf("read CA file: %w", err) + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(caCert) { + return nil, nil, fmt.Errorf("CA file %q contains no valid PEM certificates", cluster.TLS.Cafile) + } + cfg.RootCAs = pool + } + + hasClientCert := cluster.TLS.Clientfile != "" + hasClientKey := cluster.TLS.Clientkeyfile != "" + if hasClientCert != hasClientKey { + return nil, nil, fmt.Errorf("mTLS requires both client cert and key, got cert=%q key=%q", cluster.TLS.Clientfile, cluster.TLS.Clientkeyfile) + } + if hasClientCert { + cert, err := tls.LoadX509KeyPair(cluster.TLS.Clientfile, cluster.TLS.Clientkeyfile) + if err != nil { + return nil, nil, fmt.Errorf("load client cert: %w", err) + } + cfg.Certificates = []tls.Certificate{cert} + } + } + + return cfg, warns, nil +} + +func buildSASL(cluster *config.Cluster) (sasl.Mechanism, error) { + if cluster.SASL == nil { + return nil, nil + } + + switch strings.ToUpper(cluster.SASL.Mechanism) { + case "PLAIN": + return plain.Plain(func(_ context.Context) (plain.Auth, error) { + return plain.Auth{ + User: cluster.SASL.Username, + Pass: cluster.SASL.Password, + }, nil + }), nil + + case "SCRAM-SHA-256": + return scram.Sha256(func(_ context.Context) (scram.Auth, error) { + return scram.Auth{ + User: cluster.SASL.Username, + Pass: cluster.SASL.Password, + }, nil + }), nil + + case "SCRAM-SHA-512": + return scram.Sha512(func(_ context.Context) (scram.Auth, error) { + return scram.Auth{ + User: cluster.SASL.Username, + Pass: cluster.SASL.Password, + }, nil + }), nil + + case "OAUTHBEARER": + return oauthMechanism(cluster), nil + + case "AWS_MSK_IAM": + return awsMSKMechanism(cluster) + + default: + return nil, fmt.Errorf("unsupported SASL mechanism: %s", cluster.SASL.Mechanism) + } +} diff --git a/pkg/client/client_integration_test.go b/pkg/client/client_integration_test.go new file mode 100644 index 00000000..10cce6b5 --- /dev/null +++ b/pkg/client/client_integration_test.go @@ -0,0 +1,238 @@ +package client + +import ( + "context" + "testing" + "time" + + "github.com/birdayz/kaf/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/modules/redpanda" + "github.com/twmb/franz-go/pkg/kgo" +) + +func TestNewClient(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cluster := &config.Cluster{ + Name: "test", + Brokers: []string{brokers}, + } + + cl, _, err := 
New(cluster) + require.NoError(t, err) + require.NotNil(t, cl) + defer cl.Close() + + assert.NotNil(t, cl.KGO) + assert.NotNil(t, cl.Admin) + + // Verify the client actually works by pinging the cluster. + err = cl.KGO.Ping(ctx) + require.NoError(t, err) +} + +func TestNewClientProduceConsume(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + topic := "test-produce-consume" + + // Create a producer client. + producer, _, err := New(&config.Cluster{ + Name: "producer", + Brokers: []string{brokers}, + }) + require.NoError(t, err) + defer producer.Close() + + // Create the topic first. + _, err = producer.Admin.CreateTopic(ctx, 1, 1, nil, topic) + require.NoError(t, err) + + // Produce a message. + key := []byte("test-key") + value := []byte("test-value") + record := &kgo.Record{ + Topic: topic, + Key: key, + Value: value, + } + err = producer.KGO.ProduceSync(ctx, record).FirstErr() + require.NoError(t, err) + + // Create a consumer client with a consumer group so we get assigned. + consumer, _, err := New(&config.Cluster{ + Name: "consumer", + Brokers: []string{brokers}, + }, + kgo.ConsumeTopics(topic), + kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), + ) + require.NoError(t, err) + defer consumer.Close() + + // Poll for the message with a timeout. + deadline := time.After(10 * time.Second) + var fetched *kgo.Record + for fetched == nil { + select { + case <-deadline: + t.Fatal("timed out waiting for message") + default: + } + fetches := consumer.KGO.PollFetches(ctx) + fetches.EachRecord(func(r *kgo.Record) { + fetched = r + }) + } + + assert.Equal(t, key, fetched.Key) + assert.Equal(t, value, fetched.Value) + assert.Equal(t, topic, fetched.Topic) +} + +func TestNewClientAdmin(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, _, err := New(&config.Cluster{ + Name: "admin-test", + Brokers: []string{brokers}, + }) + require.NoError(t, err) + defer cl.Close() + + topic := "admin-test-topic" + + // Create a topic. + resp, err := cl.Admin.CreateTopic(ctx, 3, 1, nil, topic) + require.NoError(t, err) + require.NoError(t, resp.Err) + + // List topics and verify our topic exists. 
+ listed, err := cl.Admin.ListTopics(ctx) + require.NoError(t, err) + + _, exists := listed[topic] + assert.True(t, exists, "created topic should appear in topic listing") + + details := listed[topic] + assert.Equal(t, 3, len(details.Partitions), "topic should have 3 partitions") +} + +func TestNewClientClose(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, _, err := New(&config.Cluster{ + Name: "close-test", + Brokers: []string{brokers}, + }) + require.NoError(t, err) + + // Verify it works before closing. + err = cl.KGO.Ping(ctx) + require.NoError(t, err) + + // Close the client. + cl.Close() + + // After closing, producing should fail. + err = cl.KGO.ProduceSync(ctx, &kgo.Record{ + Topic: "whatever", + Value: []byte("should-fail"), + }).FirstErr() + assert.Error(t, err, "produce after close should fail") +} + +func TestNewClientWithOpts(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, _, err := New(&config.Cluster{ + Name: "opts-test", + Brokers: []string{brokers}, + }, kgo.ClientID("custom-client-id")) + require.NoError(t, err) + defer cl.Close() + + // Verify the client is functional - ping and metadata should work. + err = cl.KGO.Ping(ctx) + require.NoError(t, err) + + // Also verify we can do admin operations through this client. + brokerMeta, err := cl.Admin.ListBrokers(ctx) + require.NoError(t, err) + assert.NotEmpty(t, brokerMeta, "should have at least one broker") +} + +func TestNewClientInvalidBroker(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + // Use a broker address that nothing is listening on. + cl, _, err := New(&config.Cluster{ + Name: "invalid-broker", + Brokers: []string{"127.0.0.1:19093"}, + }, kgo.DialTimeout(2*time.Second), kgo.MetadataMinAge(100*time.Millisecond)) + // franz-go doesn't return an error on NewClient with bad brokers; + // the error surfaces when you actually try to use the connection. + require.NoError(t, err) + defer cl.Close() + + // Ping should fail because there's nothing at that address. 
+ pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + err = cl.KGO.Ping(pingCtx) + assert.Error(t, err, "ping to invalid broker should fail") +} diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go new file mode 100644 index 00000000..cdbf6c91 --- /dev/null +++ b/pkg/client/client_test.go @@ -0,0 +1,187 @@ +package client + +import ( + "crypto/tls" + "testing" + + "github.com/birdayz/kaf/pkg/config" +) + +func TestBuildTLS_NoTLS(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + } + cfg, _, err := buildTLS(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg != nil { + t.Fatal("expected nil TLS config for plain cluster") + } +} + +func TestBuildTLS_SASLSSL(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + SecurityProtocol: "SASL_SSL", + } + cfg, _, err := buildTLS(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected non-nil TLS config for SASL_SSL") + } + if cfg.MinVersion != tls.VersionTLS12 { + t.Fatalf("expected MinVersion TLS 1.2, got %d", cfg.MinVersion) + } +} + +func TestBuildTLS_SSL(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + SecurityProtocol: "SSL", + } + cfg, _, err := buildTLS(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected non-nil TLS config for SSL") + } +} + +func TestBuildTLS_ExplicitTLSBlock(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + TLS: &config.TLS{ + Insecure: true, + }, + } + cfg, _, err := buildTLS(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg == nil { + t.Fatal("expected non-nil TLS config") + } + if !cfg.InsecureSkipVerify { + t.Fatal("expected InsecureSkipVerify=true") + } +} + +func TestBuildTLS_CAFile_NotFound(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + TLS: &config.TLS{ + Cafile: "/nonexistent/ca.pem", + }, + } + _, _, err := buildTLS(cluster) + if err == nil { + t.Fatal("expected error for nonexistent CA file") + } +} + +func TestBuildSASL_NoSASL(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + } + mech, err := buildSASL(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mech != nil { + t.Fatal("expected nil SASL mechanism") + } +} + +func TestBuildSASL_Plain(t *testing.T) { + cluster := &config.Cluster{ + Brokers: []string{"localhost:9092"}, + SASL: &config.SASL{ + Mechanism: "PLAIN", + Username: "user", + Password: "pass", + }, + } + mech, err := buildSASL(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mech == nil { + t.Fatal("expected non-nil SASL mechanism for PLAIN") + } + if mech.Name() != "PLAIN" { + t.Fatalf("expected mechanism name PLAIN, got %s", mech.Name()) + } +} + +func TestBuildSASL_ScramSHA256(t *testing.T) { + cluster := &config.Cluster{ + SASL: &config.SASL{ + Mechanism: "SCRAM-SHA-256", + Username: "user", + Password: "pass", + }, + } + mech, err := buildSASL(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mech == nil { + t.Fatal("expected non-nil SASL mechanism") + } + if mech.Name() != "SCRAM-SHA-256" { + t.Fatalf("expected SCRAM-SHA-256, got %s", mech.Name()) + } +} + +func TestBuildSASL_ScramSHA512(t *testing.T) { + cluster := &config.Cluster{ + SASL: &config.SASL{ + Mechanism: "SCRAM-SHA-512", + Username: 
"user", + Password: "pass", + }, + } + mech, err := buildSASL(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mech == nil { + t.Fatal("expected non-nil SASL mechanism") + } + if mech.Name() != "SCRAM-SHA-512" { + t.Fatalf("expected SCRAM-SHA-512, got %s", mech.Name()) + } +} + +func TestBuildSASL_CaseInsensitive(t *testing.T) { + cluster := &config.Cluster{ + SASL: &config.SASL{ + Mechanism: "plain", + Username: "user", + Password: "pass", + }, + } + mech, err := buildSASL(cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mech == nil { + t.Fatal("expected non-nil SASL mechanism for lowercase 'plain'") + } +} + +func TestBuildSASL_Unsupported(t *testing.T) { + cluster := &config.Cluster{ + SASL: &config.SASL{ + Mechanism: "KERBEROS", + }, + } + _, err := buildSASL(cluster) + if err == nil { + t.Fatal("expected error for unsupported mechanism") + } +} diff --git a/pkg/cmd/cmd_integration_test.go b/pkg/cmd/cmd_integration_test.go new file mode 100644 index 00000000..44bf5340 --- /dev/null +++ b/pkg/cmd/cmd_integration_test.go @@ -0,0 +1,500 @@ +package cmd_test + +import ( + "bytes" + "context" + "strings" + "testing" + "time" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/modules/redpanda" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" + + "github.com/birdayz/kaf/pkg/app" + cmdconsume "github.com/birdayz/kaf/pkg/cmd/consume" + cmdgroup "github.com/birdayz/kaf/pkg/cmd/group" + cmdnode "github.com/birdayz/kaf/pkg/cmd/node" + cmdproduce "github.com/birdayz/kaf/pkg/cmd/produce" + cmdquery "github.com/birdayz/kaf/pkg/cmd/query" + cmdtopic "github.com/birdayz/kaf/pkg/cmd/topic" + "github.com/birdayz/kaf/pkg/config" +) + +// startRedpanda starts a Redpanda container and returns the broker address. +func startRedpanda(t *testing.T, ctx context.Context) string { + t.Helper() + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + return brokers +} + +// testApp creates a fresh App wired to the given broker with captured I/O. +func testApp(t *testing.T, brokers string) (*app.App, *bytes.Buffer, *bytes.Buffer) { + t.Helper() + out := &bytes.Buffer{} + errOut := &bytes.Buffer{} + a := &app.App{ + OutWriter: out, + ErrWriter: errOut, + InReader: strings.NewReader(""), + ColorableOut: out, + CurrentCluster: &config.Cluster{ + Brokers: []string{brokers}, + }, + } + return a, out, errOut +} + +// runCmd executes a cobra command with args against a test context. 
+func runCmd(ctx context.Context, cmd *cobra.Command, args ...string) error { + cmd.SetArgs(args) + return cmd.ExecuteContext(ctx) +} + +// --- Topic commands --- + +func TestTopicCRUD(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-topic-crud" + + // Create + a, out, _ := testApp(t, brokers) + err := runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "3", "-r", "1") + require.NoError(t, err) + assert.Contains(t, out.String(), "Created topic") + assert.Contains(t, out.String(), topicName) + + // List + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "ls") + require.NoError(t, err) + assert.Contains(t, out.String(), topicName) + assert.Contains(t, out.String(), "3") // partitions + + // Topics alias + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewTopicsAlias(a)) + require.NoError(t, err) + assert.Contains(t, out.String(), topicName) + + // Describe + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "describe", topicName) + require.NoError(t, err) + output := out.String() + assert.Contains(t, output, topicName) + assert.Contains(t, output, "Partition") + + // Add config + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "add-config", topicName, "retention.ms", "86400000") + require.NoError(t, err) + assert.Contains(t, out.String(), "Added config") + + // Describe again to verify config shows + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "describe", topicName) + require.NoError(t, err) + assert.Contains(t, out.String(), "retention.ms") + + // Remove config + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "rm-config", topicName, "retention.ms") + require.NoError(t, err) + assert.Contains(t, out.String(), "Removed attributes") + + // Delete + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "delete", topicName, "--noconfirm") + require.NoError(t, err) + assert.Contains(t, out.String(), "Deleted topic") + + // Verify gone + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "ls") + require.NoError(t, err) + assert.NotContains(t, out.String(), topicName) +} + +// --- Node commands --- + +func TestNodeList(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + + // node ls + a, out, _ := testApp(t, brokers) + err := runCmd(ctx, cmdnode.NewCommand(a), "ls") + require.NoError(t, err) + output := out.String() + assert.Contains(t, output, "ID") + assert.Contains(t, output, "ADDRESS") + assert.Contains(t, output, "true") // controller + + // nodes alias + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdnode.NewNodesAlias(a)) + require.NoError(t, err) + assert.Contains(t, out.String(), "true") + + // nodes --no-headers + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdnode.NewNodesAlias(a), "--no-headers") + require.NoError(t, err) + assert.NotContains(t, out.String(), "ID") + assert.NotContains(t, out.String(), "ADDRESS") +} + +// --- Produce + Consume --- + +func TestProduceConsume(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + 
defer cancel() + + brokers := startRedpanda(t, ctx) + t.Logf("Broker address: %s", brokers) + topicName := "test-produce-consume" + + // Create topic + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "1", "-r", "1")) + + // Produce a message + a, out, _ := testApp(t, brokers) + a.InReader = strings.NewReader("hello world") + err := runCmd(ctx, cmdproduce.NewCommand(a), topicName, "-k", "mykey") + require.NoError(t, err) + assert.Contains(t, out.String(), "Sent record to partition 0") + + // Produce with header + a, out, _ = testApp(t, brokers) + a.InReader = strings.NewReader("with header") + err = runCmd(ctx, cmdproduce.NewCommand(a), topicName, "-k", "k2", "-H", "env:prod") + require.NoError(t, err) + assert.Contains(t, out.String(), "Sent record") + + // Consume (non-follow, should stop at end) + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName) + require.NoError(t, err) + output := out.String() + assert.Contains(t, output, "hello world") + assert.Contains(t, output, "with header") + + // Consume with --output raw + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "raw") + require.NoError(t, err) + output = out.String() + assert.Contains(t, output, "hello world") + // Raw mode should not contain metadata like "Partition:" + assert.NotContains(t, output, "Partition:") + + // Consume with --limit-messages + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "raw", "-l", "1") + require.NoError(t, err) + // Should have exactly one line (one message + one newline) + lines := strings.Split(strings.TrimSpace(out.String()), "\n") + assert.Equal(t, 1, len(lines), "expected exactly 1 message, got: %v", lines) + + // Consume with --output json-each-row + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "json-each-row", "-l", "1") + require.NoError(t, err) + assert.Contains(t, out.String(), `"topic"`) + assert.Contains(t, out.String(), `"payload"`) + + // Consume with --tail + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "raw", "-n", "1") + require.NoError(t, err) + lines = strings.Split(strings.TrimSpace(out.String()), "\n") + assert.Equal(t, 1, len(lines), "tail 1 should return 1 message, got: %v", lines) + + // Consume with header filter + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "raw", "--header", "env:prod") + require.NoError(t, err) + output = out.String() + assert.Contains(t, output, "with header") + assert.NotContains(t, output, "hello world") +} + +// --- Group commands --- + +func TestGroupLifecycle(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-group-lifecycle" + groupName := "test-group" + + // Create topic and produce data + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "1", "-r", "1")) + + a, _, _ = testApp(t, brokers) + a.InReader = strings.NewReader("msg1") + require.NoError(t, runCmd(ctx, cmdproduce.NewCommand(a), topicName)) + a, _, _ = testApp(t, brokers) + a.InReader = strings.NewReader("msg2") + require.NoError(t, runCmd(ctx, cmdproduce.NewCommand(a), 
topicName)) + + // Consume with group + commit + a, out, _ := testApp(t, brokers) + err := runCmd(ctx, cmdconsume.NewCommand(a), topicName, "-g", groupName, "--commit", "-l", "2") + require.NoError(t, err) + assert.Contains(t, out.String(), "msg1") + assert.Contains(t, out.String(), "msg2") + + // Wait a moment for the group to be registered + time.Sleep(500 * time.Millisecond) + + // Group list + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdgroup.NewCommand(a), "ls") + require.NoError(t, err) + assert.Contains(t, out.String(), groupName) + + // Groups alias + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdgroup.NewGroupsAlias(a)) + require.NoError(t, err) + assert.Contains(t, out.String(), groupName) + + // Group describe + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdgroup.NewCommand(a), "describe", groupName) + require.NoError(t, err) + output := out.String() + assert.Contains(t, output, groupName) + assert.Contains(t, output, topicName) + + // Group commit (reset to oldest) + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdgroup.NewCommand(a), "commit", groupName, "-t", topicName, "-o", "oldest", "-p", "0", "--noconfirm") + require.NoError(t, err) + assert.Contains(t, out.String(), "Successfully committed") + + // Group delete + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdgroup.NewCommand(a), "delete", groupName) + require.NoError(t, err) + assert.Contains(t, out.String(), "Deleted consumer group") +} + +// --- Query command --- + +func TestQuery(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-query" + + // Create topic and produce + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "1", "-r", "1")) + + a, _, _ = testApp(t, brokers) + a.InReader = strings.NewReader("target-value") + require.NoError(t, runCmd(ctx, cmdproduce.NewCommand(a), topicName, "-k", "findme")) + + a, _, _ = testApp(t, brokers) + a.InReader = strings.NewReader("other-value") + require.NoError(t, runCmd(ctx, cmdproduce.NewCommand(a), topicName, "-k", "other")) + + // Query by key + a, out, _ := testApp(t, brokers) + err := runCmd(ctx, cmdquery.NewCommand(a), topicName, "-k", "findme") + require.NoError(t, err) + output := out.String() + assert.Contains(t, output, "findme") + assert.Contains(t, output, "target-value") + assert.NotContains(t, output, "other-value") + + // Query with grep + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdquery.NewCommand(a), topicName, "-k", "findme", "--grep", "target") + require.NoError(t, err) + assert.Contains(t, out.String(), "target-value") +} + +// --- Topic lag --- + +func TestTopicLag(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-topic-lag" + groupName := "test-lag-group" + + // Create topic, produce 5 messages + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "1", "-r", "1")) + + for i := 0; i < 5; i++ { + a, _, _ = testApp(t, brokers) + a.InReader = strings.NewReader("msg") + require.NoError(t, runCmd(ctx, cmdproduce.NewCommand(a), topicName)) + } + + // Consume 2 with group (leave lag of 3) + a, _, _ = testApp(t, brokers) + err := 
runCmd(ctx, cmdconsume.NewCommand(a), topicName, "-g", groupName, "--commit", "-l", "2") + require.NoError(t, err) + + time.Sleep(500 * time.Millisecond) + + // Check lag + a, out, _ := testApp(t, brokers) + err = runCmd(ctx, cmdtopic.NewCommand(a), "lag", topicName) + require.NoError(t, err) + output := out.String() + assert.Contains(t, output, groupName) + assert.Contains(t, output, "3") // lag of 3 +} + +// --- Produce edge cases --- + +func TestProduceRepeat(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-produce-repeat" + + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "1", "-r", "1")) + + // Produce with --repeat + a, out, _ := testApp(t, brokers) + a.InReader = strings.NewReader("repeated") + err := runCmd(ctx, cmdproduce.NewCommand(a), topicName, "-n", "3") + require.NoError(t, err) + assert.Equal(t, 3, strings.Count(out.String(), "Sent record")) + + // Verify all 3 are consumable + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "raw") + require.NoError(t, err) + assert.Equal(t, 3, strings.Count(out.String(), "repeated")) +} + +// --- Produce with partition --- + +func TestProduceToPartition(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-produce-partition" + + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "3", "-r", "1")) + + // Produce to partition 2 + a, out, _ := testApp(t, brokers) + a.InReader = strings.NewReader("to-p2") + err := runCmd(ctx, cmdproduce.NewCommand(a), topicName, "-p", "2") + require.NoError(t, err) + assert.Contains(t, out.String(), "partition 2") + + // Consume from partition 2 only + a, out, _ = testApp(t, brokers) + err = runCmd(ctx, cmdconsume.NewCommand(a), topicName, "--output", "raw", "-p", "2") + require.NoError(t, err) + assert.Contains(t, out.String(), "to-p2") +} + +// --- Delete offsets --- + +func TestGroupDeleteOffsets(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + brokers := startRedpanda(t, ctx) + topicName := "test-delete-offsets" + groupName := "test-deloff-group" + + // Setup: create topic, produce, consume with group + a, _, _ := testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdtopic.NewCommand(a), "create", topicName, "-p", "1", "-r", "1")) + + a, _, _ = testApp(t, brokers) + a.InReader = strings.NewReader("msg") + require.NoError(t, runCmd(ctx, cmdproduce.NewCommand(a), topicName)) + + a, _, _ = testApp(t, brokers) + require.NoError(t, runCmd(ctx, cmdconsume.NewCommand(a), topicName, "-g", groupName, "--commit", "-l", "1")) + + time.Sleep(500 * time.Millisecond) + + // Verify group has committed offsets + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + admin := kadm.NewClient(cl) + offsets, err := admin.FetchOffsets(ctx, groupName) + require.NoError(t, err) + _, ok := offsets.Lookup(topicName, 0) + require.True(t, ok, "expected committed offset for partition 0") + cl.Close() + + // Delete offsets + a, out, _ := testApp(t, 
brokers) + err = runCmd(ctx, cmdgroup.NewCommand(a), "delete-offsets", groupName, "-t", topicName, "--all-partitions", "--noconfirm") + require.NoError(t, err) + assert.Contains(t, out.String(), "Successfully deleted offsets") + + // Verify offsets are gone + cl, err = kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + admin = kadm.NewClient(cl) + offsets, err = admin.FetchOffsets(ctx, groupName) + require.NoError(t, err) + _, ok = offsets.Lookup(topicName, 0) + assert.False(t, ok, "expected offset to be deleted") + cl.Close() +} diff --git a/pkg/cmd/completion/command.go b/pkg/cmd/completion/command.go new file mode 100644 index 00000000..772fb89b --- /dev/null +++ b/pkg/cmd/completion/command.go @@ -0,0 +1,68 @@ +package completion + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/birdayz/kaf/pkg/app" +) + +// NewCommand returns the "kaf completion" command. +// It takes the root command so it can generate completions for the full tree. +func NewCommand(root *cobra.Command, a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "completion [SHELL]", + Short: "Generate completion script for bash, zsh, fish or powershell", + Long: `To load completions: + +Bash: + +$ source <(kaf completion bash) + +# To load completions for each session, execute once: +Linux: + $ kaf completion bash > /etc/bash_completion.d/kaf +MacOS: + $ kaf completion bash > /usr/local/etc/bash_completion.d/kaf + +Zsh: + +# To load completions for each session, execute once: +$ kaf completion zsh > "${fpath[1]}/_kaf" + +# You will need to start a new shell for this setup to take effect. + +Fish: + +$ kaf completion fish | source + +# To load completions for each session, execute once: +$ kaf completion fish > ~/.config/fish/completions/kaf.fish +`, + DisableFlagsInUseLine: true, + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + RunE: func(cmd *cobra.Command, args []string) error { + switch args[0] { + case "bash": + if err := root.GenBashCompletion(a.OutWriter); err != nil { + return fmt.Errorf("failed to generate bash completion: %w", err) + } + case "zsh": + if err := root.GenZshCompletion(a.OutWriter); err != nil { + return fmt.Errorf("failed to generate zsh completion: %w", err) + } + case "fish": + if err := root.GenFishCompletion(a.OutWriter, true); err != nil { + return fmt.Errorf("failed to generate fish completion: %w", err) + } + case "powershell": + if err := root.GenPowerShellCompletion(a.OutWriter); err != nil { + return fmt.Errorf("failed to generate powershell completion: %w", err) + } + } + return nil + }, + } +} diff --git a/pkg/cmd/config/command.go b/pkg/cmd/config/command.go new file mode 100644 index 00000000..1309d1f4 --- /dev/null +++ b/pkg/cmd/config/command.go @@ -0,0 +1,293 @@ +package config + +import ( + "fmt" + "regexp" + "strings" + + "github.com/manifoldco/promptui" + "github.com/spf13/cobra" + + "github.com/birdayz/kaf/pkg/app" + "github.com/birdayz/kaf/pkg/config" +) + +// NewCommand returns the "kaf config" command with subcommands. 
+func NewCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "Handle kaf configuration", + } + + cmd.AddCommand( + newCurrentContextCommand(a), + newUseClusterCommand(a), + newGetClustersCommand(a), + newAddClusterCommand(a), + newRemoveClusterCommand(a), + newSelectClusterCommand(a), + newAddEventhubCommand(a), + newImportCommand(a), + ) + + return cmd +} + +func newCurrentContextCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "current-context", + Short: "Displays the current context", + Args: cobra.ExactArgs(0), + Run: func(cmd *cobra.Command, args []string) { + fmt.Fprintln(a.OutWriter, a.Cfg.CurrentCluster) + }, + } +} + +func newUseClusterCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "use-cluster [NAME]", + Short: "Sets the current cluster in the configuration", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidConfigArgs, + RunE: func(cmd *cobra.Command, args []string) error { + name := args[0] + if err := a.Cfg.SetCurrentCluster(name); err != nil { + return fmt.Errorf("cluster with name %v not found", name) + } + fmt.Fprintf(a.OutWriter, "Switched to cluster \"%v\".\n", name) + return nil + }, + } +} + +func newGetClustersCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "get-clusters", + Short: "Display clusters in the configuration file", + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + if !a.NoHeaderFlag { + fmt.Fprintln(a.OutWriter, " NAME") + } + for _, cluster := range a.Cfg.Clusters { + marker := " " + if cluster.Name == a.Cfg.CurrentCluster { + marker = "* " + } + fmt.Fprintf(a.OutWriter, "%s%s\n", marker, cluster.Name) + } + }, + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func newAddClusterCommand(a *app.App) *cobra.Command { + var brokerVersion string + + cmd := &cobra.Command{ + Use: "add-cluster [NAME]", + Short: "Add cluster", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + name := args[0] + if a.Cfg.HasCluster(name) { + return fmt.Errorf("could not add cluster: cluster with name '%v' exists already", name) + } + + a.Cfg.Clusters = append(a.Cfg.Clusters, &config.Cluster{ + Name: name, + Brokers: a.BrokersFlag, + SchemaRegistryURL: a.SchemaRegistryURL, + Version: brokerVersion, + }) + if err := a.Cfg.Write(); err != nil { + return fmt.Errorf("unable to write config: %w", err) + } + fmt.Fprintln(a.OutWriter, "Added cluster.") + return nil + }, + } + + cmd.Flags().StringVar(&brokerVersion, "broker-version", "", "Broker version (stored in config, not used by franz-go which auto-negotiates)") + return cmd +} + +func newRemoveClusterCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "remove-cluster [NAME]", + Short: "remove cluster", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidConfigArgs, + RunE: func(cmd *cobra.Command, args []string) error { + name := args[0] + + pos := -1 + for i, cluster := range a.Cfg.Clusters { + if cluster.Name == name { + pos = i + break + } + } + + if pos == -1 { + return fmt.Errorf("could not delete cluster: cluster with name '%v' does not exist", name) + } + + a.Cfg.Clusters = append(a.Cfg.Clusters[:pos], a.Cfg.Clusters[pos+1:]...) 
+ + if err := a.Cfg.Write(); err != nil { + return fmt.Errorf("unable to write config: %w", err) + } + fmt.Fprintln(a.OutWriter, "Removed cluster.") + return nil + }, + } +} + +func newSelectClusterCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "select-cluster", + Aliases: []string{"ls"}, + Short: "Interactively select a cluster", + RunE: func(cmd *cobra.Command, args []string) error { + var clusterNames []string + pos := 0 + for k, cluster := range a.Cfg.Clusters { + clusterNames = append(clusterNames, cluster.Name) + if cluster.Name == a.Cfg.CurrentCluster { + pos = k + } + } + + searcher := func(input string, index int) bool { + cluster := clusterNames[index] + name := strings.ReplaceAll(strings.ToLower(cluster), " ", "") + input = strings.ReplaceAll(strings.ToLower(input), " ", "") + return strings.Contains(name, input) + } + + p := promptui.Select{ + Label: "Select cluster", + Items: clusterNames, + Searcher: searcher, + Size: 10, + CursorPos: pos, + } + + _, selected, err := p.Run() + if err != nil { + // User cancelled (e.g. Ctrl-C). Not an error. + return nil + } + + if err := a.Cfg.SetCurrentCluster(selected); err != nil { + return fmt.Errorf("cluster with name %v not found", selected) + } + fmt.Fprintf(a.OutWriter, "Switched to cluster \"%v\".\n", selected) + return nil + }, + } +} + +func newAddEventhubCommand(a *app.App) *cobra.Command { + var connString string + + cmd := &cobra.Command{ + Use: "add-eventhub [NAME]", + Example: "kaf config add-eventhub my-eventhub --eh-connstring 'Endpoint=sb://......AccessKey=....'", + Short: "Add Azure EventHub", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + name := args[0] + if a.Cfg.HasCluster(name) { + return fmt.Errorf("could not add cluster: cluster with name '%v' exists already", name) + } + + r := regexp.MustCompile(`^Endpoint=sb://(.*)\.servicebus.*$`) + hubName := r.FindStringSubmatch(connString) + if len(hubName) != 2 { + return fmt.Errorf("failed to determine EventHub name from Connection String -- check your ConnectionString") + } + + a.Cfg.Clusters = append(a.Cfg.Clusters, &config.Cluster{ + Name: name, + Brokers: []string{hubName[1] + ".servicebus.windows.net:9093"}, + SchemaRegistryURL: a.SchemaRegistryURL, + SASL: &config.SASL{ + Mechanism: "PLAIN", + Username: "$ConnectionString", + Password: connString, + }, + SecurityProtocol: "SASL_SSL", + }) + if err := a.Cfg.Write(); err != nil { + return fmt.Errorf("unable to write config: %w", err) + } + fmt.Fprintln(a.OutWriter, "Added EventHub.") + return nil + }, + } + + cmd.Flags().StringVar(&connString, "eh-connstring", "", "EventHub ConnectionString") + return cmd +} + +func newImportCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "import [ccloud]", + Short: "Import configurations into the $HOME/.kaf/config file", + RunE: func(cmd *cobra.Command, args []string) error { + path, err := config.TryFindCcloudConfigFile() + if err != nil { + return fmt.Errorf("could not find Confluent Cloud config file: %w", err) + } + fmt.Fprintf(a.OutWriter, "Detected Confluent Cloud config in file %v\n", path) + username, password, broker, err := config.ParseConfluentCloudConfig(path) + if err != nil { + return fmt.Errorf("failed to parse Confluent Cloud config: %w", err) + } + + newCluster := &config.Cluster{ + Name: "ccloud", + Brokers: []string{broker}, + SASL: &config.SASL{ + Username: username, + Password: password, + Mechanism: "PLAIN", + }, + SecurityProtocol: "SASL_SSL", + } + + var found bool + for i, c := 
range a.Cfg.Clusters { + if c.Name == "ccloud" { + found = true + a.Cfg.Clusters[i] = newCluster + break + } + } + + if !found { + fmt.Fprintln(a.OutWriter, "Wrote new entry to config file") + a.Cfg.Clusters = append(a.Cfg.Clusters, newCluster) + } + + if a.Cfg.CurrentCluster == "" { + a.Cfg.CurrentCluster = newCluster.Name + } + if err = a.Cfg.Write(); err != nil { + return fmt.Errorf("failed to write config: %w", err) + } + return nil + }, + ValidArgs: []string{"ccloud"}, + Args: func(cmd *cobra.Command, args []string) error { + if err := cobra.OnlyValidArgs(cmd, args); err != nil { + return err + } + return cobra.ExactArgs(1)(cmd, args) + }, + } +} diff --git a/pkg/cmd/consume/command.go b/pkg/cmd/consume/command.go new file mode 100644 index 00000000..5fad7501 --- /dev/null +++ b/pkg/cmd/consume/command.go @@ -0,0 +1,271 @@ +package consume + +import ( + "fmt" + "strconv" + "strings" + "sync" + + "github.com/spf13/cobra" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" + + "github.com/birdayz/kaf/pkg/app" +) + +// NewCommand returns the "kaf consume" command. +func NewCommand(a *app.App) *cobra.Command { + var ( + offsetFlag string + groupFlag string + groupCommitFlag bool + outputFormat = app.OutputFormatDefault + raw bool + follow bool + tail int32 + flagPartitions []int32 + limitMessagesFlag int64 + headerFilterFlag []string + ) + + cmd := &cobra.Command{ + Use: "consume TOPIC", + Short: "Consume messages", + Long: "Consume messages from a Kafka topic. Supports consumer groups, partition selection, offset control, tail mode, and multiple output formats.", + Example: ` kaf consume my-topic + kaf consume my-topic -f + kaf consume my-topic --offset newest -f + kaf consume my-topic -n 10 + kaf consume my-topic -g my-group --commit + kaf consume my-topic --output json + kaf consume my-topic --header "env:prod"`, + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + PreRunE: a.SetupProtoDescriptorRegistry, + RunE: func(cmd *cobra.Command, args []string) error { + topic := args[0] + ctx := cmd.Context() + + if outputFormat == app.OutputFormatDefault && raw { + outputFormat = app.OutputFormatRaw + } + + if groupFlag != "" && cmd.Flags().Changed("offset") { + fmt.Fprintf(a.ErrWriter, "WARNING: --offset is ignored when using --group (consumer group manages offsets)\n") + } + + headerFilter := make(map[string]string) + for _, f := range headerFilterFlag { + parts := strings.SplitN(f, ":", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid header filter format: %s, expected format: key:value", f) + } + headerFilter[parts[0]] = parts[1] + } + + var scErr error + a.SchemaCache, scErr = a.NewSchemaCache() + if scErr != nil { + return scErr + } + + var opts []kgo.Opt + + // Whether we'll use ConsumePartitions (explicit partitions or tail mode). + // ConsumeTopics and ConsumePartitions for the same topic are mutually + // exclusive in franz-go, so we must pick one. 
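+			// For example, "kaf consume my-topic -p 1,2" pins consumption to
+			// those partitions, and "kaf consume my-topic -n 10" tails the last
+			// ten records per partition; both paths use ConsumePartitions.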
+ useConsumePartitions := (len(flagPartitions) > 0 || tail > 0) && groupFlag == "" + + if groupFlag != "" { + opts = append(opts, kgo.ConsumeTopics(topic)) + opts = append(opts, kgo.ConsumerGroup(groupFlag)) + if !groupCommitFlag { + opts = append(opts, kgo.DisableAutoCommit()) + } + } else { + switch offsetFlag { + case "oldest": + opts = append(opts, kgo.ConsumeResetOffset(kgo.NewOffset().AtStart())) + case "newest": + opts = append(opts, kgo.ConsumeResetOffset(kgo.NewOffset().AtEnd())) + default: + o, err := strconv.ParseInt(offsetFlag, 10, 64) + if err != nil { + return fmt.Errorf("could not parse '%s' to int64: %v", offsetFlag, err) + } + opts = append(opts, kgo.ConsumeResetOffset(kgo.NewOffset().At(o))) + } + + if len(flagPartitions) > 0 && tail == 0 { + // Explicit partitions without tail: set up ConsumePartitions now. + partMap := make(map[string]map[int32]kgo.Offset) + offsets := make(map[int32]kgo.Offset, len(flagPartitions)) + for _, p := range flagPartitions { + switch offsetFlag { + case "oldest": + offsets[p] = kgo.NewOffset().AtStart() + case "newest": + offsets[p] = kgo.NewOffset().AtEnd() + default: + o, _ := strconv.ParseInt(offsetFlag, 10, 64) + offsets[p] = kgo.NewOffset().At(o) + } + } + partMap[topic] = offsets + opts = append(opts, kgo.ConsumePartitions(partMap)) + } + + if !useConsumePartitions { + opts = append(opts, kgo.ConsumeTopics(topic)) + } + } + + var endOffsetMap map[int32]int64 + needsAdmin := tail > 0 || (!follow && groupFlag == "" && tail == 0) + if needsAdmin { + admCl, err := a.NewKafClient() + if err != nil { + return err + } + defer admCl.Close() + + if tail > 0 { + endOffsets, err := admCl.Admin.ListEndOffsets(ctx, topic) + if err != nil { + return fmt.Errorf("failed to get end offsets: %v", err) + } + startOffsets, err := admCl.Admin.ListStartOffsets(ctx, topic) + if err != nil { + return fmt.Errorf("failed to get start offsets: %v", err) + } + + partFilterSet := make(map[int32]bool, len(flagPartitions)) + for _, p := range flagPartitions { + partFilterSet[p] = true + } + + partMap := make(map[string]map[int32]kgo.Offset) + offsets := make(map[int32]kgo.Offset) + endOffsets.Each(func(lo kadm.ListedOffset) { + if lo.Topic != topic { + return + } + if len(partFilterSet) > 0 && !partFilterSet[lo.Partition] { + return + } + start := lo.Offset - int64(tail) + if so, ok := startOffsets.Lookup(topic, lo.Partition); ok && start < so.Offset { + start = so.Offset + } + offsets[lo.Partition] = kgo.NewOffset().At(start) + }) + partMap[topic] = offsets + opts = append(opts, kgo.ConsumePartitions(partMap)) + + // Tail mode also needs end offsets to know when to stop. + if !follow { + endOffsetMap = app.EndOffsetMapForTopic(endOffsets, topic) + } + } + + if !follow && groupFlag == "" && tail == 0 { + endOffs, err := admCl.Admin.ListEndOffsets(ctx, topic) + if err != nil { + return fmt.Errorf("failed to get end offsets: %v", err) + } + endOffsetMap = app.EndOffsetMapForTopic(endOffs, topic) + } + } + + kafCl, err := a.NewKafClient(opts...) 
+ if err != nil { + return err + } + defer kafCl.Close() + + var mu sync.Mutex + var totalMessageCount int64 + maxOffsetSeen := make(map[int32]int64) + + for { + fetches := kafCl.KGO.PollFetches(ctx) + if fetches.IsClientClosed() || ctx.Err() != nil { + return nil + } + + limitReached := false + fetches.EachRecord(func(rec *kgo.Record) { + if limitReached { + return + } + if limitMessagesFlag > 0 && totalMessageCount >= limitMessagesFlag { + limitReached = true + return + } + a.HandleMessage(rec, &mu, outputFormat, headerFilter) + totalMessageCount++ + if cur, ok := maxOffsetSeen[rec.Partition]; !ok || rec.Offset > cur { + maxOffsetSeen[rec.Partition] = rec.Offset + } + }) + + if errs := fetches.Errors(); len(errs) > 0 { + for _, e := range errs { + fmt.Fprintf(a.ErrWriter, "fetch error topic %s partition %d: %v\n", e.Topic, e.Partition, e.Err) + } + } + + if groupCommitFlag && groupFlag != "" { + if err := kafCl.KGO.CommitUncommittedOffsets(ctx); err != nil { + fmt.Fprintf(a.ErrWriter, "commit error: %v\n", err) + } + } + + if limitReached { + return nil + } + + if !follow && groupFlag == "" { + if endOffsetMap != nil { + done := true + for p, endOff := range endOffsetMap { + if endOff == 0 { + continue + } + seen, ok := maxOffsetSeen[p] + if !ok || seen+1 < endOff { + done = false + break + } + } + if done { + return nil + } + } + } + } + }, + } + + cmd.Flags().StringVar(&offsetFlag, "offset", "oldest", "Offset to start consuming. Possible values: oldest, newest, or integer.") + cmd.Flags().BoolVar(&raw, "raw", false, "Print raw output of messages, without key or prettified JSON") + cmd.Flags().Var(&outputFormat, "output", "Set output format messages: default, raw (without key or prettified JSON), hex (without key or prettified JSON), json, json-each-row") + cmd.Flags().BoolVarP(&follow, "follow", "f", false, "Continue to consume messages until program execution is interrupted/terminated") + cmd.Flags().Int32VarP(&tail, "tail", "n", 0, "Print last n messages per partition") + a.AddProtoFlags(cmd) + cmd.Flags().BoolVar(&a.DecodeMsgPack, "decode-msgpack", false, "Enable deserializing msgpack") + cmd.Flags().Int32SliceVarP(&flagPartitions, "partitions", "p", []int32{}, "Partitions to consume from") + cmd.Flags().Int64VarP(&limitMessagesFlag, "limit-messages", "l", 0, "Limit messages per partition") + cmd.Flags().StringVarP(&groupFlag, "group", "g", "", "Consumer Group to use for consume") + cmd.Flags().BoolVar(&groupCommitFlag, "commit", false, "Commit Group offset after receiving messages. Works only if consuming as Consumer Group") + cmd.Flags().StringSliceVar(&headerFilterFlag, "header", []string{}, "Filter messages by header. Format: key:value. 
Multiple filters can be specified") + + if err := cmd.RegisterFlagCompletionFunc("output", app.CompleteOutputFormat); err != nil { + panic(fmt.Sprintf("Failed to register flag completion: %v", err)) + } + if err := cmd.Flags().MarkDeprecated("raw", "use --output raw instead"); err != nil { + panic(fmt.Sprintf("Failed to mark flag as deprecated: %v", err)) + } + + return cmd +} diff --git a/pkg/cmd/group/command.go b/pkg/cmd/group/command.go new file mode 100644 index 00000000..4ffeec1b --- /dev/null +++ b/pkg/cmd/group/command.go @@ -0,0 +1,649 @@ +package group + +import ( + "encoding/json" + "fmt" + "slices" + "strconv" + "sync" + "time" + + "github.com/manifoldco/promptui" + "github.com/spf13/cobra" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" + "golang.org/x/sync/errgroup" + + "github.com/birdayz/kaf/pkg/app" + pkggroup "github.com/birdayz/kaf/pkg/group" + pkgtopic "github.com/birdayz/kaf/pkg/topic" +) + +// NewCommand returns the "kaf group" parent command. +func NewCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "group", + Short: "Display information about consumer groups.", + } + cmd.AddCommand( + newListCommand(a), + newDescribeCommand(a), + newDeleteCommand(a), + newDeleteOffsetsCommand(a), + newPeekCommand(a), + newCommitCommand(a), + ) + return cmd +} + +// NewGroupsAlias returns the "kaf groups" alias for "kaf group ls". +func NewGroupsAlias(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "groups", + Short: "List groups", + RunE: listGroupsRunE(a), + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func listGroupsRunE(a *app.App) func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + groups, err := pkggroup.List(cmd.Context(), cl.Admin) + if err != nil { + return fmt.Errorf("unable to list consumer groups: %w", err) + } + + w := app.NewTabWriter(a.OutWriter) + if !a.NoHeaderFlag { + fmt.Fprintf(w, "NAME\tSTATE\tCONSUMERS\t\n") + } + for _, g := range groups { + fmt.Fprintf(w, "%v\t%v\t%v\t\n", g.Name, g.State, g.Consumers) + } + w.Flush() + return nil + } +} + +func newListCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "ls", + Aliases: []string{"list"}, + Short: "List groups", + Args: cobra.NoArgs, + RunE: listGroupsRunE(a), + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func newDeleteCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "delete GROUP", + Short: "Delete group", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidGroupArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + grp := args[0] + err = pkggroup.Delete(cmd.Context(), cl.Admin, grp) + if err != nil { + return fmt.Errorf("could not delete consumer group %v: %w", grp, err) + } + fmt.Fprintf(a.OutWriter, "Deleted consumer group %v.\n", grp) + return nil + }, + } +} + +func newDeleteOffsetsCommand(a *app.App) *cobra.Command { + var ( + topic string + partitionFlag int32 + allPartitions bool + offsetMap string + noconfirm bool + ) + cmd := &cobra.Command{ + Use: "delete-offsets GROUP", + Short: "Delete consumer group offsets", + Long: "Delete consumer group offsets for a topic. 
The consumer group must not have active consumers.", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidGroupArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + ctx := cmd.Context() + grp := args[0] + + if topic == "" { + return fmt.Errorf("--topic is required") + } + + var partitions []int32 + + if offsetMap != "" { + // Try parsing as array first: ["0", "1", "2"] + var partitionStrings []string + err := json.Unmarshal([]byte(offsetMap), &partitionStrings) + if err != nil { + // Try parsing as map: {"0":null, "1":null} + var partitionMap map[string]any + err2 := json.Unmarshal([]byte(offsetMap), &partitionMap) + if err2 != nil { + return fmt.Errorf("invalid --offset-map format. Use JSON array like [\"0\", \"1\"] or map like {\"0\":null}") + } + for key := range partitionMap { + partitionStrings = append(partitionStrings, key) + } + } + for _, pStr := range partitionStrings { + p, err := strconv.ParseInt(pStr, 10, 32) + if err != nil { + return fmt.Errorf("invalid partition ID in offset-map: %v", pStr) + } + partitions = append(partitions, int32(p)) + } + } else if allPartitions { + td, err := pkgtopic.Describe(ctx, cl.Admin, topic) + if err != nil { + return fmt.Errorf("unable to describe topic: %w", err) + } + for _, p := range td.Partitions { + partitions = append(partitions, p.ID) + } + } else if partitionFlag != -1 { + partitions = []int32{partitionFlag} + } else { + return fmt.Errorf("either --partition, --all-partitions or --offset-map flag must be provided") + } + + slices.Sort(partitions) + + if len(partitions) == 0 { + return fmt.Errorf("no partitions to delete offsets for") + } + + described, err := cl.Admin.DescribeGroups(ctx, grp) + if err != nil { + return fmt.Errorf("unable to describe consumer groups: %w", err) + } + if g, ok := described[grp]; ok { + if !slices.Contains([]string{"Empty", "Dead"}, g.State) { + return fmt.Errorf("consumer group %s has active consumers in it, cannot delete offsets", grp) + } + } + + fmt.Fprintf(a.OutWriter, "Will delete offsets for group '%s' on topic '%s' for partitions: %v\n", grp, topic, partitions) + + if !noconfirm { + prompt := promptui.Prompt{ + Label: "Delete offsets as described", + IsConfirm: true, + } + if _, err := prompt.Run(); err != nil { + return fmt.Errorf("aborted, exiting") + } + } + + ts := make(kadm.TopicsSet) + for _, p := range partitions { + ts.Add(topic, p) + } + + resps, err := cl.Admin.DeleteOffsets(ctx, grp, ts) + if err != nil { + return fmt.Errorf("failed to delete offsets: %w", err) + } + + var succeeded []int32 + var failMsgs []string + for _, p := range partitions { + pErr, ok := resps.Lookup(topic, p) + if !ok { + failMsgs = append(failMsgs, fmt.Sprintf(" Partition %d: no response", p)) + } else if pErr != nil { + failMsgs = append(failMsgs, fmt.Sprintf(" Partition %d: %v", p, pErr)) + } else { + succeeded = append(succeeded, p) + } + } + + if len(succeeded) > 0 { + fmt.Fprintf(a.OutWriter, "Successfully deleted offsets for partitions: %v\n", succeeded) + } + if len(failMsgs) > 0 { + fmt.Fprintf(a.ErrWriter, "Failed to delete offsets for the following partitions:\n") + for _, msg := range failMsgs { + fmt.Fprintln(a.ErrWriter, msg) + } + return fmt.Errorf("some offset deletions failed") + } + return nil + }, + } + cmd.Flags().StringVarP(&topic, "topic", "t", "", "topic") + cmd.Flags().Int32VarP(&partitionFlag, "partition", "p", -1, "partition") + cmd.Flags().BoolVar(&allPartitions, "all-partitions", false, 
"delete offsets for all partitions") + cmd.Flags().StringVar(&offsetMap, "offset-map", "", "delete offsets for specific partitions in JSON format, e.g. [\"0\", \"1\", \"2\"] or {\"0\":null, \"1\":null}") + cmd.Flags().BoolVar(&noconfirm, "noconfirm", false, "Do not prompt for confirmation") + return cmd +} + +func newDescribeCommand(a *app.App) *cobra.Command { + var ( + flagNoMembers bool + flagDescribeTopics []string + ) + cmd := &cobra.Command{ + Use: "describe GROUP", + Short: "Describe consumer group", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidGroupArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + desc, err := pkggroup.Describe(cmd.Context(), cl.Admin, args[0], flagDescribeTopics) + if err != nil { + return fmt.Errorf("unable to describe consumer group: %w", err) + } + + w := app.NewTabWriter(a.OutWriter) + fmt.Fprintf(w, "Group ID:\t%v\n", desc.Group) + fmt.Fprintf(w, "State:\t%v\n", desc.State) + fmt.Fprintf(w, "Protocol:\t%v\n", desc.Protocol) + fmt.Fprintf(w, "Protocol Type:\t%v\n", desc.ProtocolType) + fmt.Fprintf(w, "Offsets:\t\n") + + w.Flush() + w.Init(a.OutWriter, app.TabwriterMinWidthNested, 4, 2, app.TabwriterPadChar, app.TabwriterFlags) + + for _, to := range desc.Topics { + fmt.Fprintf(w, "\t%v:\n", to.Topic) + fmt.Fprintf(w, "\t\tPartition\tGroup Offset\tHigh Watermark\tLag\tMetadata\t\n") + fmt.Fprintf(w, "\t\t---------\t------------\t--------------\t---\t--------\n") + + for _, p := range to.Partitions { + fmt.Fprintf(w, "\t\t%v\t%v\t%v\t%v\t%v\n", p.Partition, p.GroupOffset, p.HighWatermark, p.Lag, p.Metadata) + } + fmt.Fprintf(w, "\t\tTotal\t%d\t\t%d\t\n", to.TotalOffset, to.TotalLag) + } + + if !flagNoMembers { + fmt.Fprintf(w, "Members:\t") + + w.Flush() + w.Init(a.OutWriter, app.TabwriterMinWidthNested, 4, 2, app.TabwriterPadChar, app.TabwriterFlags) + + fmt.Fprintln(w) + for _, member := range desc.Members { + fmt.Fprintf(w, "\t%v:\n", member.ClientID) + fmt.Fprintf(w, "\t\tHost:\t%v\n", member.ClientHost) + + if len(member.TopicPartitions) > 0 { + fmt.Fprintf(w, "\t\tAssignments:\n") + fmt.Fprintf(w, "\t\t Topic\tPartitions\t\n") + fmt.Fprintf(w, "\t\t -----\t----------\t") + + for topic, partitions := range member.TopicPartitions { + fmt.Fprintf(w, "\n\t\t %v\t%v\t", topic, partitions) + } + } + fmt.Fprintf(w, "\n") + } + } + + w.Flush() + return nil + }, + } + cmd.Flags().BoolVar(&flagNoMembers, "no-members", false, "Hide members section of the output") + cmd.Flags().StringSliceVarP(&flagDescribeTopics, "topic", "t", []string{}, "topics to display for the group. 
defaults to all topics.") + return cmd +} + +func newPeekCommand(a *app.App) *cobra.Command { + var ( + flagPeekPartitions []int32 + flagPeekBefore int64 + flagPeekAfter int64 + flagPeekTopics []string + ) + cmd := &cobra.Command{ + Use: "peek GROUP", + Short: "Peek messages from consumer group offset", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidGroupArgs, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + grpName := args[0] + + described, err := cl.Admin.DescribeGroups(ctx, grpName) + if err != nil { + return fmt.Errorf("unable to describe consumer groups: %w", err) + } + g, ok := described[grpName] + if !ok || g.State == "Dead" { + fmt.Fprintf(a.OutWriter, "Group %v not found.\n", grpName) + return nil + } + + peekPartitions := make(map[int32]struct{}) + for _, p := range flagPeekPartitions { + peekPartitions[p] = struct{}{} + } + + var topicFilter []string + if len(flagPeekTopics) > 0 { + topicFilter = flagPeekTopics + for _, t := range flagPeekTopics { + _, err := pkgtopic.Describe(ctx, cl.Admin, t) + if err != nil { + return fmt.Errorf("topic %v not found: %w", t, err) + } + } + } + + offsets, err := cl.Admin.FetchOffsets(ctx, grpName) + if err != nil { + return fmt.Errorf("failed to fetch group offsets: %w", err) + } + + partMap := make(map[string]map[int32]kgo.Offset) + + type peekTarget struct { + topic string + partition int32 + endOffset int64 + } + var targets []peekTarget + + offsets.Each(func(o kadm.OffsetResponse) { + t := o.Topic + p := o.Partition + committed := o.At + + if len(topicFilter) > 0 && !slices.Contains(topicFilter, t) { + return + } + if len(peekPartitions) > 0 { + if _, ok := peekPartitions[p]; !ok { + return + } + } + + start := max(committed-flagPeekBefore, 0) + + if partMap[t] == nil { + partMap[t] = make(map[int32]kgo.Offset) + } + partMap[t][p] = kgo.NewOffset().At(start) + + targets = append(targets, peekTarget{ + topic: t, + partition: p, + endOffset: committed + flagPeekAfter, + }) + }) + + if len(targets) == 0 { + fmt.Fprintf(a.OutWriter, "No committed offsets found for group %v.\n", grpName) + return nil + } + + endLookup := make(map[string]map[int32]int64) + for _, tgt := range targets { + if endLookup[tgt.topic] == nil { + endLookup[tgt.topic] = make(map[int32]int64) + } + endLookup[tgt.topic][tgt.partition] = tgt.endOffset + } + + peekCl, err := a.NewKafClient(kgo.ConsumePartitions(partMap)) + if err != nil { + return err + } + defer peekCl.Close() + + mu := &sync.Mutex{} + + for { + fetches := peekCl.KGO.PollFetches(ctx) + if fetches.IsClientClosed() || ctx.Err() != nil { + return nil + } + + allDone := true + fetches.EachRecord(func(rec *kgo.Record) { + ends, ok := endLookup[rec.Topic] + if !ok { + return + } + endOff, ok := ends[rec.Partition] + if !ok { + return + } + if rec.Offset < endOff { + a.HandleMessage(rec, mu, app.OutputFormatDefault, nil) + } + if rec.Offset+1 < endOff { + allDone = false + } + }) + + if allDone { + return nil + } + } + }, + } + cmd.Flags().StringSliceVarP(&flagPeekTopics, "topics", "t", []string{}, "Topics to peek from") + cmd.Flags().Int32SliceVarP(&flagPeekPartitions, "partitions", "p", []int32{}, "Partitions to peek from") + cmd.Flags().Int64VarP(&flagPeekBefore, "before", "B", 0, "Number of messages to peek before current offset") + cmd.Flags().Int64VarP(&flagPeekAfter, "after", "A", 0, "Number of messages to peek after current offset") + return cmd +} + +func newCommitCommand(a 
*app.App) *cobra.Command { + var ( + topic string + offset string + partitionFlag int32 + allPartitions bool + offsetMap string + noconfirm bool + ) + cmd := &cobra.Command{ + Use: "commit GROUP", + Short: "Set offset for given consumer group", + Long: "Set offset for a given consumer group, creates one if it does not exist. Offsets cannot be set on a consumer group with active consumers.", + Example: ` kaf group commit my-group -t my-topic -o oldest --all-partitions + kaf group commit my-group -t my-topic -o newest -p 0 + kaf group commit my-group -t my-topic -o 42 -p 0 + kaf group commit my-group -t my-topic -o 2024-01-15T10:00:00Z --all-partitions + kaf group commit my-group --offset-map '{"0":123,"1":456}' -t my-topic`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + ctx := cmd.Context() + grp := args[0] + partitionOffsets := make(map[int32]int64) + + if topic == "" { + return fmt.Errorf("--topic is required") + } + if offsetMap == "" { + if offset == "" { + return fmt.Errorf("--offset is required when not using --offset-map") + } + } + + if offsetMap != "" { + if err := json.Unmarshal([]byte(offsetMap), &partitionOffsets); err != nil { + return fmt.Errorf("wrong --offset-map format. Use JSON with keys as partition numbers and values as offsets.\nExample: --offset-map '{\"0\":123, \"1\":135, \"2\":120}'") + } + } else { + var partitions []int32 + if allPartitions { + topics, err := cl.Admin.ListTopics(ctx, topic) + if err != nil { + return fmt.Errorf("unable to list topic: %w", err) + } + td, ok := topics[topic] + if !ok { + return fmt.Errorf("topic %s not found", topic) + } + for _, p := range td.Partitions.Sorted() { + partitions = append(partitions, p.Partition) + } + } else if partitionFlag != -1 { + partitions = []int32{partitionFlag} + } else { + return fmt.Errorf("either --partition, --all-partitions or --offset-map flag must be provided") + } + + slices.Sort(partitions) + + type assignment struct { + partition int32 + offset int64 + } + + g, gctx := errgroup.WithContext(ctx) + var mu sync.Mutex + var results []assignment + + for _, partition := range partitions { + g.Go(func() error { + i, err := strconv.ParseInt(offset, 10, 64) + if err != nil { + if offset == "oldest" || offset == "earliest" { + listed, err := cl.Admin.ListStartOffsets(gctx, topic) + if err != nil { + return fmt.Errorf("failed to get start offsets: %w", err) + } + if lo, ok := listed.Lookup(topic, partition); ok { + i = lo.Offset + } else { + fmt.Fprintf(a.OutWriter, "Partition %v: could not determine start offset. Skipping.\n", partition) + return nil + } + } else if offset == "newest" || offset == "latest" { + listed, err := cl.Admin.ListEndOffsets(gctx, topic) + if err != nil { + return fmt.Errorf("failed to get end offsets: %w", err) + } + if lo, ok := listed.Lookup(topic, partition); ok { + i = lo.Offset + } else { + fmt.Fprintf(a.OutWriter, "Partition %v: could not determine end offset. 
Skipping.\n", partition) + return nil + } + } else { + t, err := time.Parse(time.RFC3339, offset) + if err != nil { + return fmt.Errorf("offset is neither offset nor timestamp") + } + millis := t.UnixMilli() + listed, err := cl.Admin.ListOffsetsAfterMilli(gctx, millis, topic) + if err != nil { + return fmt.Errorf("failed to determine offset for timestamp: %w", err) + } + lo, ok := listed.Lookup(topic, partition) + if !ok || lo.Offset == -1 { + fmt.Fprintf(a.OutWriter, "Partition %v: could not determine offset from timestamp. Skipping.\n", partition) + return nil + } + i = lo.Offset + fmt.Fprintf(a.OutWriter, "Partition %v: determined offset %v from timestamp.\n", partition, i) + } + } + mu.Lock() + results = append(results, assignment{partition: partition, offset: i}) + mu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return err + } + + for _, assign := range results { + partitionOffsets[assign.partition] = assign.offset + } + } + + described, err := cl.Admin.DescribeGroups(ctx, grp) + if err != nil { + return fmt.Errorf("unable to describe consumer groups: %w", err) + } + if g, ok := described[grp]; ok { + if !slices.Contains([]string{"Empty", "Dead"}, g.State) { + return fmt.Errorf("consumer group %s has active consumers in it, cannot set offset", grp) + } + } + + fmt.Fprintf(a.OutWriter, "Resetting offsets to: %v\n", partitionOffsets) + + if !noconfirm { + prompt := promptui.Prompt{ + Label: "Reset offsets as described", + IsConfirm: true, + } + _, err := prompt.Run() + if err != nil { + return fmt.Errorf("aborted, exiting") + } + } + + topicOffsets := map[string]map[int32]int64{ + topic: partitionOffsets, + } + err = pkggroup.CommitOffsets(ctx, cl.Admin, grp, topicOffsets) + if err != nil { + return fmt.Errorf("failed to commit offset: %w", err) + } + + fmt.Fprintf(a.OutWriter, "Successfully committed offsets to %v.\n", partitionOffsets) + return nil + }, + } + cmd.Flags().StringVarP(&topic, "topic", "t", "", "topic") + cmd.Flags().StringVarP(&offset, "offset", "o", "", "offset to commit (integer, oldest/earliest, newest/latest, or RFC3339 timestamp)") + cmd.Flags().Int32VarP(&partitionFlag, "partition", "p", -1, "partition") + cmd.Flags().BoolVar(&allPartitions, "all-partitions", false, "apply to all partitions") + cmd.Flags().StringVar(&offsetMap, "offset-map", "", "set different offsets per different partitions in JSON format, e.g. {\"0\": 123, \"1\": 42}") + cmd.Flags().BoolVar(&noconfirm, "noconfirm", false, "Do not prompt for confirmation") + return cmd +} diff --git a/pkg/cmd/node/command.go b/pkg/cmd/node/command.go new file mode 100644 index 00000000..76859caf --- /dev/null +++ b/pkg/cmd/node/command.go @@ -0,0 +1,77 @@ +package node + +import ( + "fmt" + "sort" + + "github.com/spf13/cobra" + + "github.com/birdayz/kaf/pkg/app" + pkgnode "github.com/birdayz/kaf/pkg/node" +) + +// NewCommand returns the "kaf node" command with subcommands. +func NewCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Describe and List nodes", + } + + lsCmd := newListCommand(a) + cmd.AddCommand(lsCmd) + + return cmd +} + +// NewNodesAlias returns the top-level "nodes" alias. 
+func NewNodesAlias(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "nodes", + Short: "List nodes in a cluster", + RunE: listNodesRunE(a), + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func newListCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "ls", + Aliases: []string{"list"}, + Short: "List nodes in a cluster", + RunE: listNodesRunE(a), + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func listNodesRunE(a *app.App) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + nodes, err := pkgnode.List(cmd.Context(), cl.Admin) + if err != nil { + return fmt.Errorf("unable to list nodes: %w", err) + } + + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].ID < nodes[j].ID + }) + + w := app.NewTabWriter(a.OutWriter) + if !a.NoHeaderFlag { + fmt.Fprintf(w, "ID\tADDRESS\tCONTROLLER\t\n") + } + + for _, n := range nodes { + fmt.Fprintf(w, "%v\t%v\t%v\t\n", n.ID, n.Addr(), n.IsController) + } + + w.Flush() + return nil + } +} diff --git a/pkg/cmd/produce/command.go b/pkg/cmd/produce/command.go new file mode 100644 index 00000000..9e9a5161 --- /dev/null +++ b/pkg/cmd/produce/command.go @@ -0,0 +1,275 @@ +package produce + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "strings" + "text/template" + "time" + + "github.com/Masterminds/sprig/v3" + "github.com/spf13/cobra" + "github.com/twmb/franz-go/pkg/kgo" + + "github.com/birdayz/kaf/pkg/app" +) + +// NewCommand returns the "kaf produce" command. +func NewCommand(a *app.App) *cobra.Command { + var ( + keyFlag string + rawKeyFlag bool + headerFlag []string + repeatFlag int + partitionerFlag string + timestampFlag string + partitionFlag int32 + bufferSizeFlag int + inputModeFlag string + avroSchemaID int + avroKeySchemaID int + templateFlag bool + inputFormatFlag = app.InputFormatDefault + ) + + cmd := &cobra.Command{ + Use: "produce TOPIC", + Short: "Produce record. Reads data from stdin.", + Long: "Produce records to a Kafka topic. Reads data from stdin, one record per line by default. Supports key specification, headers, partitioner selection, protobuf/avro encoding, and go templates.", + Example: ` echo '{"hello":"world"}' | kaf produce my-topic + echo 'value' | kaf produce my-topic -k my-key + echo 'value' | kaf produce my-topic -H "env:prod" -H "version:1" + echo 'value' | kaf produce my-topic -n 10 + cat data.json | kaf produce my-topic --input-mode full`, + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + PreRunE: a.SetupProtoDescriptorRegistry, + RunE: func(cmd *cobra.Command, args []string) error { + var opts []kgo.Opt + switch partitionerFlag { + case "jvm": + opts = append(opts, kgo.RecordPartitioner(kgo.StickyKeyPartitioner(nil))) + case "rand": + opts = append(opts, kgo.RecordPartitioner(kgo.StickyPartitioner())) + case "rr": + opts = append(opts, kgo.RecordPartitioner(kgo.RoundRobinPartitioner())) + } + + if partitionFlag != int32(-1) { + opts = append(opts, kgo.RecordPartitioner(kgo.ManualPartitioner())) + } + + cl, err := a.NewKafClient(opts...) 
+ if err != nil { + return err + } + defer cl.Close() + + if avroSchemaID != -1 || avroKeySchemaID != -1 { + var scErr error + a.SchemaCache, scErr = a.NewSchemaCache() + if scErr != nil { + return scErr + } + if a.SchemaCache == nil { + return fmt.Errorf("could not connect to schema registry") + } + } + + var key []byte + if rawKeyFlag { + keyBytes, err := base64.RawStdEncoding.DecodeString(keyFlag) + if err != nil { + return fmt.Errorf("--raw-key is given, but value of --key is not base64") + } + key = keyBytes + } else if keyFlag != "" { + key = []byte(keyFlag) + } + if a.KeyProtoType != "" { + var err error + key, err = a.ProtoEncode([]byte(keyFlag), a.KeyProtoType) + if err != nil { + return fmt.Errorf("failed to encode proto key: %v", err) + } + } else if avroKeySchemaID != -1 { + avroKey, err := a.SchemaCache.EncodeMessage(avroKeySchemaID, []byte(keyFlag)) + if err != nil { + return fmt.Errorf("failed to encode avro key: %v", err) + } + key = avroKey + } + + var headers []kgo.RecordHeader + for _, h := range headerFlag { + v := strings.SplitN(h, ":", 2) + if len(v) == 2 { + headers = append(headers, kgo.RecordHeader{ + Key: v[0], + Value: []byte(v[1]), + }) + } + } + + ctx := cmd.Context() + + out := make(chan []byte, 1) + errCh := make(chan error, 1) + switch inputModeFlag { + case "full": + go readFull(a.InReader, out, errCh) + default: + go readLines(a.InReader, out, errCh, bufferSizeFlag) + } + + for data := range out { + for i := 0; i < repeatFlag; i++ { + input := data + + if templateFlag { + vars := map[string]any{"i": i} + tpl := template.New("kaf").Funcs(sprig.HermeticTxtFuncMap()) + + tpl, err := tpl.Parse(string(data)) + if err != nil { + return fmt.Errorf("failed to parse go template: %v", err) + } + + buf := bytes.NewBuffer(nil) + if err := tpl.Execute(buf, vars); err != nil { + return fmt.Errorf("failed to execute go template: %v", err) + } + input = buf.Bytes() + } + + var marshaledInput []byte + if a.ProtoType != "" { + var err error + marshaledInput, err = a.ProtoEncode(input, a.ProtoType) + if err != nil { + return fmt.Errorf("failed to encode proto value: %v", err) + } + } else if avroSchemaID != -1 { + avro, err := a.SchemaCache.EncodeMessage(avroSchemaID, input) + if err != nil { + return fmt.Errorf("failed to encode avro value: %v", err) + } + marshaledInput = avro + } else { + marshaledInput = input + } + + var ts time.Time + t, err := time.Parse(time.RFC3339, timestampFlag) + if err != nil { + ts = time.Now() + } else { + ts = t + } + + rec := &kgo.Record{ + Topic: args[0], + Key: key, + Value: marshaledInput, + Timestamp: ts, + } + + if inputFormatFlag == app.InputFormatJSONEachRow { + jsonEachRowMsg := app.JSONEachRowMessage{} + if err = json.Unmarshal(marshaledInput, &jsonEachRowMsg); err == nil { + if keyFlag == "" { + rec.Key = []byte(jsonEachRowMsg.Key) + } + for _, h := range jsonEachRowMsg.Headers { + rec.Headers = append(rec.Headers, kgo.RecordHeader{ + Key: h.Key, + Value: []byte(h.Value), + }) + } + rec.Partition = jsonEachRowMsg.Partition + rec.Value = []byte(jsonEachRowMsg.Payload) + } + } else if inputFormatFlag == app.InputFormatHex { + dst := make([]byte, hex.DecodedLen(len(marshaledInput))) + if _, err := hex.Decode(dst, marshaledInput); err != nil { + return fmt.Errorf("failed to decode hex input: %w", err) + } + rec.Value = dst + } + + if len(headers) > 0 { + rec.Headers = append(rec.Headers, headers...) 
+ } + if partitionFlag != -1 { + rec.Partition = partitionFlag + } + + results := cl.KGO.ProduceSync(ctx, rec) + if err := results.FirstErr(); err != nil { + return fmt.Errorf("failed to send record: %w", err) + } + r := results[0].Record + fmt.Fprintf(a.OutWriter, "Sent record to partition %v at offset %v.\n", r.Partition, r.Offset) + } + } + + select { + case err := <-errCh: + return err + default: + return nil + } + }, + } + + cmd.Flags().StringVarP(&keyFlag, "key", "k", "", "Key for the record. Currently only strings are supported.") + cmd.Flags().BoolVar(&rawKeyFlag, "raw-key", false, "Treat value of --key as base64 and use its decoded raw value as key") + cmd.Flags().StringArrayVarP(&headerFlag, "header", "H", []string{}, "Header in format <key>:<value>. May be used multiple times to add more headers.") + cmd.Flags().IntVarP(&repeatFlag, "repeat", "n", 1, "Repeat records to send.") + a.AddProtoFlags(cmd) + cmd.Flags().StringVar(&partitionerFlag, "partitioner", "", "Select partitioner: [jvm|rand|rr]") + cmd.Flags().StringVar(&timestampFlag, "timestamp", "", "Select timestamp for record") + cmd.Flags().Int32VarP(&partitionFlag, "partition", "p", -1, "Partition to produce to") + cmd.Flags().IntVarP(&avroSchemaID, "avro-schema-id", "", -1, "Value schema id for avro message encoding") + cmd.Flags().IntVarP(&avroKeySchemaID, "avro-key-schema-id", "", -1, "Key schema id for avro message encoding") + cmd.Flags().StringVarP(&inputModeFlag, "input-mode", "", "line", "Scanning input mode: [line|full]") + cmd.Flags().Var(&inputFormatFlag, "input", "Set input format messages: default, hex, json-each-row (json-each-row is compatible with output of kaf consume --output json-each-row)") + cmd.Flags().IntVarP(&bufferSizeFlag, "line-length-limit", "", 0, "line length limit in line input mode") + cmd.Flags().BoolVar(&templateFlag, "template", false, "run data through go template engine") + + if err := cmd.RegisterFlagCompletionFunc("input", app.CompleteInputFormat); err != nil { + panic(fmt.Sprintf("Failed to register flag completion: %v", err)) + } + + return cmd +} + +func readLines(reader io.Reader, out chan []byte, errCh chan<- error, bufferSize int) { + scanner := bufio.NewScanner(reader) + if bufferSize > 0 { + scanner.Buffer(make([]byte, bufferSize), bufferSize) + } + for scanner.Scan() { + out <- bytes.Clone(scanner.Bytes()) + } + close(out) + if err := scanner.Err(); err != nil { + errCh <- fmt.Errorf("scanning input failed: %w", err) + } +} + +func readFull(reader io.Reader, out chan []byte, errCh chan<- error) { + data, err := io.ReadAll(reader) + if err != nil { + close(out) + errCh <- fmt.Errorf("unable to read data: %w", err) + return + } + out <- data + close(out) +} diff --git a/pkg/cmd/query/command.go b/pkg/cmd/query/command.go new file mode 100644 index 00000000..73204596 --- /dev/null +++ b/pkg/cmd/query/command.go @@ -0,0 +1,143 @@ +package query + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + "github.com/twmb/franz-go/pkg/kgo" + + "github.com/birdayz/kaf/pkg/app" +) + +// NewCommand returns the "kaf query" command. +func NewCommand(a *app.App) *cobra.Command { + var ( + keyFlag string + grepValue string + ) + + cmd := &cobra.Command{ + Use: "query TOPIC", + Short: "Query topic by key", + Long: "Query a topic by scanning all partitions from the start and filtering by key. Optionally grep the value. 
Stops when all partitions reach the high watermark.", + Example: ` kaf query my-topic -k my-key + kaf query my-topic -k my-key --grep "error"`, + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + PreRunE: a.SetupProtoDescriptorRegistry, + RunE: func(cmd *cobra.Command, args []string) error { + topic := args[0] + ctx := cmd.Context() + + // Get end offsets so we know when to stop. + cl, err := a.NewKafClient() + if err != nil { + return err + } + endOffsets, err := cl.Admin.ListEndOffsets(ctx, topic) + if err != nil { + cl.Close() + return fmt.Errorf("failed to get end offsets: %w", err) + } + cl.Close() + + endOffsetMap := app.EndOffsetMapForTopic(endOffsets, topic) + + // Check if topic is empty. + empty := true + for _, off := range endOffsetMap { + if off > 0 { + empty = false + break + } + } + if empty { + return nil + } + + var scErr error + a.SchemaCache, scErr = a.NewSchemaCache() + if scErr != nil { + return scErr + } + + // Consume all partitions from the start. + kafCl, err := a.NewKafClient( + kgo.ConsumeTopics(topic), + kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), + ) + if err != nil { + return err + } + defer kafCl.Close() + + for { + fetches := kafCl.KGO.PollFetches(ctx) + if fetches.IsClientClosed() || ctx.Err() != nil { + return nil + } + + fetches.EachRecord(func(rec *kgo.Record) { + if string(rec.Key) != keyFlag { + return + } + + var keyTextRaw string + var valueTextRaw string + if a.ProtoType != "" { + d, err := app.ProtoDecode(a.Reg, rec.Value, a.ProtoType) + if err != nil { + fmt.Fprintln(a.ErrWriter, "Failed proto decode") + } + valueTextRaw = string(d) + } else { + valueTextRaw = string(rec.Value) + } + + if a.KeyProtoType != "" { + d, err := app.ProtoDecode(a.Reg, rec.Key, a.KeyProtoType) + if err != nil { + fmt.Fprintln(a.ErrWriter, "Failed proto decode") + } + keyTextRaw = string(d) + } else { + keyTextRaw = string(rec.Key) + } + + if grepValue != "" && !strings.Contains(valueTextRaw, grepValue) { + return + } + + fmt.Fprintf(a.OutWriter, "Key: %v\n", keyTextRaw) + fmt.Fprintf(a.OutWriter, "Value: %v\n", valueTextRaw) + }) + + if errs := fetches.Errors(); len(errs) > 0 { + for _, e := range errs { + fmt.Fprintf(a.ErrWriter, "fetch error topic %s partition %d: %v\n", e.Topic, e.Partition, e.Err) + } + } + + // Check if we've reached end offsets for all partitions. + done := true + fetches.EachRecord(func(rec *kgo.Record) { + if endOff, ok := endOffsetMap[rec.Partition]; ok { + if rec.Offset+1 < endOff { + done = false + } + } + }) + if done { + return nil + } + } + }, + } + + cmd.Flags().StringVarP(&keyFlag, "key", "k", "", "Key to search for") + a.AddProtoFlags(cmd) + cmd.Flags().StringVar(&grepValue, "grep", "", "Grep for value") + + return cmd +} diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go new file mode 100644 index 00000000..312decc8 --- /dev/null +++ b/pkg/cmd/root.go @@ -0,0 +1,74 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + + "github.com/birdayz/kaf/pkg/app" + "github.com/birdayz/kaf/pkg/cmd/completion" + kafconfig "github.com/birdayz/kaf/pkg/cmd/config" + "github.com/birdayz/kaf/pkg/cmd/consume" + "github.com/birdayz/kaf/pkg/cmd/group" + "github.com/birdayz/kaf/pkg/cmd/node" + "github.com/birdayz/kaf/pkg/cmd/produce" + "github.com/birdayz/kaf/pkg/cmd/query" + "github.com/birdayz/kaf/pkg/cmd/topic" +) + +// Execute is the single entry point for the CLI. 
+func Execute(version, commit string) error { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer stop() + + a := app.New() + + root := &cobra.Command{ + Use: "kaf", + Short: "Kafka Command Line utility for cluster management", + Version: fmt.Sprintf("%s (%s)", version, commit), + SilenceUsage: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + a.OutWriter = cmd.OutOrStdout() + a.ErrWriter = cmd.ErrOrStderr() + a.InReader = cmd.InOrStdin() + + if a.OutWriter != os.Stdout { + a.ColorableOut = a.OutWriter + } + + return a.InitConfig() + }, + } + + // Deprecated: --verbose was for sarama logging, no-op with franz-go. + var verbose bool + root.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Whether to turn on sarama logging") + _ = root.PersistentFlags().MarkDeprecated("verbose", "no longer applicable (sarama removed)") + + root.PersistentFlags().StringVar(&a.CfgFile, "config", "", "config file (default is $HOME/.kaf/config)") + root.PersistentFlags().StringSliceVarP(&a.BrokersFlag, "brokers", "b", nil, "Comma separated list of broker ip:port pairs") + root.PersistentFlags().StringVar(&a.SchemaRegistryURL, "schema-registry", "", "URL to a Confluent schema registry. Used for attempting to decode Avro-encoded messages") + root.PersistentFlags().StringVarP(&a.ClusterOverride, "cluster", "c", "", "set a temporary current cluster") + + root.AddCommand( + consume.NewCommand(a), + produce.NewCommand(a), + topic.NewCommand(a), + topic.NewTopicsAlias(a), + group.NewCommand(a), + group.NewGroupsAlias(a), + node.NewCommand(a), + node.NewNodesAlias(a), + query.NewCommand(a), + kafconfig.NewCommand(a), + completion.NewCommand(root, a), + ) + + a.Root = root + return root.ExecuteContext(ctx) +} diff --git a/pkg/cmd/topic/command.go b/pkg/cmd/topic/command.go new file mode 100644 index 00000000..abe80084 --- /dev/null +++ b/pkg/cmd/topic/command.go @@ -0,0 +1,507 @@ +package topic + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/manifoldco/promptui" + "github.com/spf13/cobra" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kmsg" + + "github.com/birdayz/kaf/pkg/app" + pkgtopic "github.com/birdayz/kaf/pkg/topic" +) + +// NewCommand returns the "kaf topic" parent command. +func NewCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "topic", + Short: "Create and describe topics.", + } + cmd.AddCommand( + newCreateCommand(a), + newDeleteCommand(a), + newListCommand(a), + newDescribeCommand(a), + newAddConfigCommand(a), + newRemoveConfigCommand(a), + newSetConfigCommand(a), + newUpdateCommand(a), + newLagCommand(a), + ) + return cmd +} + +// NewTopicsAlias returns the "kaf topics" alias for "kaf topic ls". 
+func NewTopicsAlias(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "topics", + Short: "List topics", + Args: cobra.ExactArgs(0), + RunE: listTopicsRunE(a), + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func listTopicsRunE(a *app.App) func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + topics, err := pkgtopic.List(cmd.Context(), cl.Admin) + if err != nil { + return fmt.Errorf("unable to list topics: %w", err) + } + + w := app.NewTabWriter(a.OutWriter) + if !a.NoHeaderFlag { + fmt.Fprintf(w, "NAME\tPARTITIONS\tREPLICAS\t\n") + } + for _, t := range topics { + fmt.Fprintf(w, "%v\t%v\t%v\t\n", t.Name, t.Partitions, t.ReplicationFactor) + } + w.Flush() + return nil + } +} + +func newListCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "ls", + Aliases: []string{"list"}, + Short: "List topics", + Args: cobra.ExactArgs(0), + RunE: listTopicsRunE(a), + } + a.AddNoHeadersFlag(cmd) + return cmd +} + +func newDescribeCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "describe TOPIC", + Short: "Describe topic", + Long: "Describe a topic. Default values of the configuration are omitted.", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + desc, err := pkgtopic.Describe(cmd.Context(), cl.Admin, args[0]) + if err != nil { + return fmt.Errorf("unable to describe topic: %w", err) + } + + w := app.NewTabWriter(a.OutWriter) + fmt.Fprintf(w, "Name:\t%v\t\n", desc.Name) + fmt.Fprintf(w, "Internal:\t%v\t\n", desc.IsInternal) + fmt.Fprintf(w, "Compacted:\t%v\t\n", desc.Compacted) + fmt.Fprintf(w, "Partitions:\n") + + w.Flush() + w.Init(a.OutWriter, app.TabwriterMinWidthNested, 4, 2, app.TabwriterPadChar, app.TabwriterFlags) + + fmt.Fprintf(w, "\tPartition\tHigh Watermark\tLeader\tReplicas\tISR\t\n") + fmt.Fprintf(w, "\t---------\t--------------\t------\t--------\t---\t\n") + + highWatermarksSum := int64(0) + for _, p := range desc.Partitions { + highWatermarksSum += p.HighWatermark + fmt.Fprintf(w, "\t%v\t%v\t%v\t%v\t%v\t\n", p.ID, p.HighWatermark, p.Leader, p.Replicas, p.ISR) + } + + w.Flush() + fmt.Fprintf(w, "Summed HighWatermark:\t%d\n", highWatermarksSum) + w.Flush() + + fmt.Fprintf(w, "Config:\n") + fmt.Fprintf(w, "\tName\tValue\tSensitive\t\n") + fmt.Fprintf(w, "\t----\t-----\t---------\t\n") + + for _, entry := range desc.Configs { + if entry.IsDefault { + continue + } + fmt.Fprintf(w, "\t%v\t%v\t%v\t\n", entry.Name, entry.Value, entry.Sensitive) + } + + w.Flush() + return nil + }, + } +} + +func newCreateCommand(a *app.App) *cobra.Command { + var ( + partitionsFlag int32 + replicasFlag int16 + compactFlag bool + ) + cmd := &cobra.Command{ + Use: "create TOPIC", + Short: "Create a topic", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + topicName := args[0] + configs := make(map[string]*string) + if compactFlag { + compact := "compact" + configs["cleanup.policy"] = &compact + } + + err = pkgtopic.Create(cmd.Context(), cl.Admin, topicName, partitionsFlag, replicasFlag, configs) + if err != nil { + return fmt.Errorf("could not create topic %v: %w", topicName, err) + } + + w := app.NewTabWriter(a.OutWriter) + fmt.Fprintf(w, "Created 
topic.\n") + fmt.Fprintln(w, "\tTopic Name:\t", topicName) + fmt.Fprintln(w, "\tPartitions:\t", partitionsFlag) + fmt.Fprintln(w, "\tReplication Factor:\t", replicasFlag) + if compactFlag { + fmt.Fprintln(w, "\tCleanup Policy:\tcompact") + } + w.Flush() + return nil + }, + } + cmd.Flags().Int32VarP(&partitionsFlag, "partitions", "p", int32(1), "Number of partitions") + cmd.Flags().Int16VarP(&replicasFlag, "replicas", "r", int16(1), "Number of replicas") + cmd.Flags().BoolVar(&compactFlag, "compact", false, "Enable topic compaction") + return cmd +} + +func newDeleteCommand(a *app.App) *cobra.Command { + var noconfirm bool + cmd := &cobra.Command{ + Use: "delete TOPIC", + Short: "Delete a topic", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + RunE: func(cmd *cobra.Command, args []string) error { + topicName := args[0] + + if !noconfirm { + prompt := promptui.Prompt{ + Label: fmt.Sprintf("Delete topic %q", topicName), + IsConfirm: true, + } + if _, err := prompt.Run(); err != nil { + return fmt.Errorf("aborted, exiting") + } + } + + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + err = pkgtopic.Delete(cmd.Context(), cl.Admin, topicName) + if err != nil { + return fmt.Errorf("could not delete topic %v: %w", topicName, err) + } + fmt.Fprintf(a.OutWriter, "Deleted topic %v.\n", topicName) + return nil + }, + } + cmd.Flags().BoolVar(&noconfirm, "noconfirm", false, "Do not prompt for confirmation") + return cmd +} + +func newAddConfigCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "add-config TOPIC KEY VALUE", + Short: "Add config key/value pair to topic", + Args: cobra.ExactArgs(3), + ValidArgsFunction: a.ValidTopicArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + topicName := args[0] + key := args[1] + value := args[2] + + resps, err := cl.Admin.AlterTopicConfigs(cmd.Context(), []kadm.AlterConfig{ + {Name: key, Value: &value}, + }, topicName) + if err != nil { + return fmt.Errorf("failed to update topic config: %w", err) + } + for _, r := range resps { + if r.Err != nil { + return fmt.Errorf("failed to update topic config: %w", r.Err) + } + } + fmt.Fprintf(a.OutWriter, "Added config %v=%v to topic %v.\n", key, value, topicName) + return nil + }, + } +} + +func newRemoveConfigCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "rm-config TOPIC ATTR1,ATTR2...", + Short: "Remove attributes from topic", + Args: cobra.ExactArgs(2), + ValidArgsFunction: a.ValidTopicArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + topicName := args[0] + attrsToRemove := strings.Split(args[1], ",") + + var deleteConfigs []kadm.AlterConfig + for _, attr := range attrsToRemove { + deleteConfigs = append(deleteConfigs, kadm.AlterConfig{ + Op: kadm.DeleteConfig, + Name: attr, + }) + } + + resps, err := cl.Admin.AlterTopicConfigs(cmd.Context(), deleteConfigs, topicName) + if err != nil { + return fmt.Errorf("failed to remove attributes from topic config: %w", err) + } + for _, r := range resps { + if r.Err != nil { + return fmt.Errorf("failed to remove attributes from topic config: %w", r.Err) + } + } + fmt.Fprintf(a.OutWriter, "Removed attributes %v from topic %v.\n", attrsToRemove, topicName) + return nil + }, + } +} + +func newSetConfigCommand(a *app.App) *cobra.Command { + return &cobra.Command{ + Use: "set-config", + Short: 
"set topic config. requires Kafka >=2.3.0 on broker side and kaf cluster config.", + Example: "kaf topic set-config topic.name \"cleanup.policy=delete\"", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + topicName := args[0] + splt := strings.Split(args[1], ",") + configs := make(map[string]string) + + for _, kv := range splt { + s := strings.Split(kv, "=") + if len(s) != 2 { + continue + } + configs[s[0]] = s[1] + } + + if len(configs) < 1 { + return fmt.Errorf("no valid configs found") + } + + err = pkgtopic.SetConfig(cmd.Context(), cl.Admin, topicName, configs) + if err != nil { + return fmt.Errorf("unable to alter topic config: %w", err) + } + fmt.Fprintf(a.OutWriter, "Updated config.\n") + return nil + }, + } +} + +func newUpdateCommand(a *app.App) *cobra.Command { + var ( + partitionsFlag int32 + partitionAssignmentsFlag string + ) + cmd := &cobra.Command{ + Use: "update TOPIC", + Short: "Update topic", + Example: "kaf topic update -p 5 --partition-assignments '[[1,2,3],[1,2,3]]'", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + if partitionsFlag == -1 && partitionAssignmentsFlag == "" { + return fmt.Errorf("number of partitions and/or partition assignments must be given") + } + + if partitionsFlag != int32(-1) { + err := pkgtopic.UpdatePartitions(cmd.Context(), cl.Admin, args[0], int(partitionsFlag)) + if err != nil { + return fmt.Errorf("failed to update partitions: %w", err) + } + } + + if partitionAssignmentsFlag != "" { + var assignments [][]int32 + if err := json.Unmarshal([]byte(partitionAssignmentsFlag), &assignments); err != nil { + return fmt.Errorf("invalid partition assignments: %w", err) + } + + req := kmsg.NewAlterPartitionAssignmentsRequest() + reqTopic := kmsg.NewAlterPartitionAssignmentsRequestTopic() + reqTopic.Topic = args[0] + for i, brokers := range assignments { + p := kmsg.NewAlterPartitionAssignmentsRequestTopicPartition() + p.Partition = int32(i) + p.Replicas = brokers + reqTopic.Partitions = append(reqTopic.Partitions, p) + } + req.Topics = append(req.Topics, reqTopic) + + resp, err := req.RequestWith(cmd.Context(), cl.KGO) + if err != nil { + return fmt.Errorf("failed to reassign the partition assignments: %w", err) + } + for _, t := range resp.Topics { + for _, p := range t.Partitions { + if p.ErrorCode != 0 { + return fmt.Errorf("failed to reassign partition %v: %v", p.Partition, p.ErrorMessage) + } + } + } + } + fmt.Fprintf(a.OutWriter, "Updated topic.\n") + return nil + }, + } + cmd.Flags().Int32VarP(&partitionsFlag, "partitions", "p", int32(-1), "Number of partitions") + cmd.Flags().StringVar(&partitionAssignmentsFlag, "partition-assignments", "", "Partition Assignments. Optional. If set in combination with -p, an assignment must be provided for each new partition. Example: '[[1,2,3],[1,2,3]]' (JSON Array syntax) assigns two new partitions to brokers 1,2,3. 
If used by itself, a reassignment must be provided for all partitions.") + return cmd +} + +func newLagCommand(a *app.App) *cobra.Command { + cmd := &cobra.Command{ + Use: "lag TOPIC", + Short: "Display the total lags for each consumer group", + Args: cobra.ExactArgs(1), + ValidArgsFunction: a.ValidTopicArgs, + RunE: func(cmd *cobra.Command, args []string) error { + topicName := args[0] + cl, err := a.NewKafClient() + if err != nil { + return err + } + defer cl.Close() + + ctx := cmd.Context() + + endOffsets, err := cl.Admin.ListEndOffsets(ctx, topicName) + if err != nil { + return fmt.Errorf("unable to list end offsets: %w", err) + } + + groups, err := cl.Admin.ListGroups(ctx) + if err != nil { + return fmt.Errorf("unable to list consumer groups: %w", err) + } + + groupNames := groups.Groups() + sort.Strings(groupNames) + + described, err := cl.Admin.DescribeGroups(ctx, groupNames...) + if err != nil { + return fmt.Errorf("unable to describe consumer groups: %w", err) + } + + type lagEntry struct { + group string + state string + lag int64 + } + + var lagEntries []lagEntry + for _, g := range described.Sorted() { + var hasTopic bool + for _, m := range g.Members { + if ca, ok := m.Assigned.AsConsumer(); ok { + for _, t := range ca.Topics { + if t.Topic == topicName { + hasTopic = true + break + } + } + } + if hasTopic { + break + } + } + + if !hasTopic { + continue + } + + offsets, err := cl.Admin.FetchOffsetsForTopics(ctx, g.Group, topicName) + if err != nil { + continue + } + + var totalLag int64 + offsets.Each(func(o kadm.OffsetResponse) { + if o.Topic != topicName { + return + } + if endO, ok := endOffsets.Lookup(topicName, o.Partition); ok { + if o.Offset.At >= 0 && endO.Offset > o.Offset.At { + totalLag += endO.Offset - o.Offset.At + } + } + }) + + lagEntries = append(lagEntries, lagEntry{ + group: g.Group, + state: g.State, + lag: totalLag, + }) + } + + w := app.NewTabWriter(a.OutWriter) + if !a.NoHeaderFlag { + fmt.Fprintf(w, "GROUP ID\tSTATE\tLAG\n") + } + for _, e := range lagEntries { + fmt.Fprintf(w, "%v\t%v\t%v\n", e.group, e.state, e.lag) + } + w.Flush() + return nil + }, + } + a.AddNoHeadersFlag(cmd) + return cmd +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 698ee07b..6fb21fdf 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -5,8 +5,7 @@ import ( "os" "path/filepath" - homedir "github.com/mitchellh/go-homedir" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) type SASL struct { @@ -46,15 +45,23 @@ type Cluster struct { type Config struct { CurrentCluster string `yaml:"current-cluster"` - ClusterOverride string + ClusterOverride string `yaml:"-"` Clusters []*Cluster `yaml:"clusters"` + // configPath is the file path used for reading and writing this config. 
+ configPath string `yaml:"-"` } -func (c *Config) SetCurrentCluster(name string) error { - var oldCluster string - if c.ActiveCluster() != nil { - oldCluster = c.ActiveCluster().Name +func (c *Config) HasCluster(name string) bool { + for _, cluster := range c.Clusters { + if cluster.Name == name { + return true + } } + return false +} + +func (c *Config) SetCurrentCluster(name string) error { + oldCluster := c.CurrentCluster for _, cluster := range c.Clusters { if cluster.Name == name { c.CurrentCluster = name @@ -70,7 +77,7 @@ func (c *Config) SetCurrentCluster(name string) error { } } - return fmt.Errorf("Could not find cluster with name %v", name) + return fmt.Errorf("could not find cluster with name %v", name) } func (c *Config) ActiveCluster() *Cluster { @@ -89,73 +96,107 @@ func (c *Config) ActiveCluster() *Cluster { for _, cluster := range c.Clusters { if cluster.Name == toSearch { - // Make copy of cluster struct, using a pointer leads to unintended - // behavior where modifications on currentCluster are written back - // into the config - c := *cluster - return &c + // Return a copy to prevent modifications from leaking back + // into the config's cluster list. + cp := *cluster + return &cp } } return nil } func (c *Config) Write() error { - home, err := homedir.Dir() - if err != nil { - return err + configPath := c.configPath + if configPath == "" { + var err error + configPath, err = getDefaultConfigPath() + if err != nil { + return err + } + } + configDir := filepath.Dir(configPath) + if err := os.MkdirAll(configDir, 0700); err != nil { + return fmt.Errorf("create config directory: %w", err) } - configDir := filepath.Join(home, ".kaf") - _ = os.MkdirAll(configDir, 0755) - configPath := filepath.Join(configDir, "config") - - file, err := os.OpenFile(configPath, os.O_TRUNC|os.O_RDWR|os.O_CREATE, 0644) + tmpFile, err := os.CreateTemp(configDir, "config.*.tmp") if err != nil { - panic(err) + return fmt.Errorf("create temp config file: %w", err) } - defer file.Close() + tmpPath := tmpFile.Name() - encoder := yaml.NewEncoder(file) - return encoder.Encode(&c) + encoder := yaml.NewEncoder(tmpFile) + if err := encoder.Encode(&c); err != nil { + tmpFile.Close() + os.Remove(tmpPath) + return fmt.Errorf("encode config: %w", err) + } + if err := encoder.Close(); err != nil { + tmpFile.Close() + os.Remove(tmpPath) + return fmt.Errorf("flush config encoder: %w", err) + } + if err := tmpFile.Close(); err != nil { + os.Remove(tmpPath) + return fmt.Errorf("close temp config file: %w", err) + } + if err := os.Chmod(tmpPath, 0600); err != nil { + os.Remove(tmpPath) + return fmt.Errorf("chmod temp config file: %w", err) + } + if err := os.Rename(tmpPath, configPath); err != nil { + os.Remove(tmpPath) + return fmt.Errorf("rename temp config file: %w", err) + } + return nil } func ReadConfig(cfgPath string) (c Config, err error) { - file, err := os.OpenFile(getConfigPath(cfgPath), os.O_RDONLY, 0644) + resolvedPath, err := resolveConfigPath(cfgPath) + if err != nil { + return Config{}, err + } + + file, err := os.OpenFile(resolvedPath, os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { - return Config{}, nil + return Config{configPath: resolvedPath}, nil } - return Config{}, err + return Config{}, fmt.Errorf("open config file: %w", err) } defer file.Close() decoder := yaml.NewDecoder(file) err = decoder.Decode(&c) if err != nil { - return Config{}, err + return Config{}, fmt.Errorf("decode config: %w", err) } + c.configPath = resolvedPath return c, nil } func fileExists(filename string) bool 
{ info, err := os.Stat(filename) - if os.IsNotExist(err) { + if err != nil { return false } return !info.IsDir() } -func getConfigPath(cfgPath string) string { - if !fileExists(cfgPath) { +func resolveConfigPath(cfgPath string) (string, error) { + if cfgPath == "" { return getDefaultConfigPath() } - return cfgPath + if !fileExists(cfgPath) { + return "", fmt.Errorf("config file %q does not exist", cfgPath) + } + return cfgPath, nil } -func getDefaultConfigPath() string { - home, err := homedir.Dir() +func getDefaultConfigPath() (string, error) { + home, err := os.UserHomeDir() if err != nil { - panic(err) + return "", fmt.Errorf("could not determine home directory: %w", err) } - return filepath.Join(home, ".kaf", "config") + return filepath.Join(home, ".kaf", "config"), nil } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100644 index 00000000..055532a9 --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,104 @@ +package config + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestReadConfig_LegacyYAML(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config") + err := os.WriteFile(path, []byte(`current-cluster: local +clusters: + - name: local + brokers: + - localhost:9092 + SASL: + mechanism: PLAIN + username: admin + password: secret + TLS: + cafile: /etc/ssl/ca.pem + insecure: true + security-protocol: SASL_SSL + schema-registry-url: http://localhost:8081 + schema-registry-credentials: + username: sr-user + password: sr-pass +`), 0644) + require.NoError(t, err) + + cfg, err := ReadConfig(path) + require.NoError(t, err) + require.Equal(t, "local", cfg.CurrentCluster) + require.Len(t, cfg.Clusters, 1) + + c := cfg.Clusters[0] + require.Equal(t, "local", c.Name) + require.Equal(t, []string{"localhost:9092"}, c.Brokers) + require.Equal(t, "SASL_SSL", c.SecurityProtocol) + require.Equal(t, "http://localhost:8081", c.SchemaRegistryURL) + + require.NotNil(t, c.SASL) + require.Equal(t, "PLAIN", c.SASL.Mechanism) + require.Equal(t, "admin", c.SASL.Username) + require.Equal(t, "secret", c.SASL.Password) + + require.NotNil(t, c.TLS) + require.Equal(t, "/etc/ssl/ca.pem", c.TLS.Cafile) + require.True(t, c.TLS.Insecure) + + require.NotNil(t, c.SchemaRegistryCredentials) + require.Equal(t, "sr-user", c.SchemaRegistryCredentials.Username) + require.Equal(t, "sr-pass", c.SchemaRegistryCredentials.Password) +} + +func TestReadConfig_ExplicitPathMustExist(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "nonexistent") + _, err := ReadConfig(path) + require.Error(t, err) +} + +func TestHasCluster(t *testing.T) { + cfg := Config{ + Clusters: []*Cluster{ + {Name: "a"}, + {Name: "b"}, + }, + } + require.True(t, cfg.HasCluster("a")) + require.True(t, cfg.HasCluster("b")) + require.False(t, cfg.HasCluster("c")) +} + +func TestActiveCluster(t *testing.T) { + cfg := Config{ + CurrentCluster: "prod", + Clusters: []*Cluster{ + {Name: "dev", Brokers: []string{"dev:9092"}}, + {Name: "prod", Brokers: []string{"prod:9092"}}, + }, + } + + c := cfg.ActiveCluster() + require.NotNil(t, c) + require.Equal(t, "prod", c.Name) + + // ClusterOverride takes precedence. 
+ cfg.ClusterOverride = "dev" + c = cfg.ActiveCluster() + require.NotNil(t, c) + require.Equal(t, "dev", c.Name) +} + +func TestActiveCluster_NotFound(t *testing.T) { + cfg := Config{ + CurrentCluster: "missing", + Clusters: []*Cluster{{Name: "other"}}, + } + require.Nil(t, cfg.ActiveCluster()) +} diff --git a/pkg/config/confluent_cloud.go b/pkg/config/confluent_cloud.go index 0514f5c8..2000ac46 100644 --- a/pkg/config/confluent_cloud.go +++ b/pkg/config/confluent_cloud.go @@ -2,26 +2,25 @@ package config import ( "errors" + "fmt" "path/filepath" "strings" "os" "github.com/magiconair/properties" - homedir "github.com/mitchellh/go-homedir" ) // Default confluent cloud config file path var defaultCcloudSubpath = filepath.Join(".ccloud", "config") func TryFindCcloudConfigFile() (string, error) { - homedir, err := homedir.Dir() + home, err := os.UserHomeDir() if err != nil { - return "", err } - absoluteDefaultPath := filepath.Join(homedir, defaultCcloudSubpath) + absoluteDefaultPath := filepath.Join(home, defaultCcloudSubpath) _, err = os.Stat(absoluteDefaultPath) if err == nil { @@ -32,15 +31,23 @@ func TryFindCcloudConfigFile() (string, error) { } func extractValue(key, input string) (unquoted string, ok bool) { - if strings.HasPrefix(input, key+"=") { - return strings.TrimRight(strings.Replace(strings.TrimPrefix(input, key+"="), "\"", "", -1), ";"), true + if after, found := strings.CutPrefix(input, key+"="); found { + v := strings.TrimRight(after, ";") + v = strings.TrimPrefix(v, "\"") + v = strings.TrimSuffix(v, "\"") + return v, true } return } // TODO return []string for brokers func ParseConfluentCloudConfig(path string) (username, password, broker string, err error) { - p := properties.MustLoadFile(path, properties.UTF8).Map() + props, loadErr := properties.LoadFile(path, properties.UTF8) + if loadErr != nil { + err = fmt.Errorf("failed to load confluent cloud config: %w", loadErr) + return + } + p := props.Map() if _, ok := p["sasl.jaas.config"]; !ok { err = errors.New("invalid or unsupported confluent cloud config") return @@ -52,21 +59,17 @@ func ParseConfluentCloudConfig(path string) (username, password, broker string, words := strings.Split(p["sasl.jaas.config"], " ") - jaasOk := true for _, word := range words { if result, ok := extractValue("username", word); ok { username = result - jaasOk = jaasOk && ok } if result, ok := extractValue("password", word); ok { password = result - jaasOk = jaasOk && ok } - } - if !jaasOk { - return "", "", "", errors.New("Could not parse sasl.jaas.config from ccloud") + if username == "" || password == "" { + return "", "", "", errors.New("could not parse username/password from sasl.jaas.config") } broker = p["bootstrap.servers"] diff --git a/pkg/group/group.go b/pkg/group/group.go new file mode 100644 index 00000000..5b79ce30 --- /dev/null +++ b/pkg/group/group.go @@ -0,0 +1,230 @@ +package group + +import ( + "context" + "fmt" + "sort" + + "github.com/twmb/franz-go/pkg/kadm" +) + +// GroupInfo holds summary info for a consumer group listing. +type GroupInfo struct { + Name string + State string + Consumers int +} + +// List returns all consumer groups with state and member count. +func List(ctx context.Context, admin *kadm.Client) ([]GroupInfo, error) { + listed, err := admin.ListGroups(ctx) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(listed)) + for name := range listed { + names = append(names, name) + } + sort.Strings(names) + + described, err := admin.DescribeGroups(ctx, names...) 
+ if err != nil { + return nil, fmt.Errorf("describe groups: %w", err) + } + + result := make([]GroupInfo, 0, len(described)) + for _, g := range described.Sorted() { + if g.Err != nil { + continue + } + result = append(result, GroupInfo{ + Name: g.Group, + State: g.State, + Consumers: len(g.Members), + }) + } + return result, nil +} + +// MemberInfo holds a single member's details. +type MemberInfo struct { + ClientID string + ClientHost string + // TopicPartitions maps topic -> assigned partitions. + TopicPartitions map[string][]int32 +} + +// PartitionOffset holds per-partition offset and lag info. +type PartitionOffset struct { + Partition int32 + GroupOffset int64 + HighWatermark int64 + Lag int64 + Metadata string +} + +// TopicOffsets groups partition offsets per topic, plus totals. +type TopicOffsets struct { + Topic string + Partitions []PartitionOffset + TotalLag int64 + TotalOffset int64 +} + +// GroupDescription holds the full description of a consumer group. +type GroupDescription struct { + Group string + State string + Protocol string + ProtocolType string + Topics []TopicOffsets + Members []MemberInfo +} + +// Describe returns a full description of a consumer group including offsets and lag. +func Describe(ctx context.Context, admin *kadm.Client, group string, filterTopics []string) (*GroupDescription, error) { + described, err := admin.DescribeGroups(ctx, group) + if err != nil { + return nil, err + } + + g, ok := described[group] + if !ok { + return nil, fmt.Errorf("group %s not found", group) + } + if g.Err != nil { + return nil, g.Err + } + if g.State == "Dead" { + return nil, fmt.Errorf("group %s not found", group) + } + + // Fetch committed offsets for all topics. + offsets, err := admin.FetchOffsets(ctx, group) + if err != nil { + return nil, fmt.Errorf("fetch offsets: %w", err) + } + + // Collect topics from the offsets. + topicNames := make([]string, 0) + topicPartitions := make(map[string][]int32) + offsets.Each(func(o kadm.OffsetResponse) { + topicPartitions[o.Topic] = append(topicPartitions[o.Topic], o.Partition) + }) + for t := range topicPartitions { + topicNames = append(topicNames, t) + } + sort.Strings(topicNames) + + // Get high watermarks for these topics. + endOffsets, err := admin.ListEndOffsets(ctx, topicNames...) + if err != nil { + return nil, fmt.Errorf("list end offsets: %w", err) + } + + filterSet := make(map[string]bool, len(filterTopics)) + for _, t := range filterTopics { + filterSet[t] = true + } + + var topics []TopicOffsets + for _, topicName := range topicNames { + if len(filterSet) > 0 && !filterSet[topicName] { + continue + } + + parts := topicPartitions[topicName] + sort.Slice(parts, func(i, j int) bool { return parts[i] < parts[j] }) + + var to TopicOffsets + to.Topic = topicName + + for _, p := range parts { + o, _ := offsets.Lookup(topicName, p) + var hwm int64 + if eo, ok := endOffsets.Lookup(topicName, p); ok { + hwm = eo.Offset + } + lag := hwm - o.Offset.At + if lag < 0 { + lag = 0 + } + to.Partitions = append(to.Partitions, PartitionOffset{ + Partition: p, + GroupOffset: o.Offset.At, + HighWatermark: hwm, + Lag: lag, + Metadata: o.Offset.Metadata, + }) + to.TotalLag += lag + if o.Offset.At >= 0 { + to.TotalOffset += o.Offset.At + } + } + topics = append(topics, to) + } + + // Members. 
+ var members []MemberInfo + for _, m := range g.Members { + mi := MemberInfo{ + ClientID: m.ClientID, + ClientHost: m.ClientHost, + TopicPartitions: make(map[string][]int32), + } + if ca, ok := m.Assigned.AsConsumer(); ok { + for _, t := range ca.Topics { + mi.TopicPartitions[t.Topic] = t.Partitions + } + } + members = append(members, mi) + } + + return &GroupDescription{ + Group: g.Group, + State: g.State, + Protocol: g.Protocol, + ProtocolType: g.ProtocolType, + Topics: topics, + Members: members, + }, nil +} + +// Delete deletes a consumer group. +func Delete(ctx context.Context, admin *kadm.Client, group string) error { + resp, err := admin.DeleteGroup(ctx, group) + if err != nil { + return err + } + return resp.Err +} + +// CommitOffsets commits offsets for a topic in a consumer group. +func CommitOffsets(ctx context.Context, admin *kadm.Client, group string, topicOffsets map[string]map[int32]int64) error { + os := make(kadm.Offsets) + for topic, partitions := range topicOffsets { + for partition, offset := range partitions { + os.Add(kadm.Offset{ + Topic: topic, + Partition: partition, + At: offset, + }) + } + } + return admin.CommitAllOffsets(ctx, group, os) +} + +// ListGroupNames returns just the group names (for shell completion). +func ListGroupNames(ctx context.Context, admin *kadm.Client) ([]string, error) { + listed, err := admin.ListGroups(ctx) + if err != nil { + return nil, err + } + names := make([]string, 0, len(listed)) + for name := range listed { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} diff --git a/pkg/group/group_integration_test.go b/pkg/group/group_integration_test.go new file mode 100644 index 00000000..72efcf9b --- /dev/null +++ b/pkg/group/group_integration_test.go @@ -0,0 +1,347 @@ +package group + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/modules/redpanda" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" +) + +func createAdminClient(t *testing.T, brokers string) *kadm.Client { + t.Helper() + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + t.Cleanup(func() { cl.Close() }) + admin := kadm.NewClient(cl) + t.Cleanup(func() { admin.Close() }) + return admin +} + +func createTopic(t *testing.T, ctx context.Context, admin *kadm.Client, topic string, partitions int32) { + t.Helper() + resp, err := admin.CreateTopics(ctx, partitions, 1, nil, topic) + require.NoError(t, err) + for _, r := range resp { + require.NoError(t, r.Err) + } +} + +func produceMessages(t *testing.T, ctx context.Context, brokers string, topic string, count int) { + t.Helper() + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + defer cl.Close() + for i := range count { + res := cl.ProduceSync(ctx, &kgo.Record{ + Topic: topic, + Value: fmt.Appendf(nil, "msg-%d", i), + }) + require.NoError(t, res.FirstErr()) + } +} + +// consumeWithGroup creates a consumer group by consuming all available +// messages from the given topic and committing offsets. 
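+// Polling stops once a fetch returns at least one record (or the 15s
+// timeout expires); the client then commits and closes, leaving the group
+// in the Empty state with committed offsets.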
+func consumeWithGroup(t *testing.T, ctx context.Context, brokers string, groupName string, topic string) { + t.Helper() + cl, err := kgo.NewClient( + kgo.SeedBrokers(brokers), + kgo.ConsumerGroup(groupName), + kgo.ConsumeTopics(topic), + kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), + ) + require.NoError(t, err) + + pollCtx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + // Poll until we get records or timeout. + for { + fetches := cl.PollFetches(pollCtx) + if fetches.NumRecords() > 0 || pollCtx.Err() != nil { + break + } + } + + err = cl.CommitUncommittedOffsets(ctx) + require.NoError(t, err) + cl.Close() +} + +func startContainer(t *testing.T, ctx context.Context) string { + t.Helper() + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + return brokers +} + +func TestListEmpty(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + groups, err := List(ctx, admin) + require.NoError(t, err) + assert.Empty(t, groups) +} + +func TestListWithGroups(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topic := "test-list-topic" + createTopic(t, ctx, admin, topic, 1) + produceMessages(t, ctx, brokers, topic, 5) + + consumeWithGroup(t, ctx, brokers, "group-alpha", topic) + consumeWithGroup(t, ctx, brokers, "group-beta", topic) + + groups, err := List(ctx, admin) + require.NoError(t, err) + require.Len(t, groups, 2) + + names := make([]string, len(groups)) + for i, g := range groups { + names[i] = g.Name + } + assert.Contains(t, names, "group-alpha") + assert.Contains(t, names, "group-beta") +} + +func TestDescribe(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topic := "test-describe-topic" + createTopic(t, ctx, admin, topic, 1) + produceMessages(t, ctx, brokers, topic, 10) + consumeWithGroup(t, ctx, brokers, "describe-group", topic) + + desc, err := Describe(ctx, admin, "describe-group", nil) + require.NoError(t, err) + + assert.Equal(t, "describe-group", desc.Group) + // After consumer closed, state should be Empty. + assert.Equal(t, "Empty", desc.State) + + require.Len(t, desc.Topics, 1) + assert.Equal(t, topic, desc.Topics[0].Topic) + require.Len(t, desc.Topics[0].Partitions, 1) + + p := desc.Topics[0].Partitions[0] + assert.Equal(t, int32(0), p.Partition) + assert.Equal(t, int64(10), p.GroupOffset) + assert.Equal(t, int64(10), p.HighWatermark) + assert.Equal(t, int64(0), p.Lag) +} + +func TestDescribeWithTopicFilter(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topicA := "filter-topic-a" + topicB := "filter-topic-b" + createTopic(t, ctx, admin, topicA, 1) + createTopic(t, ctx, admin, topicB, 1) + produceMessages(t, ctx, brokers, topicA, 5) + produceMessages(t, ctx, brokers, topicB, 5) + + // Consume both topics in the same group. 
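+	// A single group with commits on two topics is needed so the filter
+	// argument passed to Describe below has something to exclude.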
+ cl, err := kgo.NewClient( + kgo.SeedBrokers(brokers), + kgo.ConsumerGroup("filter-group"), + kgo.ConsumeTopics(topicA, topicB), + kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), + ) + require.NoError(t, err) + + pollCtx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + for { + fetches := cl.PollFetches(pollCtx) + if fetches.NumRecords() > 0 || pollCtx.Err() != nil { + break + } + } + // Poll a second time to make sure we get records from both topics. + for { + fetches := cl.PollFetches(pollCtx) + if fetches.NumRecords() > 0 || pollCtx.Err() != nil { + break + } + } + err = cl.CommitUncommittedOffsets(ctx) + require.NoError(t, err) + cl.Close() + + // Describe with filter for topicA only. + desc, err := Describe(ctx, admin, "filter-group", []string{topicA}) + require.NoError(t, err) + + require.Len(t, desc.Topics, 1) + assert.Equal(t, topicA, desc.Topics[0].Topic) +} + +func TestDelete(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topic := "test-delete-topic" + createTopic(t, ctx, admin, topic, 1) + produceMessages(t, ctx, brokers, topic, 5) + consumeWithGroup(t, ctx, brokers, "delete-group", topic) + + // Verify the group exists. + groups, err := ListGroupNames(ctx, admin) + require.NoError(t, err) + assert.Contains(t, groups, "delete-group") + + // Delete it. + err = Delete(ctx, admin, "delete-group") + require.NoError(t, err) + + // Verify it is gone. + groups, err = ListGroupNames(ctx, admin) + require.NoError(t, err) + assert.NotContains(t, groups, "delete-group") +} + +func TestCommitOffsets(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topic := "test-commit-topic" + createTopic(t, ctx, admin, topic, 2) + produceMessages(t, ctx, brokers, topic, 10) + + // Create the group first so it exists (consume then close). + consumeWithGroup(t, ctx, brokers, "commit-group", topic) + + // Now commit specific offsets. + offsets := map[string]map[int32]int64{ + topic: { + 0: 3, + 1: 5, + }, + } + err := CommitOffsets(ctx, admin, "commit-group", offsets) + require.NoError(t, err) + + // Verify via Describe. + desc, err := Describe(ctx, admin, "commit-group", nil) + require.NoError(t, err) + + require.Len(t, desc.Topics, 1) + assert.Equal(t, topic, desc.Topics[0].Topic) + + partOffsets := make(map[int32]int64) + for _, p := range desc.Topics[0].Partitions { + partOffsets[p.Partition] = p.GroupOffset + } + assert.Equal(t, int64(3), partOffsets[0]) + assert.Equal(t, int64(5), partOffsets[1]) +} + +func TestListGroupNames(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topic := "test-names-topic" + createTopic(t, ctx, admin, topic, 1) + produceMessages(t, ctx, brokers, topic, 5) + + consumeWithGroup(t, ctx, brokers, "zulu-group", topic) + consumeWithGroup(t, ctx, brokers, "alpha-group", topic) + consumeWithGroup(t, ctx, brokers, "mike-group", topic) + + names, err := ListGroupNames(ctx, admin) + require.NoError(t, err) + require.Len(t, names, 3) + + // Verify sorted order. 
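+	// ListGroupNames sorts its result, so completion output is deterministic.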
+ assert.Equal(t, "alpha-group", names[0]) + assert.Equal(t, "mike-group", names[1]) + assert.Equal(t, "zulu-group", names[2]) +} + +func TestDescribeNonExistent(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + _, err := Describe(ctx, admin, "non-existent-group", nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "not found") +} + +func TestDescribeLag(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + brokers := startContainer(t, ctx) + admin := createAdminClient(t, brokers) + + topic := "test-lag-topic" + createTopic(t, ctx, admin, topic, 1) + produceMessages(t, ctx, brokers, topic, 10) + + // Create a group and consume all messages. + consumeWithGroup(t, ctx, brokers, "lag-group", topic) + + // Produce more messages after the group has committed. + produceMessages(t, ctx, brokers, topic, 5) + + desc, err := Describe(ctx, admin, "lag-group", nil) + require.NoError(t, err) + + require.Len(t, desc.Topics, 1) + require.Len(t, desc.Topics[0].Partitions, 1) + + p := desc.Topics[0].Partitions[0] + // Group committed at offset 10, HWM should be 15. + assert.Equal(t, int64(10), p.GroupOffset) + assert.Equal(t, int64(15), p.HighWatermark) + assert.Equal(t, int64(5), p.Lag) + assert.Equal(t, int64(5), desc.Topics[0].TotalLag) +} diff --git a/pkg/node/node.go b/pkg/node/node.go new file mode 100644 index 00000000..6304e220 --- /dev/null +++ b/pkg/node/node.go @@ -0,0 +1,43 @@ +package node + +import ( + "context" + "fmt" + "sort" + + "github.com/twmb/franz-go/pkg/kadm" +) + +// Node represents a Kafka broker. +type Node struct { + ID int32 + Host string + Port int32 + IsController bool +} + +// Addr returns the broker address as "host:port". +func (n Node) Addr() string { + return fmt.Sprintf("%s:%d", n.Host, n.Port) +} + +// List returns all brokers in the cluster. 
+func List(ctx context.Context, admin *kadm.Client) ([]Node, error) { + meta, err := admin.BrokerMetadata(ctx) + if err != nil { + return nil, err + } + + nodes := make([]Node, 0, len(meta.Brokers)) + for _, b := range meta.Brokers { + nodes = append(nodes, Node{ + ID: b.NodeID, + Host: b.Host, + Port: b.Port, + IsController: b.NodeID == meta.Controller, + }) + } + + sort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID }) + return nodes, nil +} diff --git a/pkg/node/node_integration_test.go b/pkg/node/node_integration_test.go new file mode 100644 index 00000000..499b5d86 --- /dev/null +++ b/pkg/node/node_integration_test.go @@ -0,0 +1,108 @@ +package node + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/modules/redpanda" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" +) + +func TestList(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + defer cl.Close() + admin := kadm.NewClient(cl) + defer admin.Close() + + nodes, err := List(ctx, admin) + require.NoError(t, err) + + require.Len(t, nodes, 1, "single-node Redpanda cluster should return exactly one node") + + n := nodes[0] + assert.NotEmpty(t, n.Host, "node host should not be empty") + assert.NotZero(t, n.Port, "node port should not be zero") + assert.True(t, n.IsController, "single node must be the controller") +} + +func TestListNodeAddr(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + defer cl.Close() + admin := kadm.NewClient(cl) + defer admin.Close() + + nodes, err := List(ctx, admin) + require.NoError(t, err) + require.Len(t, nodes, 1) + + n := nodes[0] + expected := fmt.Sprintf("%s:%d", n.Host, n.Port) + assert.Equal(t, expected, n.Addr(), "Addr() should return host:port") + assert.Contains(t, n.Addr(), ":", "Addr() must contain a colon separator") +} + +func TestListMultipleCalls(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx := context.Background() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + defer cl.Close() + admin := kadm.NewClient(cl) + defer admin.Close() + + first, err := List(ctx, admin) + require.NoError(t, err) + require.Len(t, first, 1) + + for range 3 { + got, err := List(ctx, admin) + require.NoError(t, err) + require.Len(t, got, 1) + + assert.Equal(t, first[0].ID, got[0].ID, "node ID should be consistent across calls") + assert.Equal(t, first[0].Host, 
got[0].Host, "node host should be consistent across calls") + assert.Equal(t, first[0].Port, got[0].Port, "node port should be consistent across calls") + assert.Equal(t, first[0].IsController, got[0].IsController, "controller status should be consistent across calls") + assert.Equal(t, first[0].Addr(), got[0].Addr(), "Addr() should be consistent across calls") + } +} diff --git a/pkg/partitioner/jvm.go b/pkg/partitioner/jvm.go deleted file mode 100644 index 84c814f1..00000000 --- a/pkg/partitioner/jvm.go +++ /dev/null @@ -1,98 +0,0 @@ -package partitioner - -import ( - "hash" - - "github.com/IBM/sarama" -) - -// NewJVMCompatiblePartitioner creates a Sarama partitioner that uses -// the same hashing algorithm as JVM Kafka clients. -func NewJVMCompatiblePartitioner(topic string) sarama.Partitioner { - return sarama.NewCustomHashPartitioner(MurmurHasher)(topic) -} - -// murmurHash implements hash.Hash32 interface, -// solely to conform to required hasher for Sarama. -// it does not support streaming since it is not required for Sarama. -type murmurHash struct { - v int32 -} - -// MurmurHasher creates murmur2 hasher implementing hash.Hash32 interface. -// The implementation is not full and does not support streaming. -// It only implements the interface to comply with sarama.NewCustomHashPartitioner signature. -// But Sarama only uses Write method once, when writing keys and values of the message, -// so streaming support is not necessary. -func MurmurHasher() hash.Hash32 { - return new(murmurHash) -} - -func (m *murmurHash) Write(d []byte) (n int, err error) { - n = len(d) - m.v = murmur2(d) - return -} - -func (m *murmurHash) Reset() { - m.v = 0 -} - -func (m *murmurHash) Size() int { return 32 } - -func (m *murmurHash) BlockSize() int { return 4 } - -// Sum is noop. -func (m *murmurHash) Sum(in []byte) []byte { - return in -} - -func (m *murmurHash) Sum32() uint32 { - return uint32(toPositive(m.v)) -} - -// murmur2 implements hashing algorithm used by JVM clients for Kafka. -// See the original implementation: https://github.com/apache/kafka/blob/1.0.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353 -func murmur2(data []byte) int32 { - length := int32(len(data)) - seed := uint32(0x9747b28c) - m := int32(0x5bd1e995) - r := uint32(24) - - h := int32(seed ^ uint32(length)) - length4 := length / 4 - - for i := int32(0); i < length4; i++ { - i4 := i * 4 - k := int32(data[i4+0]&0xff) + (int32(data[i4+1]&0xff) << 8) + (int32(data[i4+2]&0xff) << 16) + (int32(data[i4+3]&0xff) << 24) - k *= m - k ^= int32(uint32(k) >> r) - k *= m - h *= m - h ^= k - } - - switch length % 4 { - case 3: - h ^= int32(data[(length & ^3)+2]&0xff) << 16 - fallthrough - case 2: - h ^= int32(data[(length & ^3)+1]&0xff) << 8 - fallthrough - case 1: - h ^= int32(data[length & ^3] & 0xff) - h *= m - } - - h ^= int32(uint32(h) >> 13) - h *= m - h ^= int32(uint32(h) >> 15) - - return h -} - -// toPositive converts i to positive number as per the original implementation in the JVM clients for Kafka. 
-// See the original implementation: https://github.com/apache/kafka/blob/1.0.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L741
-func toPositive(i int32) int32 {
-	return i & 0x7fffffff
-}
diff --git a/pkg/proto/proto.go b/pkg/proto/proto.go
index ca157401..869429dc 100644
--- a/pkg/proto/proto.go
+++ b/pkg/proto/proto.go
@@ -1,75 +1,75 @@
 package proto
 
 import (
-	"os"
+	"context"
+	"io/fs"
 	"path/filepath"
 	"strings"
 
-	"github.com/jhump/protoreflect/desc"
-	"github.com/jhump/protoreflect/desc/protoparse"
-	"github.com/jhump/protoreflect/dynamic"
+	"github.com/bufbuild/protocompile"
+	"github.com/bufbuild/protocompile/linker"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/dynamicpb"
 )
 
 type DescriptorRegistry struct {
-	descriptors []*desc.FileDescriptor
+	files linker.Files
 }
 
 func NewDescriptorRegistry(importPaths []string, exclusions []string) (*DescriptorRegistry, error) {
-	p := &protoparse.Parser{
-		ImportPaths: importPaths,
-	}
-
 	var protoFiles []string
 	for _, importPath := range importPaths {
-		err := filepath.Walk(importPath, func(path string, info os.FileInfo, err error) error {
-			if info != nil && !info.IsDir() && strings.HasSuffix(path, ".proto") {
-				protoFiles = append(protoFiles, path)
+		err := filepath.WalkDir(importPath, func(path string, d fs.DirEntry, err error) error {
+			if err != nil {
+				return err
 			}
-
+			if d.IsDir() || !strings.HasSuffix(path, ".proto") {
+				return nil
+			}
+			rel, err := filepath.Rel(importPath, path)
+			if err != nil {
+				return err
+			}
+			for _, exclusion := range exclusions {
+				if strings.HasPrefix(rel, exclusion) {
+					return nil
+				}
+			}
+			protoFiles = append(protoFiles, rel)
 			return nil
 		})
 		if err != nil {
 			return nil, err
 		}
-
-	}
-
-	resolved, err := protoparse.ResolveFilenames(importPaths, protoFiles...)
-	if err != nil {
-		return nil, err
 	}
 
-	var deduped []string
-	for _, i := range resolved {
-
-		var exclusionFound bool
-		for _, exclusion := range exclusions {
-			if strings.HasPrefix(i, exclusion) {
-				exclusionFound = true
-				break
-			}
-		}
-
-		if !exclusionFound {
-			deduped = append(deduped, i)
-		}
+	compiler := protocompile.Compiler{
+		Resolver: &protocompile.SourceResolver{
+			ImportPaths: importPaths,
+		},
+	}
 
-	descs, err := p.ParseFiles(deduped...)
+	files, err := compiler.Compile(context.Background(), protoFiles...)
if err != nil { return nil, err } - return &DescriptorRegistry{descriptors: descs}, nil + return &DescriptorRegistry{files: files}, nil } -func (d *DescriptorRegistry) MessageForType(_type string) *dynamic.Message { - for _, descriptor := range d.descriptors { - if messageDescriptor := descriptor.FindMessage(_type); messageDescriptor != nil { - return dynamic.NewMessage(messageDescriptor) +func (d *DescriptorRegistry) MessageForType(_type string) *dynamicpb.Message { + for _, f := range d.files { + desc := f.FindDescriptorByName(protoreflect.FullName(_type)) + if desc == nil { + continue + } + msgDesc, ok := desc.(protoreflect.MessageDescriptor) + if !ok { + continue } + return dynamicpb.NewMessage(msgDesc) } return nil } diff --git a/pkg/streams/decoder.go b/pkg/streams/decoder.go deleted file mode 100644 index 8a86f356..00000000 --- a/pkg/streams/decoder.go +++ /dev/null @@ -1,331 +0,0 @@ -package streams - -import ( - "encoding/binary" - "errors" - "math" -) - -var errInvalidArrayLength = errors.New("invalid array length") -var errInvalidByteSliceLength = errors.New("invalid byteslice length") - -//var errInvalidByteSliceLengthType = errors.New("invalid byteslice length type") -var errInvalidStringLength = errors.New("invalid string length") - -//var errInvalidSubsetSize = errors.New("invalid subset size") -var errVarintOverflow = errors.New("varint overflow") -var errInvalidBool = errors.New("invalid bool") - -// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected -// when requesting messages, since as an optimization the server is allowed to return a partial message at the end -// of the message set. -var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") - -type realDecoder struct { - raw []byte - off int -} - -func NewDecoder(raw []byte) PacketDecoder { - return &realDecoder{ - raw: raw, - } -} - -type PacketDecoder interface { - // Primitives - getInt8() (int8, error) - getInt16() (int16, error) - getInt32() (int32, error) - getInt64() (int64, error) - getVarint() (int64, error) - getArrayLength() (int, error) - getBool() (bool, error) - - // Collections - getBytes() ([]byte, error) - getVarintBytes() ([]byte, error) - getRawBytes(length int) ([]byte, error) - getString() (string, error) - getNullableString() (*string, error) - getInt32Array() ([]int32, error) - getInt64Array() ([]int64, error) - getStringArray() ([]string, error) - - // Subsets - remaining() int - getSubset(length int) (PacketDecoder, error) - peek(offset, length int) (PacketDecoder, error) // similar to getSubset, but it doesn't advance the offset - -} - -// primitives - -func (rd *realDecoder) getInt8() (int8, error) { - if rd.remaining() < 1 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int8(rd.raw[rd.off]) - rd.off++ - return tmp, nil -} - -func (rd *realDecoder) getInt16() (int16, error) { - if rd.remaining() < 2 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) - rd.off += 2 - return tmp, nil -} - -func (rd *realDecoder) getInt32() (int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - return tmp, nil -} - -func (rd *realDecoder) getInt64() (int64, error) { - if rd.remaining() < 8 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off 
+= 8 - return tmp, nil -} - -func (rd *realDecoder) getVarint() (int64, error) { - tmp, n := binary.Varint(rd.raw[rd.off:]) - if n == 0 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - if n < 0 { - rd.off -= n - return -1, errVarintOverflow - } - rd.off += n - return tmp, nil -} - -func (rd *realDecoder) getArrayLength() (int, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) - rd.off += 4 - if tmp > rd.remaining() { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } else if tmp > 2*math.MaxUint16 { - return -1, errInvalidArrayLength - } - return tmp, nil -} - -func (rd *realDecoder) getBool() (bool, error) { - b, err := rd.getInt8() - if err != nil || b == 0 { - return false, err - } - if b != 1 { - return false, errInvalidBool - } - return true, nil -} - -// collections - -func (rd *realDecoder) getBytes() ([]byte, error) { - tmp, err := rd.getInt32() - if err != nil { - return nil, err - } - if tmp == -1 { - return nil, nil - } - - return rd.getRawBytes(int(tmp)) -} - -func (rd *realDecoder) getVarintBytes() ([]byte, error) { - tmp, err := rd.getVarint() - if err != nil { - return nil, err - } - if tmp == -1 { - return nil, nil - } - - return rd.getRawBytes(int(tmp)) -} - -func (rd *realDecoder) getStringLength() (int, error) { - length, err := rd.getInt16() - if err != nil { - return 0, err - } - - n := int(length) - - switch { - case n < -1: - return 0, errInvalidStringLength - case n > rd.remaining(): - rd.off = len(rd.raw) - return 0, ErrInsufficientData - } - - return n, nil -} - -func (rd *realDecoder) getString() (string, error) { - n, err := rd.getStringLength() - if err != nil || n == -1 { - return "", err - } - - tmpStr := string(rd.raw[rd.off : rd.off+n]) - rd.off += n - return tmpStr, nil -} - -func (rd *realDecoder) getNullableString() (*string, error) { - n, err := rd.getStringLength() - if err != nil || n == -1 { - return nil, err - } - - tmpStr := string(rd.raw[rd.off : rd.off+n]) - rd.off += n - return &tmpStr, err -} - -func (rd *realDecoder) getInt32Array() ([]int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 4*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]int32, n) - for i := range ret { - ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - } - return ret, nil -} - -func (rd *realDecoder) getInt64Array() ([]int64, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 8*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]int64, n) - for i := range ret { - ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - } - return ret, nil -} - -func (rd *realDecoder) getStringArray() ([]string, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, errInvalidArrayLength - } - - ret := make([]string, n) - for i := range 
ret { - str, err := rd.getString() - if err != nil { - return nil, err - } - - ret[i] = str - } - return ret, nil -} - -// subsets - -func (rd *realDecoder) remaining() int { - return len(rd.raw) - rd.off -} - -func (rd *realDecoder) getSubset(length int) (PacketDecoder, error) { - buf, err := rd.getRawBytes(length) - if err != nil { - return nil, err - } - return &realDecoder{raw: buf}, nil -} - -func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { - if length < 0 { - return nil, errInvalidByteSliceLength - } else if length > rd.remaining() { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - start := rd.off - rd.off += length - return rd.raw[start:rd.off], nil -} - -func (rd *realDecoder) peek(offset, length int) (PacketDecoder, error) { - if rd.remaining() < offset+length { - return nil, ErrInsufficientData - } - off := rd.off + offset - return &realDecoder{raw: rd.raw[off : off+length]}, nil -} diff --git a/pkg/streams/subscription_info.go b/pkg/streams/subscription_info.go deleted file mode 100644 index 0ed50b98..00000000 --- a/pkg/streams/subscription_info.go +++ /dev/null @@ -1,79 +0,0 @@ -package streams - -type SubscriptionInfo struct { - Version int32 - UUID []byte // 16-byte UUID - ProcessID string - PrevTasks []TaskID - StandbyTasks []TaskID - UserEndpoint string -} - -// Support version 1+2 -func (s *SubscriptionInfo) Decode(pd PacketDecoder) (err error) { - s.Version, err = pd.getInt32() - if err != nil { - return err - } - - s.UUID, err = pd.getRawBytes(16) - if err != nil { - return err - } - - numPrevs, err := pd.getInt32() - if err != nil { - return err - } - - for i := 0; i < int(numPrevs); i++ { - t := TaskID{} - - t.TopicGroupID, err = pd.getInt32() - if err != nil { - return err - } - - t.Partition, err = pd.getInt32() - if err != nil { - return err - } - - s.PrevTasks = append(s.PrevTasks, t) - } - - numStandby, err := pd.getInt32() - if err != nil { - return err - } - - for i := 0; i < int(numStandby); i++ { - t := TaskID{} - - t.TopicGroupID, err = pd.getInt32() - if err != nil { - return err - } - - t.Partition, err = pd.getInt32() - if err != nil { - return err - } - - s.StandbyTasks = append(s.StandbyTasks, t) - } - - userEndpointBytes, err := pd.getBytes() - if err != nil { - return err - } - - s.UserEndpoint = string(userEndpointBytes) - - return nil -} - -type TaskID struct { - TopicGroupID int32 - Partition int32 -} diff --git a/pkg/topic/topic.go b/pkg/topic/topic.go new file mode 100644 index 00000000..15d022b6 --- /dev/null +++ b/pkg/topic/topic.go @@ -0,0 +1,206 @@ +package topic + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TopicInfo represents a topic listing entry. +type TopicInfo struct { + Name string + Partitions int32 + ReplicationFactor int16 +} + +// List returns all topics sorted by name. +func List(ctx context.Context, admin *kadm.Client) ([]TopicInfo, error) { + topics, err := admin.ListTopics(ctx) + if err != nil { + return nil, err + } + + result := make([]TopicInfo, 0, len(topics)) + for _, t := range topics.Sorted() { + var rf int16 + sorted := t.Partitions.Sorted() + if len(sorted) > 0 { + rf = int16(len(sorted[0].Replicas)) + } + result = append(result, TopicInfo{ + Name: t.Topic, + Partitions: int32(len(t.Partitions)), + ReplicationFactor: rf, + }) + } + + return result, nil +} + +// PartitionInfo holds partition detail for topic description. 
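+// HighWatermark is the partition's end offset as reported by ListEndOffsets
+// at the time of the Describe call.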
+type PartitionInfo struct { + ID int32 + Leader int32 + Replicas []int32 + ISR []int32 + HighWatermark int64 +} + +// TopicDescription holds the full description of a topic. +type TopicDescription struct { + Name string + IsInternal bool + Compacted bool + Partitions []PartitionInfo + Configs []ConfigEntry +} + +// ConfigEntry holds a non-default config entry. +type ConfigEntry struct { + Name string + Value string + Sensitive bool + IsDefault bool +} + +// Describe returns full topic details including partitions and config. +func Describe(ctx context.Context, admin *kadm.Client, topic string) (*TopicDescription, error) { + topics, err := admin.ListTopics(ctx, topic) + if err != nil { + return nil, err + } + + detail, ok := topics[topic] + if !ok || detail.Err != nil { + if ok && detail.Err != nil { + return nil, detail.Err + } + return nil, fmt.Errorf("topic %s not found", topic) + } + + // Get high watermarks. + endOffsets, err := admin.ListEndOffsets(ctx, topic) + if err != nil { + return nil, fmt.Errorf("list end offsets: %w", err) + } + + // Get config. + configs, err := admin.DescribeTopicConfigs(ctx, topic) + if err != nil { + return nil, fmt.Errorf("describe config: %w", err) + } + + var compacted bool + var cfgEntries []ConfigEntry + if len(configs) > 0 { + for _, c := range configs[0].Configs { + if c.Key == "cleanup.policy" && c.Value != nil && strings.Contains(*c.Value, "compact") { + compacted = true + } + cfgEntries = append(cfgEntries, ConfigEntry{ + Name: c.Key, + Value: derefStr(c.Value), + Sensitive: c.Sensitive, + IsDefault: c.Source == kmsg.ConfigSourceDefaultConfig, + }) + } + } + + sorted := detail.Partitions.Sorted() + + partitions := make([]PartitionInfo, 0, len(sorted)) + for _, p := range sorted { + var hwm int64 + if o, ok := endOffsets.Lookup(topic, p.Partition); ok { + hwm = o.Offset + } + replicas := make([]int32, len(p.Replicas)) + copy(replicas, p.Replicas) + sort.Slice(replicas, func(i, j int) bool { return replicas[i] < replicas[j] }) + isr := make([]int32, len(p.ISR)) + copy(isr, p.ISR) + sort.Slice(isr, func(i, j int) bool { return isr[i] < isr[j] }) + partitions = append(partitions, PartitionInfo{ + ID: p.Partition, + Leader: p.Leader, + Replicas: replicas, + ISR: isr, + HighWatermark: hwm, + }) + } + + return &TopicDescription{ + Name: detail.Topic, + IsInternal: detail.IsInternal, + Compacted: compacted, + Partitions: partitions, + Configs: cfgEntries, + }, nil +} + +// Create creates a topic with the given parameters. +func Create(ctx context.Context, admin *kadm.Client, name string, partitions int32, replicas int16, configs map[string]*string) error { + resp, err := admin.CreateTopic(ctx, partitions, replicas, configs, name) + if err != nil { + return err + } + return resp.Err +} + +// Delete deletes a topic. +func Delete(ctx context.Context, admin *kadm.Client, name string) error { + resp, err := admin.DeleteTopic(ctx, name) + if err != nil { + return err + } + return resp.Err +} + +// SetConfig sets topic configuration entries using incremental alter. 
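+// A non-nil error is returned if the request fails or if the broker rejects
+// any individual config entry.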
+func SetConfig(ctx context.Context, admin *kadm.Client, topic string, configs map[string]string) error { + alterConfigs := make([]kadm.AlterConfig, 0, len(configs)) + for k, v := range configs { + v := v + alterConfigs = append(alterConfigs, kadm.AlterConfig{ + Op: kadm.SetConfig, + Name: k, + Value: &v, + }) + } + + resps, err := admin.AlterTopicConfigsState(ctx, alterConfigs, topic) + if err != nil { + return err + } + for _, r := range resps { + if r.Err != nil { + return r.Err + } + } + return nil +} + +// UpdatePartitions sets the partition count on a topic. +func UpdatePartitions(ctx context.Context, admin *kadm.Client, topic string, count int) error { + resps, err := admin.UpdatePartitions(ctx, count, topic) + if err != nil { + return err + } + for _, r := range resps.Sorted() { + if r.Err != nil { + return r.Err + } + } + return nil +} + +func derefStr(s *string) string { + if s == nil { + return "" + } + return *s +} diff --git a/pkg/topic/topic_integration_test.go b/pkg/topic/topic_integration_test.go new file mode 100644 index 00000000..6c67124d --- /dev/null +++ b/pkg/topic/topic_integration_test.go @@ -0,0 +1,275 @@ +package topic + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/modules/redpanda" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" +) + +func setupAdmin(t *testing.T, ctx context.Context) *kadm.Client { + t.Helper() + + rpContainer, err := redpanda.Run(ctx, "redpandadata/redpanda:v24.3.1") + require.NoError(t, err) + t.Cleanup(func() { _ = rpContainer.Terminate(context.Background()) }) + + brokers, err := rpContainer.KafkaSeedBroker(ctx) + require.NoError(t, err) + + cl, err := kgo.NewClient(kgo.SeedBrokers(brokers)) + require.NoError(t, err) + t.Cleanup(func() { cl.Close() }) + + admin := kadm.NewClient(cl) + t.Cleanup(func() { admin.Close() }) + + return admin +} + +func TestCreate(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + err := Create(ctx, admin, "test-topic", 1, 1, nil) + require.NoError(t, err) + + topics, err := List(ctx, admin) + require.NoError(t, err) + + var found bool + for _, ti := range topics { + if ti.Name == "test-topic" { + found = true + break + } + } + require.True(t, found, "created topic should appear in list") +} + +func TestCreateWithCompaction(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + compactVal := "compact" + err := Create(ctx, admin, "compacted-topic", 1, 1, map[string]*string{ + "cleanup.policy": &compactVal, + }) + require.NoError(t, err) + + desc, err := Describe(ctx, admin, "compacted-topic") + require.NoError(t, err) + + assert.Equal(t, "compacted-topic", desc.Name) + assert.True(t, desc.Compacted, "topic should be marked as compacted") +} + +func TestList(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + // Create topics in non-alphabetical order. 
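+	// (charlie, alpha, bravo) so the ordering assertions below actually
+	// prove that List sorts by name rather than echoing creation order.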
+ require.NoError(t, Create(ctx, admin, "charlie", 3, 1, nil)) + require.NoError(t, Create(ctx, admin, "alpha", 1, 1, nil)) + require.NoError(t, Create(ctx, admin, "bravo", 2, 1, nil)) + + topics, err := List(ctx, admin) + require.NoError(t, err) + + // Filter to only our test topics. + expected := map[string]struct{}{ + "alpha": {}, + "bravo": {}, + "charlie": {}, + } + var filtered []TopicInfo + for _, ti := range topics { + if _, ok := expected[ti.Name]; ok { + filtered = append(filtered, ti) + } + } + require.Len(t, filtered, 3) + + // Verify sorted order. + assert.Equal(t, "alpha", filtered[0].Name) + assert.Equal(t, "bravo", filtered[1].Name) + assert.Equal(t, "charlie", filtered[2].Name) + + // Verify partition counts. + assert.Equal(t, int32(1), filtered[0].Partitions) + assert.Equal(t, int32(2), filtered[1].Partitions) + assert.Equal(t, int32(3), filtered[2].Partitions) + + // Verify replication factor (single node = 1). + for _, ti := range filtered { + assert.Equal(t, int16(1), ti.ReplicationFactor) + } +} + +func TestDescribe(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + require.NoError(t, Create(ctx, admin, "describe-me", 3, 1, nil)) + + desc, err := Describe(ctx, admin, "describe-me") + require.NoError(t, err) + + assert.Equal(t, "describe-me", desc.Name) + assert.False(t, desc.IsInternal) + assert.False(t, desc.Compacted) + + // Verify partitions. + require.Len(t, desc.Partitions, 3) + for i, p := range desc.Partitions { + assert.Equal(t, int32(i), p.ID, "partition IDs should be sequential") + assert.GreaterOrEqual(t, p.Leader, int32(0), "leader should be a valid node") + assert.NotEmpty(t, p.Replicas, "replicas should not be empty") + assert.NotEmpty(t, p.ISR, "ISR should not be empty") + assert.GreaterOrEqual(t, p.HighWatermark, int64(0), "high watermark should be non-negative") + } + + // Verify configs are populated. + assert.NotEmpty(t, desc.Configs, "configs should not be empty") +} + +func TestDelete(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + require.NoError(t, Create(ctx, admin, "to-delete", 1, 1, nil)) + + // Confirm it exists. + topics, err := List(ctx, admin) + require.NoError(t, err) + var found bool + for _, ti := range topics { + if ti.Name == "to-delete" { + found = true + } + } + require.True(t, found, "topic should exist before deletion") + + // Delete. + require.NoError(t, Delete(ctx, admin, "to-delete")) + + // Confirm it's gone. 
+ topics, err = List(ctx, admin) + require.NoError(t, err) + for _, ti := range topics { + assert.NotEqual(t, "to-delete", ti.Name, "deleted topic should not appear in list") + } +} + +func TestSetConfig(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + require.NoError(t, Create(ctx, admin, "config-topic", 1, 1, nil)) + + err := SetConfig(ctx, admin, "config-topic", map[string]string{ + "retention.ms": "86400000", + }) + require.NoError(t, err) + + desc, err := Describe(ctx, admin, "config-topic") + require.NoError(t, err) + + var retentionFound bool + for _, c := range desc.Configs { + if c.Name == "retention.ms" { + retentionFound = true + assert.Equal(t, "86400000", c.Value) + assert.False(t, c.IsDefault, "explicitly set config should not be marked as default") + break + } + } + require.True(t, retentionFound, "retention.ms config should be present") +} + +func TestUpdatePartitions(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + require.NoError(t, Create(ctx, admin, "grow-partitions", 1, 1, nil)) + + // Verify initial state. + desc, err := Describe(ctx, admin, "grow-partitions") + require.NoError(t, err) + require.Len(t, desc.Partitions, 1) + + // Increase to 3 partitions. + require.NoError(t, UpdatePartitions(ctx, admin, "grow-partitions", 3)) + + desc, err = Describe(ctx, admin, "grow-partitions") + require.NoError(t, err) + assert.Len(t, desc.Partitions, 3) +} + +func TestDescribeNonExistent(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + _, err := Describe(ctx, admin, "does-not-exist") + require.Error(t, err) +} + +func TestCreateDuplicate(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + admin := setupAdmin(t, ctx) + + require.NoError(t, Create(ctx, admin, "dup-topic", 1, 1, nil)) + + err := Create(ctx, admin, "dup-topic", 1, 1, nil) + require.Error(t, err, "creating a duplicate topic should return an error") +}
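
Usage sketch (not part of the patch): one way the new pkg/topic and pkg/group packages might be driven from a command. The broker address "localhost:9092" and the group name "my-group" are placeholders; the real cmd/kaf wiring would also apply the active cluster's TLS/SASL settings when building the kgo client.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/birdayz/kaf/pkg/group"
	"github.com/birdayz/kaf/pkg/topic"
	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	ctx := context.Background()

	// Build a franz-go client and wrap it in the kadm admin client,
	// mirroring what the integration tests in this patch do.
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()
	admin := kadm.NewClient(cl)
	defer admin.Close()

	// List topics via pkg/topic.
	topics, err := topic.List(ctx, admin)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range topics {
		fmt.Printf("%s\tpartitions=%d\trf=%d\n", t.Name, t.Partitions, t.ReplicationFactor)
	}

	// Describe a consumer group via pkg/group (nil means no topic filter).
	desc, err := group.Describe(ctx, admin, "my-group", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, to := range desc.Topics {
		fmt.Printf("%s\ttotal lag=%d\n", to.Topic, to.TotalLag)
	}
}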