diff --git a/.gitignore b/.gitignore index 0575dc4..baa1b5a 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ cars2 config.toml datas datas2 +datas3 diff --git a/README.md b/README.md index 7550e8e..9e0744c 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,16 @@ payload_cid,filename,piece_cid,piece_size,detail ba...,graph-slice-name.car,baga...,16646144,inner-structure-json ``` +Config: + +[example](https://github.com/ipfs-force-community/go-graphsplit/blob/main/config/example.toml) + +config 包含三个字段: + +* SliceSize piece 源文件大小,默认是 18Gib +* ExtraFilePath 指向存储了图片、视频等文件的目录 +* ExtraFileSizeInOnePiece 每个 piece 文件包含图片和视频等文件的大小,例如:500Gib + Import car file to IPFS: ```sh ipfs dag import /path/to/car-dir/car-file diff --git a/chunk.go b/chunk.go index fba8c23..6bde9a7 100644 --- a/chunk.go +++ b/chunk.go @@ -154,11 +154,20 @@ func ErrCallback() GraphBuildCallback { return &errCallback{} } -func Chunk(ctx context.Context, sliceSize int64, parentPath, targetPath, carDir, graphName string, parallel int, cb GraphBuildCallback) error { +func Chunk(ctx context.Context, + expectSliceSize int64, + parentPath, + targetPath, + carDir, + graphName string, + parallel int, + cb GraphBuildCallback, + ef *ExtraFile, +) error { var cumuSize int64 = 0 graphSliceCount := 0 graphFiles := make([]Finfo, 0) - if sliceSize == 0 { + if expectSliceSize == 0 { return fmt.Errorf("slice size has been set as 0") } if parallel <= 0 { @@ -168,36 +177,39 @@ func Chunk(ctx context.Context, sliceSize int64, parentPath, targetPath, carDir, parentPath = targetPath } + partSliceSize := expectSliceSize - ef.sliceSize args := []string{targetPath} - sliceTotal := GetGraphCount(args, sliceSize) + sliceTotal := GetGraphCount(args, expectSliceSize) if sliceTotal == 0 { log.Warn("Empty folder or file!") return nil } files := GetFileListAsync(args) for item := range files { + item := tryRenameFileName([]Finfo{item})[0] + // log.Infof("name: %s", item.Name) fileSize := item.Info.Size() switch { - case 
cumuSize+fileSize < sliceSize: + case cumuSize+fileSize < partSliceSize: cumuSize += fileSize graphFiles = append(graphFiles, item) - case cumuSize+fileSize == sliceSize: + case cumuSize+fileSize == partSliceSize: cumuSize += fileSize graphFiles = append(graphFiles, item) // todo build ipld from graphFiles - BuildIpldGraph(ctx, graphFiles, GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, sliceSize) + BuildIpldGraph(ctx, append(ef.getFiles(), graphFiles...), GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, expectSliceSize, ef) log.Infof("cumu-size: %d", cumuSize) log.Infof("%s", GenGraphName(graphName, graphSliceCount, sliceTotal)) log.Infof("=================") cumuSize = 0 graphFiles = make([]Finfo, 0) graphSliceCount++ - case cumuSize+fileSize > sliceSize: + case cumuSize+fileSize > partSliceSize: fileSliceCount := 0 // need to split item to fit graph slice // // first cut - firstCut := sliceSize - cumuSize + firstCut := partSliceSize - cumuSize var seekStart int64 = 0 var seekEnd int64 = seekStart + firstCut - 1 log.Infof("first cut %d, seek start at %d, end at %d", firstCut, seekStart, seekEnd) @@ -211,7 +223,7 @@ func Chunk(ctx context.Context, sliceSize int64, parentPath, targetPath, carDir, }) fileSliceCount++ // todo build ipld from graphFiles - BuildIpldGraph(ctx, graphFiles, GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, sliceSize) + BuildIpldGraph(ctx, append(ef.getFiles(), graphFiles...), GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, expectSliceSize, ef) log.Infof("cumu-size: %d", cumuSize+firstCut) log.Infof("%s", GenGraphName(graphName, graphSliceCount, sliceTotal)) log.Infof("=================") @@ -220,7 +232,7 @@ func Chunk(ctx context.Context, sliceSize int64, parentPath, targetPath, carDir, graphSliceCount++ for seekEnd < fileSize-1 { seekStart = seekEnd + 1 - seekEnd = seekStart + 
sliceSize - 1 + seekEnd = seekStart + partSliceSize - 1 if seekEnd >= fileSize-1 { seekEnd = fileSize - 1 } @@ -235,10 +247,10 @@ func Chunk(ctx context.Context, sliceSize int64, parentPath, targetPath, carDir, SeekEnd: seekEnd, }) fileSliceCount++ - if seekEnd-seekStart == sliceSize-1 { + if seekEnd-seekStart == partSliceSize-1 { // todo build ipld from graphFiles - BuildIpldGraph(ctx, graphFiles, GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, sliceSize) - log.Infof("cumu-size: %d", sliceSize) + BuildIpldGraph(ctx, append(ef.getFiles(), graphFiles...), GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, expectSliceSize, ef) + log.Infof("cumu-size: %d", partSliceSize) log.Infof("%s", GenGraphName(graphName, graphSliceCount, sliceTotal)) log.Infof("=================") cumuSize = 0 @@ -250,7 +262,7 @@ func Chunk(ctx context.Context, sliceSize int64, parentPath, targetPath, carDir, } if cumuSize > 0 { // todo build ipld from graphFiles - BuildIpldGraph(ctx, graphFiles, GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, sliceSize) + BuildIpldGraph(ctx, append(ef.getFiles(), graphFiles...), GenGraphName(graphName, graphSliceCount, sliceTotal), parentPath, carDir, parallel, cb, expectSliceSize, ef) log.Infof("cumu-size: %d", cumuSize) log.Infof("%s", GenGraphName(graphName, graphSliceCount, sliceTotal)) log.Infof("=================") diff --git a/cmd/graphsplit/main.go b/cmd/graphsplit/main.go index 250a070..d1d2477 100644 --- a/cmd/graphsplit/main.go +++ b/cmd/graphsplit/main.go @@ -4,8 +4,10 @@ import ( "context" "fmt" "os" + "strings" "time" + "github.com/docker/go-units" "github.com/filedrive-team/go-graphsplit" "github.com/filedrive-team/go-graphsplit/config" "github.com/filedrive-team/go-graphsplit/dataset" @@ -66,7 +68,7 @@ var chunkCmd = &cli.Command{ }, &cli.BoolFlag{ Name: "calc-commp", - Value: false, + Value: true, Usage: "create a mainfest.csv in 
car-dir to save mapping of data-cids, slice names, piece-cids and piece-sizes", }, &cli.BoolFlag{ @@ -104,12 +106,13 @@ var chunkCmd = &cli.Command{ if cfgPath == "" { return fmt.Errorf("config file path is required") } - log.Infoln("config file path: ", cfgPath) cfg, err := config.LoadConfig(cfgPath) if err != nil { - return fmt.Errorf("failed to load config file: %v", err) + return fmt.Errorf("failed to load config file(%s): %v", cfgPath, err) } + log.Infof("config file: %+v", cfg) + log.Infof("old slice size: %d", cfg.SliceSize) cfg.SliceSize++ sliceSize := cfg.SliceSize @@ -122,7 +125,26 @@ var chunkCmd = &cli.Command{ return fmt.Errorf("failed to save config file: %v", err) } - targetPath := c.Args().First() + var extraFileSliceSize int64 + if len(cfg.ExtraFilePath) != 0 { + if cfg.ExtraFileSizeInOnePiece == "" { + return fmt.Errorf("extra file size in one piece is required when extra file path is set") + } + extraFileSliceSize, err = units.RAMInBytes(cfg.ExtraFileSizeInOnePiece) + if err != nil { + return fmt.Errorf("failed to parse real file size: %v", err) + } + } + if sliceSize+int(extraFileSliceSize) > 32*graphsplit.Gib { + return fmt.Errorf("slice size %d + extra file slice size %d exceeds 32 GiB", sliceSize, extraFileSliceSize) + } + log.Infof("extra file slice size: %d", extraFileSliceSize) + rf, err := graphsplit.NewRealFile(strings.TrimSuffix(cfg.ExtraFilePath, "/"), int64(extraFileSliceSize), int64(sliceSize)) + if err != nil { + return err + } + + targetPath := strings.TrimSuffix(c.Args().First(), "/") var cb graphsplit.GraphBuildCallback if c.Bool("calc-commp") { cb = graphsplit.CommPCallback(carDir, c.Bool("rename"), c.Bool("add-padding")) @@ -136,11 +158,11 @@ var chunkCmd = &cli.Command{ fmt.Println("loop: ", loop) if !loop { fmt.Println("chunking once...") - return graphsplit.Chunk(ctx, int64(sliceSize), parentPath, targetPath, carDir, graphName, int(parallel), cb) + return graphsplit.Chunk(ctx, int64(sliceSize), parentPath, targetPath, 
// Config holds the chunking options read from the TOML config file.
// Struct tags: `toml` names the key, `comment` supplies the human-readable
// description emitted above the key by generateTOMLWithComments.
type Config struct {
	SliceSize               int    `toml:"SliceSize" comment:"SliceSize, the size of each slice in bytes, default is 18G"`
	ExtraFilePath           string `toml:"ExtraFilePath" comment:"ExtraFilePath extra file path, 指向存储了图片、视频等文件的目录"`
	ExtraFileSizeInOnePiece string `toml:"ExtraFileSizeInOnePiece" comment:"ExtraFileSizeInOnePiece 每个 piece 文件包含图片和视频等文件的大小, 例如:500Mib"`
}

// NewConfig returns a Config with defaults: an 18 GiB slice size and
// no extra-file directory or per-piece extra-file budget configured.
func NewConfig() *Config {
	cfg := Config{
		SliceSize:               19327352832, // 18 GiB
		ExtraFilePath:           "",
		ExtraFileSizeInOnePiece: "",
	}
	return &cfg
}
// generateTOMLWithComments encodes data to TOML and inserts a "# ..." comment
// line, taken from each field's `comment` struct tag, immediately above the
// corresponding top-level key, then prepends a fixed header. Only flat structs
// are handled: keys inside nested tables get no comments. Returns the final
// TOML text or an encoding error.
func generateTOMLWithComments(data any) (string, error) {
	// Step 1: Encode struct to TOML
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(data); err != nil {
		return "", fmt.Errorf("failed to encode TOML: %v", err)
	}
	tomlLines := strings.Split(buf.String(), "\n")

	// Step 2: Get field comments using reflection.
	// Map toml key -> comment text; fields missing either tag are skipped.
	comments := make(map[string]string)
	val := reflect.ValueOf(data)
	if val.Kind() == reflect.Ptr {
		val = val.Elem()
	}
	typ := val.Type()

	for i := 0; i < typ.NumField(); i++ {
		field := typ.Field(i)
		tomlTag := field.Tag.Get("toml")
		commentTag := field.Tag.Get("comment")
		if tomlTag != "" && commentTag != "" {
			comments[tomlTag] = commentTag
		}
	}

	// Step 3: Insert comments before corresponding TOML keys
	var result []string
	for _, line := range tomlLines {
		// Skip empty lines
		if strings.TrimSpace(line) == "" {
			result = append(result, line)
			continue
		}

		// Check if the line contains a TOML key.
		// NOTE(review): `key + " ="` prefix matching assumes the encoder puts a
		// space before '='; also, if one key were a prefix of another, both
		// comments could be emitted — not the case for the current Config.
		for key, comment := range comments {
			if strings.HasPrefix(strings.TrimSpace(line), key+" =") {
				result = append(result, fmt.Sprintf("# %s", comment))
			}
		}
		result = append(result, line)
	}

	// Add a header comment
	header := []string{
		"# 配置文件",
		"# 自动生成,包含字段说明",
		"",
	}
	return strings.Join(append(header, result...), "\n"), nil
}
+ rf := &ExtraFile{path: path, sliceSize: sliceSize, pieceRawSize: pieceRawSize} + if path != "" { + finfo, err := os.Stat(path) + if err != nil { + return nil, err + } + if !finfo.IsDir() { + return nil, fmt.Errorf("the path %s is not a directory", path) + } + rf.walk() + } + + return rf, nil +} + +func (rf *ExtraFile) walk() { + files := GetFileListAsync([]string{rf.path}) + for item := range files { + rf.files = append(rf.files, item) + } + rf.files = tryRenameFileName(rf.files) +} + +func (rf *ExtraFile) getFiles() []Finfo { + count := len(rf.files) + if count == 0 { + return nil + } + var total int64 + var files []Finfo + startIdx := rf.idx + for total < rf.sliceSize { + file := rf.files[rf.idx] + if total+file.Info.Size()+rf.pieceRawSize <= 32*Gib { + total += file.Info.Size() + files = append(files, file) + } + rf.idx = (rf.idx + 1) % count + + if rf.idx == startIdx { + break + } + } + + return files +} diff --git a/go.mod b/go.mod index 9a1e7a7..f392d04 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/filedrive-team/go-graphsplit go 1.21 require ( + github.com/BurntSushi/toml v1.5.0 github.com/beeleelee/go-ds-rpc v0.1.0 // this needs to be updated too https://github.com/beeleelee/go-ds-rpc/pull/3 + github.com/docker/go-units v0.5.0 github.com/filecoin-project/go-commp-utils/v2 v2.1.0 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-state-types v0.14.0 @@ -24,16 +26,6 @@ require ( ) require ( - github.com/gozelle/color v1.14.1 // indirect - github.com/gozelle/go-difflib v1.0.0 // indirect - github.com/gozelle/spew v1.1.17 // indirect - github.com/gozelle/yaml v0.0.0-20221214152138-81b78a92d903 // indirect - github.com/ipfs/go-ipld-legacy v0.2.1 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect -) - -require ( - github.com/BurntSushi/toml v1.5.0 github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect 
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect @@ -49,7 +41,11 @@ require ( github.com/golang/snappy v0.0.1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect + github.com/gozelle/color v1.14.1 // indirect + github.com/gozelle/go-difflib v1.0.0 // indirect + github.com/gozelle/spew v1.1.17 // indirect github.com/gozelle/testify v1.8.12 + github.com/gozelle/yaml v0.0.0-20221214152138-81b78a92d903 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect @@ -59,6 +55,7 @@ require ( github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-ipld-cbor v0.1.0 // indirect + github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect github.com/ipfs/go-verifcid v0.0.1 // indirect @@ -67,6 +64,7 @@ require ( github.com/klauspost/compress v1.11.7 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect github.com/minio/sha256-simd v1.0.1 // indirect diff --git a/go.sum b/go.sum index c80a3f6..cabf2b3 100644 --- a/go.sum +++ b/go.sum @@ -30,6 +30,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= 
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= diff --git a/utils.go b/utils.go index c842743..422601f 100644 --- a/utils.go +++ b/utils.go @@ -6,8 +6,10 @@ import ( "fmt" "io" "io/ioutil" + "math/rand" "os" "path" + "regexp" "runtime" "strings" "sync" @@ -133,13 +135,15 @@ func (b *FSBuilder) getNodeByLink(ln *ipld.Link) (fn fsNode, err error) { func BuildIpldGraph(ctx context.Context, fileList []Finfo, - graphName, parentPath, + graphName, + parentPath, carDir string, parallel int, cb GraphBuildCallback, sliceSize int64, + ef *ExtraFile, ) { - buf, payloadCid, fsDetail, err := buildIpldGraph(ctx, fileList, parentPath, parallel, sliceSize) + buf, payloadCid, fsDetail, err := buildIpldGraph(ctx, fileList, parentPath, parallel, sliceSize, ef) if err != nil { // log.Fatal(err) cb.OnError(err) @@ -148,7 +152,13 @@ func BuildIpldGraph(ctx context.Context, cb.OnSuccess(buf, graphName, payloadCid, fsDetail) } -func buildIpldGraph(ctx context.Context, fileList []Finfo, parentPath string, parallel int, sliceSize int64) (*Buffer, string, string, error) { +func buildIpldGraph(ctx context.Context, + fileList []Finfo, + parentPath string, + parallel int, + sliceSize int64, + ef *ExtraFile, +) (*Buffer, string, string, error) { bs2 := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) dagServ := dag.NewDAGService(blockservice.New(bs2, offline.Exchange(bs2))) @@ -209,12 +219,17 @@ func buildIpldGraph(ctx context.Context, fileList []Finfo, parentPath string, pa // log.Infof("file name: %s, file size: %d, item size: %d, seek-start:%d, seek-end:%d", item.Name, item.Info.Size(), 
item.SeekEnd-item.SeekStart, item.SeekStart, item.SeekEnd) dirStr := path.Dir(item.Path) parentPath = path.Clean(parentPath) + parentPath2 := path.Clean(ef.path) + // log.Infof("parentPath: %s, parentPath2: %s, item.Path: %s, clean path: %v, dirStr: %s", parentPath, parentPath2, item.Path, path.Clean(item.Path), dirStr) // when parent path equal target path, and the parent path is also a file path - if parentPath == path.Clean(item.Path) { + if parentPath == path.Clean(item.Path) || parentPath2 == path.Clean(item.Path) { dirStr = "" } else if parentPath != "" && strings.HasPrefix(dirStr, parentPath) { dirStr = dirStr[len(parentPath):] + } else if parentPath2 != "" && strings.HasPrefix(dirStr, parentPath2) { + dirStr = dirStr[len(parentPath2):] } + // log.Infof("dirStr: %s", dirStr) dirStr = strings.TrimPrefix(dirStr, "/") var dirList []string @@ -231,8 +246,8 @@ func buildIpldGraph(ctx context.Context, fileList []Finfo, parentPath string, pa dirNodeMap[rootKey].AddNodeLink(item.Name, fileNode) continue } - // log.Info(item.Path) - // log.Info(dirList) + // log.Info("path:", item.Path) + // log.Info("dir list:", dirList) i := len(dirList) - 1 for ; i >= 0; i-- { // get dirNodeMap by index @@ -569,3 +584,56 @@ func PadCar(w io.Writer, carSize int64) error { return nil } + +var nameReg = regexp.MustCompile(`^[^_]+_[^_]+\.[^_]+$`) + +func tryRenameFileName(fis []Finfo) []Finfo { + rename := func(in string) string { + parts := strings.Split(in, "_") + if len(parts) < 2 { + return in + } + arr := strings.Split(parts[1], ".") + if len(arr) < 2 { + return in + } + // 重新拼接文件名 + return fmt.Sprintf("%s_%s.%s", parts[0], RandomLetters(), arr[1]) + } + + for i, fi := range fis { + // log.Infof("try rename file: %s", fi.Name) + if nameReg.MatchString(fi.Name) { + newName := rename(fi.Name) + fis[i].Name = newName + // log.Infof("rename file name %s to %s", fi.Name, newName) + } + } + + return fis +} + +// RandomLetters 从26个英文字母中随机挑选6到8个字母 +func RandomLetters() string { + // 
// RandomLetters returns 8 distinct uppercase ASCII letters chosen at random,
// used by tryRenameFileName as a collision-avoiding rename infix.
// (The original comment promised 6-8 letters; the code always picked 8, so 8
// is documented as the contract.)
func RandomLetters() string {
	const numLetters = 8

	// BUG FIX: the previous code built rand.New(rand.NewSource(...)) and
	// discarded the result, so the "seeding" line was dead code and the
	// global source was used. Keep an explicitly seeded local generator.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	letters := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
	// Fisher-Yates shuffle, then take the first numLetters runes; shuffling
	// guarantees the chosen letters are distinct.
	for i := len(letters) - 1; i > 0; i-- {
		j := r.Intn(i + 1)
		letters[i], letters[j] = letters[j], letters[i]
	}
	return string(letters[:numLetters])
}