@@ -28,13 +28,13 @@ const (
2828//
2929// See more: https://docs.atlas.mongodb.com/reference/api/atlas-search/
3030type SearchService interface {
31- ListIndexes (context.Context , string , string , string , string , * ListOptions ) ([]* SearchIndex , * Response , error )
32- GetIndex (context.Context , string , string , string ) (* SearchIndex , * Response , error )
33- CreateIndex (context.Context , string , string , * SearchIndex ) (* SearchIndex , * Response , error )
34- UpdateIndex (context.Context , string , string , string , * SearchIndex ) (* SearchIndex , * Response , error )
35- DeleteIndex (context.Context , string , string , string ) (* Response , error )
36- ListAnalyzers (context.Context , string , string , * ListOptions ) ([]* SearchAnalyzer , * Response , error )
37- UpdateAllAnalyzers (context.Context , string , string , []* SearchAnalyzer ) ([]* SearchAnalyzer , * Response , error )
31+ ListIndexes (ctx context.Context , groupID string , clusterName string , databaseName string , collectionName string , opts * ListOptions ) ([]* SearchIndex , * Response , error )
32+ GetIndex (ctx context.Context , groupID , clusterName , indexID string ) (* SearchIndex , * Response , error )
33+ CreateIndex (ctx context.Context , projectID , clusterName string , r * SearchIndex ) (* SearchIndex , * Response , error )
34+ UpdateIndex (ctx context.Context , projectID , clusterName , indexID string , r * SearchIndex ) (* SearchIndex , * Response , error )
35+ DeleteIndex (ctx context.Context , projectID , clusterName , indexID string ) (* Response , error )
36+ ListAnalyzers (ctx context.Context , groupID , clusterName string , listOptions * ListOptions ) ([]* SearchAnalyzer , * Response , error )
37+ UpdateAllAnalyzers (ctx context.Context , groupID , clusterName string , analyzers []* SearchAnalyzer ) ([]* SearchAnalyzer , * Response , error )
3838}
3939
4040// SearchServiceOp provides an implementation of the SearchService interface.
@@ -242,15 +242,15 @@ func (s *SearchServiceOp) UpdateAllAnalyzers(ctx context.Context, groupID, clust
242242
// SearchIndex index definition.
type SearchIndex struct {
	// Analyzer name of the analyzer to use (a built-in analyzer or one defined in Analyzers).
	Analyzer string `json:"analyzer,omitempty"`
	// Analyzers custom analyzers defined for this index.
	Analyzers []*CustomAnalyzer `json:"analyzers,omitempty"`
	// CollectionName collection the index applies to (required — no omitempty).
	CollectionName string `json:"collectionName"`
	// Database containing the collection (required — no omitempty).
	Database string `json:"database"`
	// IndexID unique identifier assigned by Atlas; empty when creating a new index.
	IndexID string `json:"indexID,omitempty"`
	// Mappings index specifications for the collection fields.
	Mappings *IndexMapping `json:"mappings,omitempty"`
	// Name of the index (required — no omitempty).
	Name string `json:"name"`
	// SearchAnalyzer analyzer to apply to query text at search time.
	SearchAnalyzer string `json:"searchAnalyzer,omitempty"`
	// Status of the index; set by Atlas, not by the caller — presumably read-only. TODO confirm against API docs.
	Status string `json:"status,omitempty"`
}
255255
256256// IndexMapping containing index specifications for the collection fields.
@@ -280,7 +280,7 @@ type IndexField struct {
280280 IndexShapes * bool `json:"indexShapes,omitempty"`
281281}
282282
283- // SearchAnalyzer custom analyzer definition.
283+ // SearchAnalyzer search analyzer definition.
284284type SearchAnalyzer struct {
285285 BaseAnalyzer string `json:"baseAnalyzer"`
286286 MaxTokenLength * int `json:"maxTokenLength,omitempty"`
@@ -289,3 +289,126 @@ type SearchAnalyzer struct {
289289 StemExclusionSet []string `json:"stemExclusionSet,omitempty"`
290290 Stopwords []string `json:"stopwords,omitempty"`
291291}
292+
// CustomAnalyzer custom analyzer for index.
type CustomAnalyzer struct {
	// Name of the custom analyzer. Names must be unique within an index, and may not
	// start with any of the following strings: lucene, builtin, mongodb.
	Name string `json:"name"`

	// CharFilters array containing zero or more character filters.
	CharFilters []*AnalyzerCharFilter `json:"charFilters,omitempty"`

	// Tokenizer to use. Required: the tag has no omitempty, so a nil value is
	// serialized as null rather than omitted.
	Tokenizer *AnalyzerTokenizer `json:"tokenizer"`

	// TokenFilters array containing zero or more token filters.
	TokenFilters []*AnalyzerTokenFilters `json:"tokenFilters,omitempty"`
}
307+
308+ // AnalyzerCharFilter Characters filters for custom analyzer. For further information, go to
309+ // https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/custom/#std-label-char-filters-ref
310+ type AnalyzerCharFilter struct {
311+ // Type The type of this character filter, supports: htmlStrip, icuNormalize, mapping, persian
312+ Type string `json:"type"`
313+
314+ // IgnoreTags A list of HTML tags to exclude from filtering. Apply for type: htmlStrip
315+ IgnoreTags []string `json:"IgnoreTags,omitempty"`
316+
317+ // Mappings An object containing a comma-separated list of mappings. A mapping indicates that one character
318+ // or group of characters should be substituted for another, in the format <original> : <replacement>
319+ // apply for type: mapping
320+ Mappings * map [string ]string `json:"mappings,omitempty"`
321+ }
322+
// AnalyzerTokenizer tokenizer for custom analyzer, for further information, go to
// https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/custom/#std-label-tokenizers-ref
type AnalyzerTokenizer struct {
	// Type the type of this tokenizer. Supports standard, keyword, whitespace, nGram, edgeGram,
	// regexCaptureGroup, regexSplit, uaxUrlEmail.
	Type string `json:"type"`

	// MaxTokenLength maximum length for a single token. Tokens greater than this length are split at
	// maxTokenLength into multiple tokens. Apply for type: standard, whitespace, uaxUrlEmail.
	MaxTokenLength *int `json:"maxTokenLength,omitempty"`

	// MinGram number of characters to include in the shortest token created. Apply for type: nGram, edgeGram.
	MinGram *int `json:"minGram,omitempty"`

	// MaxGram number of characters to include in the longest token created. Apply for type: nGram, edgeGram.
	MaxGram *int `json:"maxGram,omitempty"`

	// Pattern a regular expression to match against. Apply for type: regexCaptureGroup, regexSplit.
	Pattern string `json:"pattern,omitempty"`

	// Group index of the character group within the matching expression to extract into tokens. Use 0 to
	// extract all character groups. Apply for type: regexCaptureGroup.
	Group *int `json:"group,omitempty"`
}
347+
// AnalyzerTokenFilters token filter for custom analyzer. To get more information, go to
// https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/custom/#std-label-token-filters-ref
type AnalyzerTokenFilters struct {
	// Type the type of this token filter. Supports: daitchMokotoffSoundex, lowercase, length, icuFolding,
	// icuNormalize, nGram, edgeGram, shingle, regex, snowballStemming, stopword, trim.
	Type string `json:"type"`

	// OriginalTokens specifies whether to include or omit the original tokens in the output of the token filter.
	// Value can be one of the following: include - to include the original tokens with the encoded tokens in the
	// output of the token filter. We recommend this value if you want queries on both the original tokens as well
	// as the encoded forms. omit - to omit the original tokens and include only the encoded tokens in the output
	// of the token filter. Use this value if you want to only query on the encoded forms of the original tokens.
	// Apply for type: daitchMokotoffSoundex.
	OriginalTokens string `json:"originalTokens,omitempty"`

	// Min the minimum length of a token. Must be less than or equal to max. Apply for type: length.
	Min *int `json:"min,omitempty"`

	// Max the maximum length of a token. Must be greater than or equal to min. Apply for type: length.
	Max *int `json:"max,omitempty"`

	// NormalizationForm normalization form to apply. Accepted values are:
	// nfd (Canonical Decomposition)
	// nfc (Canonical Decomposition, followed by Canonical Composition)
	// nfkd (Compatibility Decomposition)
	// nfkc (Compatibility Decomposition, followed by Canonical Composition).
	// Apply for type: icuNormalize.
	NormalizationForm string `json:"normalizationForm,omitempty"`

	// MinGram the minimum length of generated n-grams. Must be less than or equal to maxGram.
	// Apply for type: nGram, edgeGram.
	MinGram *int `json:"minGram,omitempty"`

	// MaxGram the maximum length of generated n-grams. Must be greater than or equal to minGram.
	// Apply for type: nGram, edgeGram.
	MaxGram *int `json:"maxGram,omitempty"`

	// TermsNotInBounds accepted values are: include, omit.
	// If include is specified, tokens shorter than minGram or longer than maxGram are indexed as-is. If omit is
	// specified, those tokens are not indexed. Apply for type: nGram, edgeGram.
	TermsNotInBounds string `json:"termsNotInBounds,omitempty"`

	// MinShingleSize minimum number of tokens per shingle. Must be less than or equal to maxShingleSize.
	// Apply for type: shingle.
	MinShingleSize *int `json:"minShingleSize,omitempty"`

	// MaxShingleSize maximum number of tokens per shingle. Must be greater than or equal to minShingleSize.
	// Apply for type: shingle.
	MaxShingleSize *int `json:"maxShingleSize,omitempty"`

	// Pattern regular expression pattern to apply to each token. Apply for type: regex.
	Pattern string `json:"pattern,omitempty"`

	// Replacement replacement string to substitute wherever a matching pattern occurs. Apply for type: regex.
	Replacement string `json:"replacement,omitempty"`

	// Matches acceptable values are: all, first.
	// If matches is set to all, replace all matching patterns. Otherwise, replace only the first matching
	// pattern. Apply for type: regex.
	Matches string `json:"matches,omitempty"`

	// StemmerName the snowball stemmer to use. Apply for type: snowballStemming.
	StemmerName string `json:"stemmerName,omitempty"`

	// Tokens the list of stop words that correspond to the tokens to remove. Value must be one or more stop
	// words. Apply for type: stopword.
	Tokens []string `json:"tokens,omitempty"`

	// IgnoreCase the flag that indicates whether or not to ignore case of stop words when filtering the tokens
	// to remove. The value can be one of the following:
	// true - to ignore case and remove all tokens that match the specified stop words
	// false - to be case-sensitive and remove only tokens that exactly match the specified case
	// If omitted, defaults to true. Apply for type: stopword.
	IgnoreCase *bool `json:"ignoreCase,omitempty"`
}
0 commit comments