Skip to content

Commit 2812432

Browse files
authored
CLOUDP-98424: Do not parse parts of the request in atlas search (#244)
1 parent f55efb4 commit 2812432

File tree

2 files changed

+25
-169
lines changed

2 files changed

+25
-169
lines changed

mongodbatlas/search.go

Lines changed: 11 additions & 155 deletions
Original file line numberDiff line numberDiff line change
@@ -242,42 +242,21 @@ func (s *SearchServiceOp) UpdateAllAnalyzers(ctx context.Context, groupID, clust
242242

243243
// SearchIndex index definition.
244244
type SearchIndex struct {
245-
Analyzer string `json:"analyzer,omitempty"`
246-
Analyzers []*CustomAnalyzer `json:"analyzers,omitempty"` // Custom analyzers
247-
CollectionName string `json:"collectionName"`
248-
Database string `json:"database"`
249-
IndexID string `json:"indexID,omitempty"`
250-
Mappings *IndexMapping `json:"mappings,omitempty"`
251-
Name string `json:"name"`
252-
SearchAnalyzer string `json:"searchAnalyzer,omitempty"`
253-
Status string `json:"status,omitempty"`
245+
Analyzer string `json:"analyzer,omitempty"`
246+
Analyzers []map[string]interface{} `json:"analyzers,omitempty"` // Custom analyzers
247+
CollectionName string `json:"collectionName"`
248+
Database string `json:"database"`
249+
IndexID string `json:"indexID,omitempty"`
250+
Mappings *IndexMapping `json:"mappings,omitempty"`
251+
Name string `json:"name"`
252+
SearchAnalyzer string `json:"searchAnalyzer,omitempty"`
253+
Status string `json:"status,omitempty"`
254254
}
255255

256256
// IndexMapping containing index specifications for the collection fields.
257257
type IndexMapping struct {
258-
Dynamic bool `json:"dynamic"`
259-
Fields *map[string]IndexField `json:"fields,omitempty"`
260-
}
261-
262-
// IndexField field specifications.
263-
type IndexField struct {
264-
Analyzer string `json:"analyzer,omitempty"`
265-
Type string `json:"type"`
266-
Tokenization string `json:"tokenization,omitempty"` // edgeGram|nGram
267-
MinGrams *int `json:"minGrams,omitempty"`
268-
MaxGrams *int `json:"maxGrams,omitempty"`
269-
FoldDiacritics *bool `json:"foldDiacritics,omitempty"`
270-
Fields *map[string]IndexField `json:"fields,omitempty"`
271-
SearchAnalyzer string `json:"searchAnalyzer,omitempty"`
272-
IndexOptions string `json:"indexOptions,omitempty"` // docs|freqs|positions
273-
Store *bool `json:"store,omitempty"`
274-
IgnoreAbove *int `json:"ignoreAbove,omitempty"`
275-
Norms string `json:"norms,omitempty"` // include|omit
276-
Dynamic *bool `json:"dynamic,omitempty"`
277-
Representation string `json:"representation,omitempty"`
278-
IndexIntegers *bool `json:"indexIntegers,omitempty"`
279-
IndexDoubles *bool `json:"indexDoubles,omitempty"`
280-
IndexShapes *bool `json:"indexShapes,omitempty"`
258+
Dynamic bool `json:"dynamic"`
259+
Fields *map[string]interface{} `json:"fields,omitempty"`
281260
}
282261

283262
// SearchAnalyzer search analyzer definition.
@@ -289,126 +268,3 @@ type SearchAnalyzer struct {
289268
StemExclusionSet []string `json:"stemExclusionSet,omitempty"`
290269
Stopwords []string `json:"stopwords,omitempty"`
291270
}
292-
293-
// CustomAnalyzer custom analyzer for index.
294-
type CustomAnalyzer struct {
295-
// Name of the custom analyzer. Names must be unique within an index, and may not start with any of the following strings: lucene, builtin, mongodb
296-
Name string `json:"name"`
297-
298-
// CharFilters Array containing zero or more character filters.
299-
CharFilters []*AnalyzerCharFilter `json:"charFilters,omitempty"`
300-
301-
// Tokenizer to use.
302-
Tokenizer *AnalyzerTokenizer `json:"tokenizer"`
303-
304-
// TokenFilters Array containing zero or more token filters
305-
TokenFilters []*AnalyzerTokenFilters `json:"tokenFilters,omitempty"`
306-
}
307-
308-
// AnalyzerCharFilter Characters filters for custom analyzer. For further information, go to
309-
// https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/custom/#std-label-char-filters-ref
310-
type AnalyzerCharFilter struct {
311-
// Type The type of this character filter, supports: htmlStrip, icuNormalize, mapping, persian
312-
Type string `json:"type"`
313-
314-
// IgnoreTags A list of HTML tags to exclude from filtering. Apply for type: htmlStrip
315-
IgnoreTags []string `json:"IgnoreTags,omitempty"`
316-
317-
// Mappings An object containing a comma-separated list of mappings. A mapping indicates that one character
318-
// or group of characters should be substituted for another, in the format <original> : <replacement>
319-
// apply for type: mapping
320-
Mappings *map[string]string `json:"mappings,omitempty"`
321-
}
322-
323-
// AnalyzerTokenizer tokenizer for custom analyzer, for further information, go to
324-
// https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/custom/#std-label-tokenizers-ref
325-
type AnalyzerTokenizer struct {
326-
// Type The type of this tokenizer. Supports standard, keyword, whitespace, nGram, edgeGram, regexCaptureGroup,
327-
// regexSplit, uaxUrlEmail
328-
Type string `json:"type"`
329-
330-
// MaxTokenLength Maximum length for a single token. Tokens greater than this length are split at maxTokenLength
331-
// into multiple tokens. Apply for type: standard, whitespace, uaxUrlEmail
332-
MaxTokenLength *int `json:"maxTokenLength,omitempty"`
333-
334-
// MinGram Number of characters to include in the shortest token created. Apply for type: nGram, edgeGram
335-
MinGram *int `json:"minGram,omitempty"`
336-
337-
// MaxGram Number of characters to include in the longest token created. Apply for type: nGram, edgeGram
338-
MaxGram *int `json:"maxGram,omitempty"`
339-
340-
// Pattern A regular expression to match against. Apply for type: regexCaptureGroup, regexSplit
341-
Pattern string `json:"pattern,omitempty"`
342-
343-
// Group Index of the character group within the matching expression to extract into tokens. Use 0 to extract all
344-
// character groups. Apply for type: regexCaptureGroup
345-
Group *int `json:"group,omitempty"`
346-
}
347-
348-
// AnalyzerTokenFilters token filter for custom analyzer. To get more information, go to
349-
// https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/custom/#std-label-token-filters-ref
350-
type AnalyzerTokenFilters struct {
351-
// Type The type of this token filter. Supports: daitchMokotoffSoundex, lowercase, length, icuFolding, icuNormalize
352-
// nGram, edgeGram, shingle, regex, snowballStemming, stopword, trim
353-
Type string `json:"type"`
354-
355-
// OriginalTokens Specifies whether to include or omit the original tokens in the output of the token filter. Value can
356-
// be one of the following: include - to include the original tokens with the encoded tokens in the output of the token
357-
// filter. We recommend this value if you want queries on both the original tokens as well as the encoded forms.
358-
// omit - to omit the original tokens and include only the encoded tokens in the output of the token filter.
359-
// Use this value if you want to only query on the encoded forms of the original tokens. Apply for type: daitchMokotoffSoundex
360-
OriginalTokens string `json:"originalTokens,omitempty"`
361-
362-
// Min The minimum length of a token. Must be less than or equal to max. Apply for type: length
363-
Min *int `json:"min,omitempty"`
364-
365-
// Max The maximum length of a token. Must be greater than or equal to min.
366-
Max *int `json:"max,omitempty"`
367-
368-
// NormalizationForm Normalization form to apply. Accepted values are:
369-
// nfd (Canonical Decomposition)
370-
// nfc (Canonical Decomposition, followed by Canonical Composition)
371-
// nfkd (Compatibility Decomposition)
372-
// nfkc (Compatibility Decomposition, followed by Canonical Composition).
373-
// Apply for type: icuNormalize
374-
NormalizationForm string `json:"normalizationForm,omitempty"`
375-
376-
// MinGram The minimum length of generated n-grams. Must be less than or equal to maxGram. Apply for type: nGram, edgeGram
377-
MinGram *int `json:"minGram,omitempty"`
378-
379-
// MaxGram The maximum length of generated n-grams. Must be greater than or equal to minGram. Apply for type: nGram, edgeGram
380-
MaxGram *int `json:"maxGram,omitempty"`
381-
382-
// TermsNotInBounds Accepted values are: include, omit
383-
// If include is specified, tokens shorter than minGram or longer than maxGram are indexed as-is. If omit is specified,
384-
// those tokens are not indexed. Apply for type: nGram, edgeGram
385-
TermsNotInBounds string `json:"termsNotInBounds,omitempty"`
386-
387-
// MinShingleSize Minimum number of tokens per shingle. Must be less than or equal to maxShingleSize. Apply for type: shingle
388-
MinShingleSize *int `json:"minShingleSize,omitempty"`
389-
390-
// MaxShingleSize Maximum number of tokens per shingle. Must be greater than or equal to minShingleSize. Apply for type: shingle
391-
MaxShingleSize *int `json:"maxShingleSize,omitempty"`
392-
393-
// Pattern Regular expression pattern to apply to each token. Apply for type: regex
394-
Pattern string `json:"pattern,omitempty"`
395-
396-
// Replacement Replacement string to substitute wherever a matching pattern occurs. Apply for type: regex
397-
Replacement string `json:"replacement,omitempty"`
398-
399-
// Matches Acceptable values are: all, first
400-
// If matches is set to all, replace all matching patterns. Otherwise, replace only the first matching pattern. Apply for type: regex
401-
Matches string `json:"matches,omitempty"`
402-
403-
// StemmerName Apply for type: snowballStemming
404-
StemmerName string `json:"stemmerName,omitempty"`
405-
406-
// Tokens The list of stop words that correspond to the tokens to remove. Value must be one or more stop words. Apply for type: stopword
407-
Tokens []string `json:"tokens,omitempty"`
408-
409-
// IgnoreCase The flag that indicates whether or not to ignore case of stop words when filtering the tokens to remove. The value can be one of the following:
410-
// true - to ignore case and remove all tokens that match the specified stop words
411-
// false - to be case-sensitive and remove only tokens that exactly match the specified case
412-
// If omitted, defaults to true. Apply for type: stopword
413-
IgnoreCase *bool `json:"ignoreCase,omitempty"`
414-
}

mongodbatlas/search_test.go

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -87,14 +87,14 @@ func TestSearch_ListIndexes(t *testing.T) {
8787
IndexID: "5d1268a980eef518dac0cf41",
8888
Mappings: &IndexMapping{
8989
Dynamic: false,
90-
Fields: &map[string]IndexField{
91-
"genres": {
92-
Analyzer: "lucene.standard",
93-
Type: "string",
90+
Fields: &map[string]interface{}{
91+
"genres": map[string]interface{}{
92+
"analyzer": "lucene.standard",
93+
"type": "string",
9494
},
95-
"plot": {
96-
Analyzer: "lucene.standard",
97-
Type: "string",
95+
"plot": map[string]interface{}{
96+
"analyzer": "lucene.standard",
97+
"type": "string",
9898
},
9999
},
100100
},
@@ -149,14 +149,14 @@ func TestSearch_GetIndex(t *testing.T) {
149149
IndexID: "5d1268a980eef518dac0cf41",
150150
Mappings: &IndexMapping{
151151
Dynamic: false,
152-
Fields: &map[string]IndexField{
153-
"genres": {
154-
Analyzer: "lucene.standard",
155-
Type: "string",
152+
Fields: &map[string]interface{}{
153+
"genres": map[string]interface{}{
154+
"analyzer": "lucene.standard",
155+
"type": "string",
156156
},
157-
"plot": {
158-
Analyzer: "lucene.standard",
159-
Type: "string",
157+
"plot": map[string]interface{}{
158+
"analyzer": "lucene.standard",
159+
"type": "string",
160160
},
161161
},
162162
},

0 commit comments

Comments
 (0)