[codegen] update to latest spec
l-trotta committed Oct 28, 2024
1 parent 4526915 commit 494b874
Showing 2 changed files with 48 additions and 37 deletions.
NGramTokenizer.java
@@ -62,9 +62,11 @@ public class NGramTokenizer extends TokenizerBase implements TokenizerDefinition
     @Nullable
     private final String customTokenChars;
 
-    private final int maxGram;
+    @Nullable
+    private final Integer maxGram;
 
-    private final int minGram;
+    @Nullable
+    private final Integer minGram;
 
     private final List<TokenChar> tokenChars;
 
@@ -74,9 +76,9 @@ private NGramTokenizer(Builder builder) {
         super(builder);
 
         this.customTokenChars = builder.customTokenChars;
-        this.maxGram = ApiTypeHelper.requireNonNull(builder.maxGram, this, "maxGram");
-        this.minGram = ApiTypeHelper.requireNonNull(builder.minGram, this, "minGram");
-        this.tokenChars = ApiTypeHelper.unmodifiableRequired(builder.tokenChars, this, "tokenChars");
+        this.maxGram = builder.maxGram;
+        this.minGram = builder.minGram;
+        this.tokenChars = ApiTypeHelper.unmodifiable(builder.tokenChars);
 
     }
 
@@ -101,21 +103,23 @@ public final String customTokenChars() {
     }
 
     /**
-     * Required - API name: {@code max_gram}
+     * API name: {@code max_gram}
     */
-    public final int maxGram() {
+    @Nullable
+    public final Integer maxGram() {
         return this.maxGram;
     }
 
     /**
-     * Required - API name: {@code min_gram}
+     * API name: {@code min_gram}
     */
-    public final int minGram() {
+    @Nullable
+    public final Integer minGram() {
         return this.minGram;
     }
 
     /**
-     * Required - API name: {@code token_chars}
+     * API name: {@code token_chars}
     */
     public final List<TokenChar> tokenChars() {
         return this.tokenChars;
@@ -130,12 +134,16 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) {
             generator.write(this.customTokenChars);
 
         }
-        generator.writeKey("max_gram");
-        generator.write(this.maxGram);
+        if (this.maxGram != null) {
+            generator.writeKey("max_gram");
+            generator.write(this.maxGram);
 
-        generator.writeKey("min_gram");
-        generator.write(this.minGram);
+        }
+        if (this.minGram != null) {
+            generator.writeKey("min_gram");
+            generator.write(this.minGram);
 
+        }
         if (ApiTypeHelper.isDefined(this.tokenChars)) {
             generator.writeKey("token_chars");
             generator.writeStartArray();
@@ -160,10 +168,13 @@ public static class Builder extends TokenizerBase.AbstractBuilder<Builder>
         @Nullable
         private String customTokenChars;
 
+        @Nullable
         private Integer maxGram;
 
+        @Nullable
         private Integer minGram;
 
+        @Nullable
         private List<TokenChar> tokenChars;
 
         /**
@@ -175,23 +186,23 @@ public final Builder customTokenChars(@Nullable String value) {
         }
 
         /**
-         * Required - API name: {@code max_gram}
+         * API name: {@code max_gram}
         */
-        public final Builder maxGram(int value) {
+        public final Builder maxGram(@Nullable Integer value) {
             this.maxGram = value;
             return this;
         }
 
         /**
-         * Required - API name: {@code min_gram}
+         * API name: {@code min_gram}
         */
-        public final Builder minGram(int value) {
+        public final Builder minGram(@Nullable Integer value) {
             this.minGram = value;
             return this;
         }
 
         /**
-         * Required - API name: {@code token_chars}
+         * API name: {@code token_chars}
         * <p>
         * Adds all elements of <code>list</code> to <code>tokenChars</code>.
         */
@@ -201,7 +212,7 @@ public final Builder tokenChars(List<TokenChar> list) {
         }
 
         /**
-         * Required - API name: {@code token_chars}
+         * API name: {@code token_chars}
         * <p>
         * Adds one or more values to <code>tokenChars</code>.
         */
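In practice this change makes max_gram, min_gram, and token_chars optional when constructing an NGramTokenizer. A minimal usage sketch, assuming the generated of(...) factory and fluent builder that elasticsearch-java types expose (the example class name is invented for illustration):

import co.elastic.clients.elasticsearch._types.analysis.NGramTokenizer;

public class NGramTokenizerExample {
    public static void main(String[] args) {
        // Before this change, omitting maxGram/minGram/tokenChars failed the
        // ApiTypeHelper.requireNonNull checks shown above; now the properties
        // are optional and simply absent from the serialized JSON.
        NGramTokenizer tokenizer = NGramTokenizer.of(t -> t
                .customTokenChars("+-")); // optional, as before
        System.out.println(tokenizer.maxGram()); // now null rather than an error
    }
}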
The second changed file is the generated HTML reference page that maps API type names to their source locations in the elasticsearch-specification repository; the spec line numbers shift to match the new tokenizers.ts, and the pinned spec commit is bumped.
@@ -628,7 +628,7 @@
   '_types.analysis.CharFilter': '_types/analysis/char_filters.ts#L28-L30',
   '_types.analysis.CharFilterBase': '_types/analysis/char_filters.ts#L24-L26',
   '_types.analysis.CharFilterDefinition': '_types/analysis/char_filters.ts#L32-L41',
-  '_types.analysis.CharGroupTokenizer': '_types/analysis/tokenizers.ts#L55-L59',
+  '_types.analysis.CharGroupTokenizer': '_types/analysis/tokenizers.ts#L58-L62',
   '_types.analysis.CommonGramsTokenFilter': '_types/analysis/token_filters.ts#L174-L180',
   '_types.analysis.CompoundWordTokenFilterBase': '_types/analysis/token_filters.ts#L43-L51',
   '_types.analysis.ConditionTokenFilter': '_types/analysis/token_filters.ts#L182-L186',
@@ -667,7 +667,7 @@
   '_types.analysis.KeepWordsTokenFilter': '_types/analysis/token_filters.ts#L225-L230',
   '_types.analysis.KeywordAnalyzer': '_types/analysis/analyzers.ts#L47-L50',
   '_types.analysis.KeywordMarkerTokenFilter': '_types/analysis/token_filters.ts#L232-L238',
-  '_types.analysis.KeywordTokenizer': '_types/analysis/tokenizers.ts#L61-L64',
+  '_types.analysis.KeywordTokenizer': '_types/analysis/tokenizers.ts#L64-L67',
   '_types.analysis.KuromojiAnalyzer': '_types/analysis/kuromoji-plugin.ts#L25-L29',
   '_types.analysis.KuromojiIterationMarkCharFilter': '_types/analysis/kuromoji-plugin.ts#L31-L35',
   '_types.analysis.KuromojiPartOfSpeechTokenFilter': '_types/analysis/kuromoji-plugin.ts#L37-L40',
@@ -678,26 +678,26 @@
   '_types.analysis.Language': '_types/analysis/languages.ts#L20-L55',
   '_types.analysis.LanguageAnalyzer': '_types/analysis/analyzers.ts#L52-L59',
   '_types.analysis.LengthTokenFilter': '_types/analysis/token_filters.ts#L244-L248',
-  '_types.analysis.LetterTokenizer': '_types/analysis/tokenizers.ts#L66-L68',
+  '_types.analysis.LetterTokenizer': '_types/analysis/tokenizers.ts#L69-L71',
   '_types.analysis.LimitTokenCountTokenFilter': '_types/analysis/token_filters.ts#L250-L254',
   '_types.analysis.LowercaseNormalizer': '_types/analysis/normalizers.ts#L26-L28',
   '_types.analysis.LowercaseTokenFilter': '_types/analysis/token_filters.ts#L256-L259',
-  '_types.analysis.LowercaseTokenizer': '_types/analysis/tokenizers.ts#L70-L72',
+  '_types.analysis.LowercaseTokenizer': '_types/analysis/tokenizers.ts#L73-L75',
   '_types.analysis.MappingCharFilter': '_types/analysis/char_filters.ts#L48-L52',
   '_types.analysis.MultiplexerTokenFilter': '_types/analysis/token_filters.ts#L261-L265',
   '_types.analysis.NGramTokenFilter': '_types/analysis/token_filters.ts#L267-L272',
-  '_types.analysis.NGramTokenizer': '_types/analysis/tokenizers.ts#L38-L44',
+  '_types.analysis.NGramTokenizer': '_types/analysis/tokenizers.ts#L38-L47',
   '_types.analysis.NoriAnalyzer': '_types/analysis/analyzers.ts#L66-L72',
-  '_types.analysis.NoriDecompoundMode': '_types/analysis/tokenizers.ts#L74-L78',
+  '_types.analysis.NoriDecompoundMode': '_types/analysis/tokenizers.ts#L77-L81',
   '_types.analysis.NoriPartOfSpeechTokenFilter': '_types/analysis/token_filters.ts#L274-L277',
-  '_types.analysis.NoriTokenizer': '_types/analysis/tokenizers.ts#L80-L86',
+  '_types.analysis.NoriTokenizer': '_types/analysis/tokenizers.ts#L83-L89',
   '_types.analysis.Normalizer': '_types/analysis/normalizers.ts#L20-L24',
-  '_types.analysis.PathHierarchyTokenizer': '_types/analysis/tokenizers.ts#L88-L95',
+  '_types.analysis.PathHierarchyTokenizer': '_types/analysis/tokenizers.ts#L91-L98',
   '_types.analysis.PatternAnalyzer': '_types/analysis/analyzers.ts#L74-L81',
   '_types.analysis.PatternCaptureTokenFilter': '_types/analysis/token_filters.ts#L279-L283',
   '_types.analysis.PatternReplaceCharFilter': '_types/analysis/char_filters.ts#L54-L59',
   '_types.analysis.PatternReplaceTokenFilter': '_types/analysis/token_filters.ts#L285-L291',
-  '_types.analysis.PatternTokenizer': '_types/analysis/tokenizers.ts#L97-L102',
+  '_types.analysis.PatternTokenizer': '_types/analysis/tokenizers.ts#L100-L105',
   '_types.analysis.PhoneticEncoder': '_types/analysis/phonetic-plugin.ts#L23-L36',
   '_types.analysis.PhoneticLanguage': '_types/analysis/phonetic-plugin.ts#L38-L51',
   '_types.analysis.PhoneticNameType': '_types/analysis/phonetic-plugin.ts#L53-L57',
@@ -713,28 +713,28 @@
   '_types.analysis.SnowballLanguage': '_types/analysis/languages.ts#L57-L80',
   '_types.analysis.SnowballTokenFilter': '_types/analysis/token_filters.ts#L310-L313',
   '_types.analysis.StandardAnalyzer': '_types/analysis/analyzers.ts#L95-L99',
-  '_types.analysis.StandardTokenizer': '_types/analysis/tokenizers.ts#L104-L107',
+  '_types.analysis.StandardTokenizer': '_types/analysis/tokenizers.ts#L107-L110',
   '_types.analysis.StemmerOverrideTokenFilter': '_types/analysis/token_filters.ts#L315-L319',
   '_types.analysis.StemmerTokenFilter': '_types/analysis/token_filters.ts#L321-L325',
   '_types.analysis.StopAnalyzer': '_types/analysis/analyzers.ts#L101-L106',
   '_types.analysis.StopTokenFilter': '_types/analysis/token_filters.ts#L96-L102',
   '_types.analysis.SynonymFormat': '_types/analysis/token_filters.ts#L104-L107',
   '_types.analysis.SynonymGraphTokenFilter': '_types/analysis/token_filters.ts#L109-L119',
   '_types.analysis.SynonymTokenFilter': '_types/analysis/token_filters.ts#L121-L131',
-  '_types.analysis.TokenChar': '_types/analysis/tokenizers.ts#L46-L53',
+  '_types.analysis.TokenChar': '_types/analysis/tokenizers.ts#L49-L56',
   '_types.analysis.TokenFilter': '_types/analysis/token_filters.ts#L345-L347',
   '_types.analysis.TokenFilterBase': '_types/analysis/token_filters.ts#L39-L41',
   '_types.analysis.TokenFilterDefinition': '_types/analysis/token_filters.ts#L349-L401',
-  '_types.analysis.Tokenizer': '_types/analysis/tokenizers.ts#L119-L121',
+  '_types.analysis.Tokenizer': '_types/analysis/tokenizers.ts#L122-L124',
   '_types.analysis.TokenizerBase': '_types/analysis/tokenizers.ts#L26-L28',
-  '_types.analysis.TokenizerDefinition': '_types/analysis/tokenizers.ts#L123-L141',
+  '_types.analysis.TokenizerDefinition': '_types/analysis/tokenizers.ts#L126-L144',
   '_types.analysis.TrimTokenFilter': '_types/analysis/token_filters.ts#L327-L329',
   '_types.analysis.TruncateTokenFilter': '_types/analysis/token_filters.ts#L331-L334',
-  '_types.analysis.UaxEmailUrlTokenizer': '_types/analysis/tokenizers.ts#L109-L112',
+  '_types.analysis.UaxEmailUrlTokenizer': '_types/analysis/tokenizers.ts#L112-L115',
   '_types.analysis.UniqueTokenFilter': '_types/analysis/token_filters.ts#L336-L339',
   '_types.analysis.UppercaseTokenFilter': '_types/analysis/token_filters.ts#L341-L343',
   '_types.analysis.WhitespaceAnalyzer': '_types/analysis/analyzers.ts#L108-L111',
-  '_types.analysis.WhitespaceTokenizer': '_types/analysis/tokenizers.ts#L114-L117',
+  '_types.analysis.WhitespaceTokenizer': '_types/analysis/tokenizers.ts#L117-L120',
   '_types.analysis.WordDelimiterGraphTokenFilter': '_types/analysis/token_filters.ts#L150-L167',
   '_types.analysis.WordDelimiterTokenFilter': '_types/analysis/token_filters.ts#L133-L148',
   '_types.mapping.AggregateMetricDoubleProperty': '_types/mapping/complex.ts#L61-L66',
@@ -2876,10 +2876,10 @@
   if (hash.length > 1) {
     hash = hash.substring(1);
   }
-  window.location = "https://github.com/elastic/elasticsearch-specification/tree/dd79a332a784748acb9fa3a71b6f8c88295219ac/specification/" + (paths[hash] || "");
+  window.location = "https://github.com/elastic/elasticsearch-specification/tree/1a27080e79e765b656e881871d5d79e939970b95/specification/" + (paths[hash] || "");
 </script>
 </head>
 <body>
-  Please see the <a href="https://github.com/elastic/elasticsearch-specification/tree/dd79a332a784748acb9fa3a71b6f8c88295219ac/specification/">Elasticsearch API specification</a>.
+  Please see the <a href="https://github.com/elastic/elasticsearch-specification/tree/1a27080e79e765b656e881871d5d79e939970b95/specification/">Elasticsearch API specification</a>.
 </body>
 </html>
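For context on this second file: the page is a redirect shim. Opened with a fragment such as #_types.analysis.NGramTokenizer, the inline script strips the leading '#', looks the name up in the paths map, and forwards to that location in the pinned elasticsearch-specification commit (bumped here from dd79a33 to 1a27080). A rough Java rendering of that lookup, purely illustrative (SpecRedirect and resolve are invented names for this sketch):

import java.util.Map;

// Illustrative only: mirrors the page's inline JavaScript in Java.
public class SpecRedirect {
    // The elasticsearch-specification commit pinned by this change.
    static final String SPEC_COMMIT = "1a27080e79e765b656e881871d5d79e939970b95";

    static String resolve(Map<String, String> paths, String hash) {
        if (hash.length() > 1) {
            hash = hash.substring(1); // drop the leading '#'
        }
        // Unknown names fall back to the specification root, as in the script.
        return "https://github.com/elastic/elasticsearch-specification/tree/"
                + SPEC_COMMIT + "/specification/" + paths.getOrDefault(hash, "");
    }

    public static void main(String[] args) {
        Map<String, String> paths = Map.of(
                "_types.analysis.NGramTokenizer", "_types/analysis/tokenizers.ts#L38-L47");
        System.out.println(resolve(paths, "#_types.analysis.NGramTokenizer"));
    }
}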
