diff --git a/elasticsearch_dsl/document_base.py b/elasticsearch_dsl/document_base.py
index 23b10c0a..83445aec 100644
--- a/elasticsearch_dsl/document_base.py
+++ b/elasticsearch_dsl/document_base.py
@@ -165,7 +165,7 @@ def __init__(self, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]):
fields.update(annotations.keys())
field_defaults = {}
for name in fields:
- value = None
+ value: Any = None
required = None
multi = None
if name in annotations:
@@ -201,7 +201,7 @@ def __init__(self, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]):
field_args = [type_]
elif type_ in self.type_annotation_map:
# use best field type for the type hint provided
- field, field_kwargs = self.type_annotation_map[type_]
+ field, field_kwargs = self.type_annotation_map[type_] # type: ignore
if field:
field_kwargs = {
diff --git a/elasticsearch_dsl/response/__init__.py b/elasticsearch_dsl/response/__init__.py
index ce2b6672..51c24ff9 100644
--- a/elasticsearch_dsl/response/__init__.py
+++ b/elasticsearch_dsl/response/__init__.py
@@ -22,7 +22,9 @@
Generic,
Iterator,
List,
+ Mapping,
Optional,
+ Sequence,
Tuple,
Union,
cast,
@@ -32,6 +34,7 @@
from .hit import Hit, HitMeta
if TYPE_CHECKING:
+ from .. import types
from ..aggs import Agg
from ..faceted_search_base import FacetedSearchBase
from ..search_base import Request, SearchBase
@@ -41,11 +44,47 @@
class Response(AttrDict[Any], Generic[_R]):
+ """An Elasticsearch response.
+
+ :arg took: (required)
+ :arg timed_out: (required)
+ :arg _shards: (required)
+ :arg hits: search results
+ :arg aggregations: aggregation results
+ :arg _clusters:
+ :arg fields:
+ :arg max_score:
+ :arg num_reduce_phases:
+ :arg profile:
+ :arg pit_id:
+ :arg _scroll_id:
+ :arg suggest:
+ :arg terminated_early:
+ """
+
_search: "SearchBase[_R]"
_faceted_search: "FacetedSearchBase[_R]"
_doc_class: Optional[_R]
_hits: List[_R]
+ took: int
+ timed_out: bool
+ _shards: "types.ShardStatistics"
+ _clusters: "types.ClusterStatistics"
+ fields: Mapping[str, Any]
+ max_score: float
+ num_reduce_phases: int
+ profile: "types.Profile"
+ pit_id: str
+ _scroll_id: str
+ suggest: Mapping[
+ str,
+ Sequence[
+ Union["types.CompletionSuggest", "types.PhraseSuggest", "types.TermSuggest"]
+ ],
+ ]
+ terminated_early: bool
+
def __init__(
self,
search: "Request[_R]",
@@ -176,8 +215,45 @@ def __iter__(self) -> Iterator["Agg"]: # type: ignore[override]
class UpdateByQueryResponse(AttrDict[Any], Generic[_R]):
+ """An Elasticsearch update by query response.
+
+ :arg batches:
+ :arg failures:
+ :arg noops:
+ :arg deleted:
+ :arg requests_per_second:
+ :arg retries:
+ :arg task:
+ :arg timed_out:
+ :arg took:
+ :arg total:
+ :arg updated:
+ :arg version_conflicts:
+ :arg throttled:
+ :arg throttled_millis:
+ :arg throttled_until:
+ :arg throttled_until_millis:
+ """
+
_search: "UpdateByQueryBase[_R]"
+ batches: int
+ failures: Sequence["types.BulkIndexByScrollFailure"]
+ noops: int
+ deleted: int
+ requests_per_second: float
+ retries: "types.Retries"
+ task: Union[str, int]
+ timed_out: bool
+ took: Any
+ total: int
+ updated: int
+ version_conflicts: int
+ throttled: Any
+ throttled_millis: Any
+ throttled_until: Any
+ throttled_until_millis: Any
+
def __init__(
self,
search: "Request[_R]",
diff --git a/elasticsearch_dsl/types.py b/elasticsearch_dsl/types.py
index cfb42791..64918ce3 100644
--- a/elasticsearch_dsl/types.py
+++ b/elasticsearch_dsl/types.py
@@ -82,6 +82,71 @@ def __init__(
super().__init__(kwargs)
+class BucketCorrelationFunctionCountCorrelation(AttrDict[Any]):
+ """
+ :arg indicator: (required) The indicator with which to correlate the
+ configured `bucket_path` values.
+ """
+
+ indicator: Union[
+ "BucketCorrelationFunctionCountCorrelationIndicator",
+ Dict[str, Any],
+ DefaultType,
+ ]
+
+ def __init__(
+ self,
+ *,
+ indicator: Union[
+ "BucketCorrelationFunctionCountCorrelationIndicator",
+ Dict[str, Any],
+ DefaultType,
+ ] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if indicator is not DEFAULT:
+ kwargs["indicator"] = indicator
+ super().__init__(kwargs)
+
+
+class BucketCorrelationFunctionCountCorrelationIndicator(AttrDict[Any]):
+ """
+ :arg doc_count: (required) The total number of documents that
+ initially created the expectations. It’s required to be greater
+ than or equal to the sum of all values in the buckets_path as this
+ is the originating superset of data to which the term values are
+ correlated.
+ :arg expectations: (required) An array of numbers with which to
+ correlate the configured `bucket_path` values. The length of this
+ value must always equal the number of buckets returned by the
+ `bucket_path`.
+ :arg fractions: An array of fractions to use when averaging and
+ calculating variance. This should be used if the pre-calculated
+ data and the buckets_path have known gaps. The length of
+ fractions, if provided, must equal expectations.
+ """
+
+ doc_count: Union[int, DefaultType]
+ expectations: Union[Sequence[float], DefaultType]
+ fractions: Union[Sequence[float], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ doc_count: Union[int, DefaultType] = DEFAULT,
+ expectations: Union[Sequence[float], DefaultType] = DEFAULT,
+ fractions: Union[Sequence[float], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if doc_count is not DEFAULT:
+ kwargs["doc_count"] = doc_count
+ if expectations is not DEFAULT:
+ kwargs["expectations"] = expectations
+ if fractions is not DEFAULT:
+ kwargs["fractions"] = fractions
+ super().__init__(kwargs)
+
+
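Both wrapper classes above are thin `AttrDict` subclasses, so they can be built directly with keyword arguments or from plain dicts. An illustrative construction (all values are made up):

```python
from elasticsearch_dsl import types

indicator = types.BucketCorrelationFunctionCountCorrelationIndicator(
    doc_count=1000,                   # superset of documents
    expectations=[10.0, 20.0, 30.0],  # one value per bucket in buckets_path
)
count_correlation = types.BucketCorrelationFunctionCountCorrelation(
    indicator=indicator,
)
print(count_correlation.indicator.doc_count)
```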
class BucketPathAggregation(Aggregation):
"""
:arg buckets_path: Path to the buckets that contain one set of values
@@ -130,6 +195,52 @@ def __init__(
super().__init__(kwargs)
+class ClassificationInferenceOptions(AttrDict[Any]):
+ """
+ :arg num_top_classes: Specifies the number of top class predictions to
+ return. Defaults to 0.
+ :arg num_top_feature_importance_values: Specifies the maximum number
+ of feature importance values per document.
+ :arg prediction_field_type: Specifies the type of the predicted field
+ to write. Acceptable values are: string, number, boolean. When
+ boolean is provided 1.0 is transformed to true and 0.0 to false.
+ :arg results_field: The field that is added to incoming documents to
+ contain the inference prediction. Defaults to predicted_value.
+ :arg top_classes_results_field: Specifies the field to which the top
+ classes are written. Defaults to top_classes.
+ """
+
+ num_top_classes: Union[int, DefaultType]
+ num_top_feature_importance_values: Union[int, DefaultType]
+ prediction_field_type: Union[str, DefaultType]
+ results_field: Union[str, DefaultType]
+ top_classes_results_field: Union[str, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ num_top_classes: Union[int, DefaultType] = DEFAULT,
+ num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
+ prediction_field_type: Union[str, DefaultType] = DEFAULT,
+ results_field: Union[str, DefaultType] = DEFAULT,
+ top_classes_results_field: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if num_top_classes is not DEFAULT:
+ kwargs["num_top_classes"] = num_top_classes
+ if num_top_feature_importance_values is not DEFAULT:
+ kwargs["num_top_feature_importance_values"] = (
+ num_top_feature_importance_values
+ )
+ if prediction_field_type is not DEFAULT:
+ kwargs["prediction_field_type"] = prediction_field_type
+ if results_field is not DEFAULT:
+ kwargs["results_field"] = results_field
+ if top_classes_results_field is not DEFAULT:
+ kwargs["top_classes_results_field"] = top_classes_results_field
+ super().__init__(kwargs)
+
+
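A quick construction sketch for the class above; the values shown simply echo the documented defaults and none of them are required:

```python
from elasticsearch_dsl import types

options = types.ClassificationInferenceOptions(
    num_top_classes=3,
    prediction_field_type="string",
    results_field="predicted_value",
    top_classes_results_field="top_classes",
)
print(options.to_dict())
```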
class QueryBase(AttrDict[Any]):
"""
:arg boost: Floating point number used to decrease or increase the
@@ -366,221 +477,513 @@ def __init__(
super().__init__(kwargs)
-class FrequentItemSetsField(AttrDict[Any]):
+class FieldCollapse(AttrDict[Any]):
"""
- :arg field: (required)
- :arg exclude: Values to exclude. Can be regular expression strings or
- arrays of strings of exact terms.
- :arg include: Values to include. Can be regular expression strings or
- arrays of strings of exact terms.
+ :arg field: (required) The field to collapse the result set on
+ :arg inner_hits: The number of inner hits and their sort order
+ :arg max_concurrent_group_searches: The number of concurrent requests
+ allowed to retrieve the inner_hits per group
+ :arg collapse:
"""
field: Union[str, InstrumentedField, DefaultType]
- exclude: Union[str, Sequence[str], DefaultType]
- include: Union[str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType]
+ inner_hits: Union[
+ "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
+ ]
+ max_concurrent_group_searches: Union[int, DefaultType]
+ collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
def __init__(
self,
*,
field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- exclude: Union[str, Sequence[str], DefaultType] = DEFAULT,
- include: Union[
- str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType
+ inner_hits: Union[
+ "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
] = DEFAULT,
+ max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT,
+ collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
if field is not DEFAULT:
kwargs["field"] = str(field)
- if exclude is not DEFAULT:
- kwargs["exclude"] = exclude
- if include is not DEFAULT:
- kwargs["include"] = include
+ if inner_hits is not DEFAULT:
+ kwargs["inner_hits"] = inner_hits
+ if max_concurrent_group_searches is not DEFAULT:
+ kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches
+ if collapse is not DEFAULT:
+ kwargs["collapse"] = collapse
super().__init__(kwargs)
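A hedged usage sketch for `FieldCollapse`: the field and index names are hypothetical, and passing the typed object through `Search.extra()` is assumed to serialize the same way as the equivalent plain dict:

```python
from elasticsearch_dsl import Search, types

collapse = types.FieldCollapse(
    field="user_id",                                   # hypothetical keyword field
    inner_hits=types.InnerHits(name="latest", size=3),
    max_concurrent_group_searches=4,
)
search = Search(index="articles").extra(collapse=collapse)
```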
-class FunctionScoreContainer(AttrDict[Any]):
+class FieldLookup(AttrDict[Any]):
"""
- :arg exp: Function that scores a document with a exponential decay,
- depending on the distance of a numeric field value of the document
- from an origin.
- :arg gauss: Function that scores a document with a normal decay,
- depending on the distance of a numeric field value of the document
- from an origin.
- :arg linear: Function that scores a document with a linear decay,
- depending on the distance of a numeric field value of the document
- from an origin.
- :arg field_value_factor: Function allows you to use a field from a
- document to influence the score. It’s similar to using the
- script_score function, however, it avoids the overhead of
- scripting.
- :arg random_score: Generates scores that are uniformly distributed
- from 0 up to but not including 1. In case you want scores to be
- reproducible, it is possible to provide a `seed` and `field`.
- :arg script_score: Enables you to wrap another query and customize the
- scoring of it optionally with a computation derived from other
- numeric field values in the doc using a script expression.
- :arg filter:
- :arg weight:
+ :arg id: (required) `id` of the document.
+ :arg index: Index from which to retrieve the document.
+ :arg path: Name of the field.
+ :arg routing: Custom routing value.
"""
- exp: Union[function.DecayFunction, DefaultType]
- gauss: Union[function.DecayFunction, DefaultType]
- linear: Union[function.DecayFunction, DefaultType]
- field_value_factor: Union[function.FieldValueFactorScore, DefaultType]
- random_score: Union[function.RandomScore, DefaultType]
- script_score: Union[function.ScriptScore, DefaultType]
- filter: Union[Query, DefaultType]
- weight: Union[float, DefaultType]
+ id: Union[str, DefaultType]
+ index: Union[str, DefaultType]
+ path: Union[str, InstrumentedField, DefaultType]
+ routing: Union[str, DefaultType]
def __init__(
self,
*,
- exp: Union[function.DecayFunction, DefaultType] = DEFAULT,
- gauss: Union[function.DecayFunction, DefaultType] = DEFAULT,
- linear: Union[function.DecayFunction, DefaultType] = DEFAULT,
- field_value_factor: Union[
- function.FieldValueFactorScore, DefaultType
- ] = DEFAULT,
- random_score: Union[function.RandomScore, DefaultType] = DEFAULT,
- script_score: Union[function.ScriptScore, DefaultType] = DEFAULT,
- filter: Union[Query, DefaultType] = DEFAULT,
- weight: Union[float, DefaultType] = DEFAULT,
+ id: Union[str, DefaultType] = DEFAULT,
+ index: Union[str, DefaultType] = DEFAULT,
+ path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ routing: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if exp is not DEFAULT:
- kwargs["exp"] = exp
- if gauss is not DEFAULT:
- kwargs["gauss"] = gauss
- if linear is not DEFAULT:
- kwargs["linear"] = linear
- if field_value_factor is not DEFAULT:
- kwargs["field_value_factor"] = field_value_factor
- if random_score is not DEFAULT:
- kwargs["random_score"] = random_score
- if script_score is not DEFAULT:
- kwargs["script_score"] = script_score
- if filter is not DEFAULT:
- kwargs["filter"] = filter
- if weight is not DEFAULT:
- kwargs["weight"] = weight
+ if id is not DEFAULT:
+ kwargs["id"] = id
+ if index is not DEFAULT:
+ kwargs["index"] = index
+ if path is not DEFAULT:
+ kwargs["path"] = str(path)
+ if routing is not DEFAULT:
+ kwargs["routing"] = routing
super().__init__(kwargs)
-class FuzzyQuery(QueryBase):
+class FieldSort(AttrDict[Any]):
"""
- :arg value: (required) Term you wish to find in the provided field.
- :arg max_expansions: Maximum number of variations created. Defaults to
- `50` if omitted.
- :arg prefix_length: Number of beginning characters left unchanged when
- creating expansions.
- :arg rewrite: Number of beginning characters left unchanged when
- creating expansions. Defaults to `constant_score` if omitted.
- :arg transpositions: Indicates whether edits include transpositions of
- two adjacent characters (for example `ab` to `ba`). Defaults to
- `True` if omitted.
- :arg fuzziness: Maximum edit distance allowed for matching.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg missing:
+ :arg mode:
+ :arg nested:
+ :arg order:
+ :arg unmapped_type:
+ :arg numeric_type:
+ :arg format:
"""
- value: Union[str, float, bool, DefaultType]
- max_expansions: Union[int, DefaultType]
- prefix_length: Union[int, DefaultType]
- rewrite: Union[str, DefaultType]
- transpositions: Union[bool, DefaultType]
- fuzziness: Union[str, int, DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ missing: Union[str, int, float, bool, DefaultType]
+ mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
+ unmapped_type: Union[
+ Literal[
+ "none",
+ "geo_point",
+ "geo_shape",
+ "ip",
+ "binary",
+ "keyword",
+ "text",
+ "search_as_you_type",
+ "date",
+ "date_nanos",
+ "boolean",
+ "completion",
+ "nested",
+ "object",
+ "version",
+ "murmur3",
+ "token_count",
+ "percolator",
+ "integer",
+ "long",
+ "short",
+ "byte",
+ "float",
+ "half_float",
+ "scaled_float",
+ "double",
+ "integer_range",
+ "float_range",
+ "long_range",
+ "double_range",
+ "date_range",
+ "ip_range",
+ "alias",
+ "join",
+ "rank_feature",
+ "rank_features",
+ "flattened",
+ "shape",
+ "histogram",
+ "constant_keyword",
+ "aggregate_metric_double",
+ "dense_vector",
+ "semantic_text",
+ "sparse_vector",
+ "match_only_text",
+ "icu_collation_keyword",
+ ],
+ DefaultType,
+ ]
+ numeric_type: Union[Literal["long", "double", "date", "date_nanos"], DefaultType]
+ format: Union[str, DefaultType]
def __init__(
self,
*,
- value: Union[str, float, bool, DefaultType] = DEFAULT,
- max_expansions: Union[int, DefaultType] = DEFAULT,
- prefix_length: Union[int, DefaultType] = DEFAULT,
- rewrite: Union[str, DefaultType] = DEFAULT,
- transpositions: Union[bool, DefaultType] = DEFAULT,
- fuzziness: Union[str, int, DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], DefaultType
+ ] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ unmapped_type: Union[
+ Literal[
+ "none",
+ "geo_point",
+ "geo_shape",
+ "ip",
+ "binary",
+ "keyword",
+ "text",
+ "search_as_you_type",
+ "date",
+ "date_nanos",
+ "boolean",
+ "completion",
+ "nested",
+ "object",
+ "version",
+ "murmur3",
+ "token_count",
+ "percolator",
+ "integer",
+ "long",
+ "short",
+ "byte",
+ "float",
+ "half_float",
+ "scaled_float",
+ "double",
+ "integer_range",
+ "float_range",
+ "long_range",
+ "double_range",
+ "date_range",
+ "ip_range",
+ "alias",
+ "join",
+ "rank_feature",
+ "rank_features",
+ "flattened",
+ "shape",
+ "histogram",
+ "constant_keyword",
+ "aggregate_metric_double",
+ "dense_vector",
+ "semantic_text",
+ "sparse_vector",
+ "match_only_text",
+ "icu_collation_keyword",
+ ],
+ DefaultType,
+ ] = DEFAULT,
+ numeric_type: Union[
+ Literal["long", "double", "date", "date_nanos"], DefaultType
+ ] = DEFAULT,
+ format: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if value is not DEFAULT:
- kwargs["value"] = value
- if max_expansions is not DEFAULT:
- kwargs["max_expansions"] = max_expansions
- if prefix_length is not DEFAULT:
- kwargs["prefix_length"] = prefix_length
- if rewrite is not DEFAULT:
- kwargs["rewrite"] = rewrite
- if transpositions is not DEFAULT:
- kwargs["transpositions"] = transpositions
- if fuzziness is not DEFAULT:
- kwargs["fuzziness"] = fuzziness
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
-
-
-class GeoHashLocation(AttrDict[Any]):
- """
- :arg geohash: (required)
- """
-
- geohash: Union[str, DefaultType]
-
- def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
- if geohash is not DEFAULT:
- kwargs["geohash"] = geohash
+ if missing is not DEFAULT:
+ kwargs["missing"] = missing
+ if mode is not DEFAULT:
+ kwargs["mode"] = mode
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if unmapped_type is not DEFAULT:
+ kwargs["unmapped_type"] = unmapped_type
+ if numeric_type is not DEFAULT:
+ kwargs["numeric_type"] = numeric_type
+ if format is not DEFAULT:
+ kwargs["format"] = format
super().__init__(kwargs)
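A short sketch of `FieldSort`; the field and index are hypothetical, and embedding the typed object inside the sort dict is assumed to serialize like the plain-dict form:

```python
from elasticsearch_dsl import Search, types

# Sort descending, put documents without the field last, and compare as long.
price_sort = types.FieldSort(order="desc", missing="_last", numeric_type="long")
search = Search(index="products").sort({"price": price_sort})
```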
-class GeoLinePoint(AttrDict[Any]):
+class FrequentItemSetsField(AttrDict[Any]):
"""
- :arg field: (required) The name of the geo_point field.
+ :arg field: (required)
+ :arg exclude: Values to exclude. Can be regular expression strings or
+ arrays of strings of exact terms.
+ :arg include: Values to include. Can be regular expression strings or
+ arrays of strings of exact terms.
"""
field: Union[str, InstrumentedField, DefaultType]
+ exclude: Union[str, Sequence[str], DefaultType]
+ include: Union[str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType]
def __init__(
self,
*,
field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ exclude: Union[str, Sequence[str], DefaultType] = DEFAULT,
+ include: Union[
+ str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType
+ ] = DEFAULT,
**kwargs: Any,
):
if field is not DEFAULT:
kwargs["field"] = str(field)
+ if exclude is not DEFAULT:
+ kwargs["exclude"] = exclude
+ if include is not DEFAULT:
+ kwargs["include"] = include
super().__init__(kwargs)
-class GeoLineSort(AttrDict[Any]):
+class FunctionScoreContainer(AttrDict[Any]):
"""
- :arg field: (required) The name of the numeric field to use as the
- sort key for ordering the points.
+    :arg exp: Function that scores a document with an exponential decay,
+ depending on the distance of a numeric field value of the document
+ from an origin.
+ :arg gauss: Function that scores a document with a normal decay,
+ depending on the distance of a numeric field value of the document
+ from an origin.
+ :arg linear: Function that scores a document with a linear decay,
+ depending on the distance of a numeric field value of the document
+ from an origin.
+ :arg field_value_factor: Function allows you to use a field from a
+        document to influence the score. It’s similar to using the
+        script_score function; however, it avoids the overhead of
+        scripting.
+ :arg random_score: Generates scores that are uniformly distributed
+ from 0 up to but not including 1. In case you want scores to be
+ reproducible, it is possible to provide a `seed` and `field`.
+ :arg script_score: Enables you to wrap another query and customize the
+ scoring of it optionally with a computation derived from other
+ numeric field values in the doc using a script expression.
+ :arg filter:
+ :arg weight:
"""
- field: Union[str, InstrumentedField, DefaultType]
+ exp: Union[function.DecayFunction, DefaultType]
+ gauss: Union[function.DecayFunction, DefaultType]
+ linear: Union[function.DecayFunction, DefaultType]
+ field_value_factor: Union[function.FieldValueFactorScore, DefaultType]
+ random_score: Union[function.RandomScore, DefaultType]
+ script_score: Union[function.ScriptScore, DefaultType]
+ filter: Union[Query, DefaultType]
+ weight: Union[float, DefaultType]
def __init__(
self,
*,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ exp: Union[function.DecayFunction, DefaultType] = DEFAULT,
+ gauss: Union[function.DecayFunction, DefaultType] = DEFAULT,
+ linear: Union[function.DecayFunction, DefaultType] = DEFAULT,
+ field_value_factor: Union[
+ function.FieldValueFactorScore, DefaultType
+ ] = DEFAULT,
+ random_score: Union[function.RandomScore, DefaultType] = DEFAULT,
+ script_score: Union[function.ScriptScore, DefaultType] = DEFAULT,
+ filter: Union[Query, DefaultType] = DEFAULT,
+ weight: Union[float, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
+ if exp is not DEFAULT:
+ kwargs["exp"] = exp
+ if gauss is not DEFAULT:
+ kwargs["gauss"] = gauss
+ if linear is not DEFAULT:
+ kwargs["linear"] = linear
+ if field_value_factor is not DEFAULT:
+ kwargs["field_value_factor"] = field_value_factor
+ if random_score is not DEFAULT:
+ kwargs["random_score"] = random_score
+ if script_score is not DEFAULT:
+ kwargs["script_score"] = script_score
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ if weight is not DEFAULT:
+ kwargs["weight"] = weight
super().__init__(kwargs)
-class GeoPolygonPoints(AttrDict[Any]):
- """
- :arg points: (required)
+class FuzzyQuery(QueryBase):
+ """
+ :arg value: (required) Term you wish to find in the provided field.
+ :arg max_expansions: Maximum number of variations created. Defaults to
+ `50` if omitted.
+ :arg prefix_length: Number of beginning characters left unchanged when
+ creating expansions.
+    :arg rewrite: Method used to rewrite the query. Defaults to
+        `constant_score` if omitted.
+ :arg transpositions: Indicates whether edits include transpositions of
+ two adjacent characters (for example `ab` to `ba`). Defaults to
+ `True` if omitted.
+ :arg fuzziness: Maximum edit distance allowed for matching.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
+ """
+
+ value: Union[str, float, bool, DefaultType]
+ max_expansions: Union[int, DefaultType]
+ prefix_length: Union[int, DefaultType]
+ rewrite: Union[str, DefaultType]
+ transpositions: Union[bool, DefaultType]
+ fuzziness: Union[str, int, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ value: Union[str, float, bool, DefaultType] = DEFAULT,
+ max_expansions: Union[int, DefaultType] = DEFAULT,
+ prefix_length: Union[int, DefaultType] = DEFAULT,
+ rewrite: Union[str, DefaultType] = DEFAULT,
+ transpositions: Union[bool, DefaultType] = DEFAULT,
+ fuzziness: Union[str, int, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if value is not DEFAULT:
+ kwargs["value"] = value
+ if max_expansions is not DEFAULT:
+ kwargs["max_expansions"] = max_expansions
+ if prefix_length is not DEFAULT:
+ kwargs["prefix_length"] = prefix_length
+ if rewrite is not DEFAULT:
+ kwargs["rewrite"] = rewrite
+ if transpositions is not DEFAULT:
+ kwargs["transpositions"] = transpositions
+ if fuzziness is not DEFAULT:
+ kwargs["fuzziness"] = fuzziness
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
+
+
+class GeoDistanceSort(AttrDict[Any]):
+ """
+ :arg _field: The field to use in this query.
+ :arg _value: The query value for the field.
+ :arg mode:
+ :arg distance_type:
+ :arg ignore_unmapped:
+ :arg order:
+ :arg unit:
+ :arg nested:
+ """
+
+ _field: Union[str, "InstrumentedField", "DefaultType"]
+ _value: Union[
+ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
+ Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]],
+ Dict[str, Any],
+ "DefaultType",
+ ]
+ mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+ distance_type: Union[Literal["arc", "plane"], DefaultType]
+ ignore_unmapped: Union[bool, DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
+ unit: Union[
+ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+ ]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ _value: Union[
+ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
+ Sequence[
+ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+ ],
+ Dict[str, Any],
+ "DefaultType",
+ ] = DEFAULT,
+ *,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], DefaultType
+ ] = DEFAULT,
+ distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT,
+ ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ unit: Union[
+ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+ ] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if _field is not DEFAULT:
+ kwargs[str(_field)] = _value
+ if mode is not DEFAULT:
+ kwargs["mode"] = mode
+ if distance_type is not DEFAULT:
+ kwargs["distance_type"] = distance_type
+ if ignore_unmapped is not DEFAULT:
+ kwargs["ignore_unmapped"] = ignore_unmapped
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if unit is not DEFAULT:
+ kwargs["unit"] = unit
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
+ super().__init__(kwargs)
+
+
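`GeoDistanceSort` uses the positional `_field`/`_value` pair to build the `{field: origin}` part of the sort clause. An illustrative sketch; the field, index, and coordinates are made up:

```python
from elasticsearch_dsl import Search, types

geo_sort = types.GeoDistanceSort(
    "location",                                        # hypothetical geo_point field
    types.LatLonGeoLocation(lat=48.8566, lon=2.3522),  # sort origin
    order="asc",
    unit="km",
    distance_type="arc",
)
search = Search(index="places").sort({"_geo_distance": geo_sort})
```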
+class GeoHashLocation(AttrDict[Any]):
+ """
+ :arg geohash: (required)
+ """
+
+ geohash: Union[str, DefaultType]
+
+ def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
+ if geohash is not DEFAULT:
+ kwargs["geohash"] = geohash
+ super().__init__(kwargs)
+
+
+class GeoLinePoint(AttrDict[Any]):
+ """
+ :arg field: (required) The name of the geo_point field.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ super().__init__(kwargs)
+
+
+class GeoLineSort(AttrDict[Any]):
+ """
+ :arg field: (required) The name of the numeric field to use as the
+ sort key for ordering the points.
+ """
+
+ field: Union[str, InstrumentedField, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ super().__init__(kwargs)
+
+
+class GeoPolygonPoints(AttrDict[Any]):
+ """
+ :arg points: (required)
"""
points: Union[
@@ -1027,13 +1430,200 @@ def __init__(
super().__init__(**kwargs)
-class HoltLinearModelSettings(AttrDict[Any]):
+class HighlightField(HighlightBase):
"""
- :arg alpha:
- :arg beta:
+ :arg fragment_offset:
+ :arg matched_fields:
+ :arg analyzer:
+ :arg type:
+ :arg boundary_chars: A string that contains each boundary character.
+ Defaults to `.,!? \t\n` if omitted.
+ :arg boundary_max_scan: How far to scan for boundary characters.
+ Defaults to `20` if omitted.
+ :arg boundary_scanner: Specifies how to break the highlighted
+ fragments: chars, sentence, or word. Only valid for the unified
+ and fvh highlighters. Defaults to `sentence` for the `unified`
+ highlighter. Defaults to `chars` for the `fvh` highlighter.
+ :arg boundary_scanner_locale: Controls which locale is used to search
+ for sentence and word boundaries. This parameter takes a form of a
+ language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
+ Defaults to `Locale.ROOT` if omitted.
+ :arg force_source:
+ :arg fragmenter: Specifies how text should be broken up in highlight
+ snippets: `simple` or `span`. Only valid for the `plain`
+ highlighter. Defaults to `span` if omitted.
+ :arg fragment_size: The size of the highlighted fragment in
+ characters. Defaults to `100` if omitted.
+ :arg highlight_filter:
+ :arg highlight_query: Highlight matches for a query other than the
+ search query. This is especially useful if you use a rescore query
+ because those are not taken into account by highlighting by
+ default.
+ :arg max_fragment_length:
+ :arg max_analyzed_offset: If set to a non-negative value, highlighting
+ stops at this defined maximum limit. The rest of the text is not
+        processed, thus not highlighted and no error is returned. The
+        `max_analyzed_offset` query setting does not override the
+        `index.highlight.max_analyzed_offset` setting, which prevails when
+        it’s set to a lower value than the query setting.
+ :arg no_match_size: The amount of text you want to return from the
+ beginning of the field if there are no matching fragments to
+ highlight.
+ :arg number_of_fragments: The maximum number of fragments to return.
+ If the number of fragments is set to `0`, no fragments are
+ returned. Instead, the entire field contents are highlighted and
+ returned. This can be handy when you need to highlight short texts
+ such as a title or address, but fragmentation is not required. If
+ `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
+ to `5` if omitted.
+ :arg options:
+ :arg order: Sorts highlighted fragments by score when set to `score`.
+ By default, fragments will be output in the order they appear in
+ the field (order: `none`). Setting this option to `score` will
+ output the most relevant fragments first. Each highlighter applies
+ its own logic to compute relevancy scores. Defaults to `none` if
+ omitted.
+ :arg phrase_limit: Controls the number of matching phrases in a
+ document that are considered. Prevents the `fvh` highlighter from
+ analyzing too many phrases and consuming too much memory. When
+ using `matched_fields`, `phrase_limit` phrases per matched field
+ are considered. Raising the limit increases query time and
+ consumes more memory. Only supported by the `fvh` highlighter.
+ Defaults to `256` if omitted.
+ :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+ tags to use for the highlighted text. By default, highlighted text
+ is wrapped in `` and `` tags.
+ :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+ tags to use for the highlighted text. By default, highlighted text
+ is wrapped in `` and `` tags.
+    :arg require_field_match: By default, only fields that contain a
+ query match are highlighted. Set to `false` to highlight all
+ fields. Defaults to `True` if omitted.
+ :arg tags_schema: Set to `styled` to use the built-in tag schema.
"""
- alpha: Union[float, DefaultType]
+ fragment_offset: Union[int, DefaultType]
+ matched_fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ]
+ analyzer: Union[str, Dict[str, Any], DefaultType]
+ type: Union[Literal["plain", "fvh", "unified"], DefaultType]
+ boundary_chars: Union[str, DefaultType]
+ boundary_max_scan: Union[int, DefaultType]
+ boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
+ boundary_scanner_locale: Union[str, DefaultType]
+ force_source: Union[bool, DefaultType]
+ fragmenter: Union[Literal["simple", "span"], DefaultType]
+ fragment_size: Union[int, DefaultType]
+ highlight_filter: Union[bool, DefaultType]
+ highlight_query: Union[Query, DefaultType]
+ max_fragment_length: Union[int, DefaultType]
+ max_analyzed_offset: Union[int, DefaultType]
+ no_match_size: Union[int, DefaultType]
+ number_of_fragments: Union[int, DefaultType]
+ options: Union[Mapping[str, Any], DefaultType]
+ order: Union[Literal["score"], DefaultType]
+ phrase_limit: Union[int, DefaultType]
+ post_tags: Union[Sequence[str], DefaultType]
+ pre_tags: Union[Sequence[str], DefaultType]
+ require_field_match: Union[bool, DefaultType]
+ tags_schema: Union[Literal["styled"], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ fragment_offset: Union[int, DefaultType] = DEFAULT,
+ matched_fields: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
+ DefaultType,
+ ] = DEFAULT,
+ analyzer: Union[str, Dict[str, Any], DefaultType] = DEFAULT,
+ type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
+ boundary_chars: Union[str, DefaultType] = DEFAULT,
+ boundary_max_scan: Union[int, DefaultType] = DEFAULT,
+ boundary_scanner: Union[
+ Literal["chars", "sentence", "word"], DefaultType
+ ] = DEFAULT,
+ boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
+ force_source: Union[bool, DefaultType] = DEFAULT,
+ fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
+ fragment_size: Union[int, DefaultType] = DEFAULT,
+ highlight_filter: Union[bool, DefaultType] = DEFAULT,
+ highlight_query: Union[Query, DefaultType] = DEFAULT,
+ max_fragment_length: Union[int, DefaultType] = DEFAULT,
+ max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
+ no_match_size: Union[int, DefaultType] = DEFAULT,
+ number_of_fragments: Union[int, DefaultType] = DEFAULT,
+ options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["score"], DefaultType] = DEFAULT,
+ phrase_limit: Union[int, DefaultType] = DEFAULT,
+ post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+ pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+ require_field_match: Union[bool, DefaultType] = DEFAULT,
+ tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if fragment_offset is not DEFAULT:
+ kwargs["fragment_offset"] = fragment_offset
+ if matched_fields is not DEFAULT:
+ kwargs["matched_fields"] = str(matched_fields)
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if type is not DEFAULT:
+ kwargs["type"] = type
+ if boundary_chars is not DEFAULT:
+ kwargs["boundary_chars"] = boundary_chars
+ if boundary_max_scan is not DEFAULT:
+ kwargs["boundary_max_scan"] = boundary_max_scan
+ if boundary_scanner is not DEFAULT:
+ kwargs["boundary_scanner"] = boundary_scanner
+ if boundary_scanner_locale is not DEFAULT:
+ kwargs["boundary_scanner_locale"] = boundary_scanner_locale
+ if force_source is not DEFAULT:
+ kwargs["force_source"] = force_source
+ if fragmenter is not DEFAULT:
+ kwargs["fragmenter"] = fragmenter
+ if fragment_size is not DEFAULT:
+ kwargs["fragment_size"] = fragment_size
+ if highlight_filter is not DEFAULT:
+ kwargs["highlight_filter"] = highlight_filter
+ if highlight_query is not DEFAULT:
+ kwargs["highlight_query"] = highlight_query
+ if max_fragment_length is not DEFAULT:
+ kwargs["max_fragment_length"] = max_fragment_length
+ if max_analyzed_offset is not DEFAULT:
+ kwargs["max_analyzed_offset"] = max_analyzed_offset
+ if no_match_size is not DEFAULT:
+ kwargs["no_match_size"] = no_match_size
+ if number_of_fragments is not DEFAULT:
+ kwargs["number_of_fragments"] = number_of_fragments
+ if options is not DEFAULT:
+ kwargs["options"] = options
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if phrase_limit is not DEFAULT:
+ kwargs["phrase_limit"] = phrase_limit
+ if post_tags is not DEFAULT:
+ kwargs["post_tags"] = post_tags
+ if pre_tags is not DEFAULT:
+ kwargs["pre_tags"] = pre_tags
+ if require_field_match is not DEFAULT:
+ kwargs["require_field_match"] = require_field_match
+ if tags_schema is not DEFAULT:
+ kwargs["tags_schema"] = tags_schema
+ super().__init__(**kwargs)
+
+
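A hedged sketch of per-field highlight options; the field and index names are hypothetical, and unpacking `to_dict()` into `Search.highlight()` is one of several equivalent ways to pass them:

```python
from elasticsearch_dsl import Search, types

title_highlight = types.HighlightField(
    type="unified",
    fragment_size=120,
    number_of_fragments=2,
    pre_tags=["<em>"],
    post_tags=["</em>"],
)
search = Search(index="articles").highlight("title", **title_highlight.to_dict())
```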
+class HoltLinearModelSettings(AttrDict[Any]):
+ """
+ :arg alpha:
+ :arg beta:
+ """
+
+ alpha: Union[float, DefaultType]
beta: Union[float, DefaultType]
def __init__(
@@ -1255,22 +1845,84 @@ def __init__(
super().__init__(kwargs)
-class IntervalsQuery(QueryBase):
+class IntervalsAllOf(AttrDict[Any]):
+ """
+ :arg intervals: (required) An array of rules to combine. All rules
+ must produce a match in a document for the overall source to
+ match.
+ :arg max_gaps: Maximum number of positions between the matching terms.
+ Intervals produced by the rules further apart than this are not
+ considered matches. Defaults to `-1` if omitted.
+ :arg ordered: If `true`, intervals produced by the rules should appear
+ in the order in which they are specified.
+ :arg filter: Rule used to filter returned intervals.
+ """
+
+ intervals: Union[
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+ ]
+ max_gaps: Union[int, DefaultType]
+ ordered: Union[bool, DefaultType]
+ filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ intervals: Union[
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+ ] = DEFAULT,
+ max_gaps: Union[int, DefaultType] = DEFAULT,
+ ordered: Union[bool, DefaultType] = DEFAULT,
+ filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if intervals is not DEFAULT:
+ kwargs["intervals"] = intervals
+ if max_gaps is not DEFAULT:
+ kwargs["max_gaps"] = max_gaps
+ if ordered is not DEFAULT:
+ kwargs["ordered"] = ordered
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ super().__init__(kwargs)
+
+
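The container classes compose into an `intervals` query. A rough sketch with a hypothetical `my_text` field; `to_dict()` is used so the rule is embedded as a plain dict:

```python
from elasticsearch_dsl import Search, types

# Match "quick" and "fox" in order, at most two positions apart.
rule = types.IntervalsAllOf(
    intervals=[
        types.IntervalsContainer(match=types.IntervalsMatch(query="quick")),
        types.IntervalsContainer(match=types.IntervalsMatch(query="fox")),
    ],
    ordered=True,
    max_gaps=2,
)
search = Search(index="articles").query(
    "intervals", my_text={"all_of": rule.to_dict()}
)
```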
+class IntervalsAnyOf(AttrDict[Any]):
+ """
+ :arg intervals: (required) An array of rules to match.
+ :arg filter: Rule used to filter returned intervals.
+ """
+
+ intervals: Union[
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+ ]
+ filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+
+ def __init__(
+ self,
+ *,
+ intervals: Union[
+ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+ ] = DEFAULT,
+ filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if intervals is not DEFAULT:
+ kwargs["intervals"] = intervals
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ super().__init__(kwargs)
+
+
+class IntervalsContainer(AttrDict[Any]):
"""
:arg all_of: Returns matches that span a combination of other rules.
:arg any_of: Returns intervals produced by any of its sub-rules.
- :arg fuzzy: Matches terms that are similar to the provided term,
- within an edit distance defined by `fuzziness`.
+    :arg fuzzy: Matches terms that are similar to the provided term,
+        within an edit distance defined by `fuzziness`.
:arg match: Matches analyzed text.
:arg prefix: Matches terms that start with a specified set of
characters.
:arg wildcard: Matches terms using a wildcard pattern.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
"""
all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType]
@@ -1279,8 +1931,6 @@ class IntervalsQuery(QueryBase):
match: Union["IntervalsMatch", Dict[str, Any], DefaultType]
prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType]
wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
def __init__(
self,
@@ -1291,8 +1941,6 @@ def __init__(
match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT,
prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT,
wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
if all_of is not DEFAULT:
@@ -1307,350 +1955,225 @@ def __init__(
kwargs["prefix"] = prefix
if wildcard is not DEFAULT:
kwargs["wildcard"] = wildcard
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ super().__init__(kwargs)
-class IpRangeAggregationRange(AttrDict[Any]):
+class IntervalsFilter(AttrDict[Any]):
"""
- :arg from: Start of the range.
- :arg mask: IP range defined as a CIDR mask.
- :arg to: End of the range.
+ :arg after: Query used to return intervals that follow an interval
+ from the `filter` rule.
+ :arg before: Query used to return intervals that occur before an
+ interval from the `filter` rule.
+ :arg contained_by: Query used to return intervals contained by an
+ interval from the `filter` rule.
+ :arg containing: Query used to return intervals that contain an
+ interval from the `filter` rule.
+ :arg not_contained_by: Query used to return intervals that are **not**
+ contained by an interval from the `filter` rule.
+ :arg not_containing: Query used to return intervals that do **not**
+ contain an interval from the `filter` rule.
+ :arg not_overlapping: Query used to return intervals that do **not**
+ overlap with an interval from the `filter` rule.
+ :arg overlapping: Query used to return intervals that overlap with an
+ interval from the `filter` rule.
+ :arg script: Script used to return matching documents. This script
+ must return a boolean value: `true` or `false`.
"""
- from_: Union[str, None, DefaultType]
- mask: Union[str, DefaultType]
- to: Union[str, None, DefaultType]
+ after: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ before: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ containing: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ not_contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ not_containing: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ not_overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
def __init__(
self,
*,
- from_: Union[str, None, DefaultType] = DEFAULT,
- mask: Union[str, DefaultType] = DEFAULT,
- to: Union[str, None, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if from_ is not DEFAULT:
- kwargs["from_"] = from_
- if mask is not DEFAULT:
- kwargs["mask"] = mask
- if to is not DEFAULT:
- kwargs["to"] = to
- super().__init__(kwargs)
-
-
-class LatLonGeoLocation(AttrDict[Any]):
- """
- :arg lat: (required) Latitude
- :arg lon: (required) Longitude
- """
-
- lat: Union[float, DefaultType]
- lon: Union[float, DefaultType]
-
- def __init__(
- self,
- *,
- lat: Union[float, DefaultType] = DEFAULT,
- lon: Union[float, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if lat is not DEFAULT:
- kwargs["lat"] = lat
- if lon is not DEFAULT:
- kwargs["lon"] = lon
- super().__init__(kwargs)
-
-
-class LikeDocument(AttrDict[Any]):
- """
- :arg doc: A document not present in the index.
- :arg fields:
- :arg _id: ID of a document.
- :arg _index: Index of a document.
- :arg per_field_analyzer: Overrides the default analyzer.
- :arg routing:
- :arg version:
- :arg version_type: Defaults to `'internal'` if omitted.
- """
-
- doc: Any
- fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType]
- _id: Union[str, DefaultType]
- _index: Union[str, DefaultType]
- per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType]
- routing: Union[str, DefaultType]
- version: Union[int, DefaultType]
- version_type: Union[
- Literal["internal", "external", "external_gte", "force"], DefaultType
- ]
-
- def __init__(
- self,
- *,
- doc: Any = DEFAULT,
- fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT,
- _id: Union[str, DefaultType] = DEFAULT,
- _index: Union[str, DefaultType] = DEFAULT,
- per_field_analyzer: Union[
- Mapping[Union[str, InstrumentedField], str], DefaultType
+ after: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+ before: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+ contained_by: Union[
+ "IntervalsContainer", Dict[str, Any], DefaultType
] = DEFAULT,
- routing: Union[str, DefaultType] = DEFAULT,
- version: Union[int, DefaultType] = DEFAULT,
- version_type: Union[
- Literal["internal", "external", "external_gte", "force"], DefaultType
+ containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+ not_contained_by: Union[
+ "IntervalsContainer", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ not_containing: Union[
+ "IntervalsContainer", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ not_overlapping: Union[
+ "IntervalsContainer", Dict[str, Any], DefaultType
] = DEFAULT,
+ overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if doc is not DEFAULT:
- kwargs["doc"] = doc
- if fields is not DEFAULT:
- kwargs["fields"] = str(fields)
- if _id is not DEFAULT:
- kwargs["_id"] = _id
- if _index is not DEFAULT:
- kwargs["_index"] = _index
- if per_field_analyzer is not DEFAULT:
- kwargs["per_field_analyzer"] = str(per_field_analyzer)
- if routing is not DEFAULT:
- kwargs["routing"] = routing
- if version is not DEFAULT:
- kwargs["version"] = version
- if version_type is not DEFAULT:
- kwargs["version_type"] = version_type
+ if after is not DEFAULT:
+ kwargs["after"] = after
+ if before is not DEFAULT:
+ kwargs["before"] = before
+ if contained_by is not DEFAULT:
+ kwargs["contained_by"] = contained_by
+ if containing is not DEFAULT:
+ kwargs["containing"] = containing
+ if not_contained_by is not DEFAULT:
+ kwargs["not_contained_by"] = not_contained_by
+ if not_containing is not DEFAULT:
+ kwargs["not_containing"] = not_containing
+ if not_overlapping is not DEFAULT:
+ kwargs["not_overlapping"] = not_overlapping
+ if overlapping is not DEFAULT:
+ kwargs["overlapping"] = overlapping
+ if script is not DEFAULT:
+ kwargs["script"] = script
super().__init__(kwargs)
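`IntervalsFilter` restricts which intervals a rule may return. A small illustrative combination; all terms are made up:

```python
from elasticsearch_dsl import types

# Keep "quick fox" intervals only when they occur before an interval
# matching "lazy".
filtered = types.IntervalsMatch(
    query="quick fox",
    max_gaps=1,
    filter=types.IntervalsFilter(
        before=types.IntervalsContainer(match=types.IntervalsMatch(query="lazy")),
    ),
)
print(filtered.to_dict())
```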
-class MatchBoolPrefixQuery(QueryBase):
+class IntervalsFuzzy(AttrDict[Any]):
"""
- :arg query: (required) Terms you wish to find in the provided field.
- The last term is used in a prefix query.
- :arg analyzer: Analyzer used to convert the text in the query value
- into tokens.
- :arg fuzziness: Maximum edit distance allowed for matching. Can be
- applied to the term subqueries constructed for all terms but the
- final term.
- :arg fuzzy_rewrite: Method used to rewrite the query. Can be applied
- to the term subqueries constructed for all terms but the final
- term.
- :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
- transpositions of two adjacent characters (for example, `ab` to
- `ba`). Can be applied to the term subqueries constructed for all
- terms but the final term. Defaults to `True` if omitted.
- :arg max_expansions: Maximum number of terms to which the query will
- expand. Can be applied to the term subqueries constructed for all
- terms but the final term. Defaults to `50` if omitted.
- :arg minimum_should_match: Minimum number of clauses that must match
- for a document to be returned. Applied to the constructed bool
- query.
- :arg operator: Boolean logic used to interpret text in the query
- value. Applied to the constructed bool query. Defaults to `'or'`
- if omitted.
- :arg prefix_length: Number of beginning characters left unchanged for
- fuzzy matching. Can be applied to the term subqueries constructed
- for all terms but the final term.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg term: (required) The term to match.
+ :arg analyzer: Analyzer used to normalize the term.
+ :arg fuzziness: Maximum edit distance allowed for matching. Defaults
+ to `auto` if omitted.
+ :arg prefix_length: Number of beginning characters left unchanged when
+ creating expansions.
+ :arg transpositions: Indicates whether edits include transpositions of
+ two adjacent characters (for example, `ab` to `ba`). Defaults to
+ `True` if omitted.
+ :arg use_field: If specified, match intervals from this field rather
+ than the top-level field. The `term` is normalized using the
+ search analyzer from this field, unless `analyzer` is specified
+ separately.
"""
- query: Union[str, DefaultType]
+ term: Union[str, DefaultType]
analyzer: Union[str, DefaultType]
fuzziness: Union[str, int, DefaultType]
- fuzzy_rewrite: Union[str, DefaultType]
- fuzzy_transpositions: Union[bool, DefaultType]
- max_expansions: Union[int, DefaultType]
- minimum_should_match: Union[int, str, DefaultType]
- operator: Union[Literal["and", "or"], DefaultType]
prefix_length: Union[int, DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ transpositions: Union[bool, DefaultType]
+ use_field: Union[str, InstrumentedField, DefaultType]
def __init__(
self,
*,
- query: Union[str, DefaultType] = DEFAULT,
+ term: Union[str, DefaultType] = DEFAULT,
analyzer: Union[str, DefaultType] = DEFAULT,
fuzziness: Union[str, int, DefaultType] = DEFAULT,
- fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
- fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
- max_expansions: Union[int, DefaultType] = DEFAULT,
- minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
- operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
prefix_length: Union[int, DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ transpositions: Union[bool, DefaultType] = DEFAULT,
+ use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if query is not DEFAULT:
- kwargs["query"] = query
+ if term is not DEFAULT:
+ kwargs["term"] = term
if analyzer is not DEFAULT:
kwargs["analyzer"] = analyzer
if fuzziness is not DEFAULT:
kwargs["fuzziness"] = fuzziness
- if fuzzy_rewrite is not DEFAULT:
- kwargs["fuzzy_rewrite"] = fuzzy_rewrite
- if fuzzy_transpositions is not DEFAULT:
- kwargs["fuzzy_transpositions"] = fuzzy_transpositions
- if max_expansions is not DEFAULT:
- kwargs["max_expansions"] = max_expansions
- if minimum_should_match is not DEFAULT:
- kwargs["minimum_should_match"] = minimum_should_match
- if operator is not DEFAULT:
- kwargs["operator"] = operator
if prefix_length is not DEFAULT:
kwargs["prefix_length"] = prefix_length
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if transpositions is not DEFAULT:
+ kwargs["transpositions"] = transpositions
+ if use_field is not DEFAULT:
+ kwargs["use_field"] = str(use_field)
+ super().__init__(kwargs)
-class MatchPhrasePrefixQuery(QueryBase):
+class IntervalsMatch(AttrDict[Any]):
"""
:arg query: (required) Text you wish to find in the provided field.
- :arg analyzer: Analyzer used to convert text in the query value into
- tokens.
- :arg max_expansions: Maximum number of terms to which the last
- provided term of the query value will expand. Defaults to `50` if
- omitted.
- :arg slop: Maximum number of positions allowed between matching
- tokens.
- :arg zero_terms_query: Indicates whether no documents are returned if
- the analyzer removes all tokens, such as when using a `stop`
- filter. Defaults to `none` if omitted.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg analyzer: Analyzer used to analyze terms in the query.
+ :arg max_gaps: Maximum number of positions between the matching terms.
+ Terms further apart than this are not considered matches. Defaults
+ to `-1` if omitted.
+ :arg ordered: If `true`, matching terms must appear in their specified
+ order.
+ :arg use_field: If specified, match intervals from this field rather
+ than the top-level field. The `term` is normalized using the
+ search analyzer from this field, unless `analyzer` is specified
+ separately.
+ :arg filter: An optional interval filter.
"""
query: Union[str, DefaultType]
analyzer: Union[str, DefaultType]
- max_expansions: Union[int, DefaultType]
- slop: Union[int, DefaultType]
- zero_terms_query: Union[Literal["all", "none"], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ max_gaps: Union[int, DefaultType]
+ ordered: Union[bool, DefaultType]
+ use_field: Union[str, InstrumentedField, DefaultType]
+ filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
def __init__(
self,
*,
query: Union[str, DefaultType] = DEFAULT,
analyzer: Union[str, DefaultType] = DEFAULT,
- max_expansions: Union[int, DefaultType] = DEFAULT,
- slop: Union[int, DefaultType] = DEFAULT,
- zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ max_gaps: Union[int, DefaultType] = DEFAULT,
+ ordered: Union[bool, DefaultType] = DEFAULT,
+ use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
if query is not DEFAULT:
kwargs["query"] = query
if analyzer is not DEFAULT:
kwargs["analyzer"] = analyzer
- if max_expansions is not DEFAULT:
- kwargs["max_expansions"] = max_expansions
- if slop is not DEFAULT:
- kwargs["slop"] = slop
- if zero_terms_query is not DEFAULT:
- kwargs["zero_terms_query"] = zero_terms_query
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if max_gaps is not DEFAULT:
+ kwargs["max_gaps"] = max_gaps
+ if ordered is not DEFAULT:
+ kwargs["ordered"] = ordered
+ if use_field is not DEFAULT:
+ kwargs["use_field"] = str(use_field)
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ super().__init__(kwargs)
-class MatchPhraseQuery(QueryBase):
+class IntervalsPrefix(AttrDict[Any]):
"""
- :arg query: (required) Query terms that are analyzed and turned into a
- phrase query.
- :arg analyzer: Analyzer used to convert the text in the query value
- into tokens.
- :arg slop: Maximum number of positions allowed between matching
- tokens.
- :arg zero_terms_query: Indicates whether no documents are returned if
- the `analyzer` removes all tokens, such as when using a `stop`
- filter. Defaults to `'none'` if omitted.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg prefix: (required) Beginning characters of terms you wish to find
+ in the top-level field.
+ :arg analyzer: Analyzer used to analyze the `prefix`.
+ :arg use_field: If specified, match intervals from this field rather
+ than the top-level field. The `prefix` is normalized using the
+ search analyzer from this field, unless `analyzer` is specified
+ separately.
"""
- query: Union[str, DefaultType]
+ prefix: Union[str, DefaultType]
analyzer: Union[str, DefaultType]
- slop: Union[int, DefaultType]
- zero_terms_query: Union[Literal["all", "none"], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ use_field: Union[str, InstrumentedField, DefaultType]
def __init__(
self,
*,
- query: Union[str, DefaultType] = DEFAULT,
+ prefix: Union[str, DefaultType] = DEFAULT,
analyzer: Union[str, DefaultType] = DEFAULT,
- slop: Union[int, DefaultType] = DEFAULT,
- zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if query is not DEFAULT:
- kwargs["query"] = query
+ if prefix is not DEFAULT:
+ kwargs["prefix"] = prefix
if analyzer is not DEFAULT:
kwargs["analyzer"] = analyzer
- if slop is not DEFAULT:
- kwargs["slop"] = slop
- if zero_terms_query is not DEFAULT:
- kwargs["zero_terms_query"] = zero_terms_query
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if use_field is not DEFAULT:
+ kwargs["use_field"] = str(use_field)
+ super().__init__(kwargs)
-class MatchQuery(QueryBase):
+class IntervalsQuery(QueryBase):
"""
- :arg query: (required) Text, number, boolean value or date you wish to
- find in the provided field.
- :arg analyzer: Analyzer used to convert the text in the query value
- into tokens.
- :arg auto_generate_synonyms_phrase_query: If `true`, match phrase
- queries are automatically created for multi-term synonyms.
- Defaults to `True` if omitted.
- :arg cutoff_frequency:
- :arg fuzziness: Maximum edit distance allowed for matching.
- :arg fuzzy_rewrite: Method used to rewrite the query.
- :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
- transpositions of two adjacent characters (for example, `ab` to
- `ba`). Defaults to `True` if omitted.
- :arg lenient: If `true`, format-based errors, such as providing a text
- query value for a numeric field, are ignored.
- :arg max_expansions: Maximum number of terms to which the query will
- expand. Defaults to `50` if omitted.
- :arg minimum_should_match: Minimum number of clauses that must match
- for a document to be returned.
- :arg operator: Boolean logic used to interpret text in the query
- value. Defaults to `'or'` if omitted.
- :arg prefix_length: Number of beginning characters left unchanged for
- fuzzy matching.
- :arg zero_terms_query: Indicates whether no documents are returned if
- the `analyzer` removes all tokens, such as when using a `stop`
- filter. Defaults to `'none'` if omitted.
+ :arg all_of: Returns matches that span a combination of other rules.
+ :arg any_of: Returns intervals produced by any of its sub-rules.
+ :arg fuzzy: Matches terms that are similar to the provided term,
+ within an edit distance defined by `fuzziness`.
+ :arg match: Matches analyzed text.
+ :arg prefix: Matches terms that start with a specified set of
+ characters.
+ :arg wildcard: Matches terms using a wildcard pattern.
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -1659,70 +2182,40 @@ class MatchQuery(QueryBase):
:arg _name:
"""
- query: Union[str, float, bool, DefaultType]
- analyzer: Union[str, DefaultType]
- auto_generate_synonyms_phrase_query: Union[bool, DefaultType]
- cutoff_frequency: Union[float, DefaultType]
- fuzziness: Union[str, int, DefaultType]
- fuzzy_rewrite: Union[str, DefaultType]
- fuzzy_transpositions: Union[bool, DefaultType]
- lenient: Union[bool, DefaultType]
- max_expansions: Union[int, DefaultType]
- minimum_should_match: Union[int, str, DefaultType]
- operator: Union[Literal["and", "or"], DefaultType]
- prefix_length: Union[int, DefaultType]
- zero_terms_query: Union[Literal["all", "none"], DefaultType]
+ all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType]
+ any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType]
+ fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType]
+ match: Union["IntervalsMatch", Dict[str, Any], DefaultType]
+ prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType]
+ wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- query: Union[str, float, bool, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT,
- cutoff_frequency: Union[float, DefaultType] = DEFAULT,
- fuzziness: Union[str, int, DefaultType] = DEFAULT,
- fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
- fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
- lenient: Union[bool, DefaultType] = DEFAULT,
- max_expansions: Union[int, DefaultType] = DEFAULT,
- minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
- operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
- prefix_length: Union[int, DefaultType] = DEFAULT,
- zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+ all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT,
+ any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT,
+ fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT,
+ match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT,
+ prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT,
+ wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if query is not DEFAULT:
- kwargs["query"] = query
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if auto_generate_synonyms_phrase_query is not DEFAULT:
- kwargs["auto_generate_synonyms_phrase_query"] = (
- auto_generate_synonyms_phrase_query
- )
- if cutoff_frequency is not DEFAULT:
- kwargs["cutoff_frequency"] = cutoff_frequency
- if fuzziness is not DEFAULT:
- kwargs["fuzziness"] = fuzziness
- if fuzzy_rewrite is not DEFAULT:
- kwargs["fuzzy_rewrite"] = fuzzy_rewrite
- if fuzzy_transpositions is not DEFAULT:
- kwargs["fuzzy_transpositions"] = fuzzy_transpositions
- if lenient is not DEFAULT:
- kwargs["lenient"] = lenient
- if max_expansions is not DEFAULT:
- kwargs["max_expansions"] = max_expansions
- if minimum_should_match is not DEFAULT:
- kwargs["minimum_should_match"] = minimum_should_match
- if operator is not DEFAULT:
- kwargs["operator"] = operator
- if prefix_length is not DEFAULT:
- kwargs["prefix_length"] = prefix_length
- if zero_terms_query is not DEFAULT:
- kwargs["zero_terms_query"] = zero_terms_query
+ if all_of is not DEFAULT:
+ kwargs["all_of"] = all_of
+ if any_of is not DEFAULT:
+ kwargs["any_of"] = any_of
+ if fuzzy is not DEFAULT:
+ kwargs["fuzzy"] = fuzzy
+ if match is not DEFAULT:
+ kwargs["match"] = match
+ if prefix is not DEFAULT:
+ kwargs["prefix"] = prefix
+ if wildcard is not DEFAULT:
+ kwargs["wildcard"] = wildcard
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -1730,180 +2223,176 @@ def __init__(
super().__init__(**kwargs)
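A hypothetical usage sketch (not part of this patch): wrapping a rule in the new IntervalsQuery container and handing it to the `Q` shortcut, assuming the typed objects are accepted wherever the annotations above also allow a plain Dict[str, Any]. The `title` field name is a placeholder.

# Illustration only: a named intervals query built from the typed helpers.
from elasticsearch_dsl import Q, types

q = Q(
    "intervals",
    title=types.IntervalsQuery(
        prefix=types.IntervalsPrefix(prefix="porr"),
        _name="title_prefix",
    ),
)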
-class PipelineAggregationBase(BucketPathAggregation):
+class IntervalsWildcard(AttrDict[Any]):
"""
- :arg format: `DecimalFormat` pattern for the output value. If
- specified, the formatted value is returned in the aggregation’s
- `value_as_string` property.
- :arg gap_policy: Policy to apply when gaps are found in the data.
- Defaults to `skip` if omitted.
- :arg buckets_path: Path to the buckets that contain one set of values
- to correlate.
+ :arg pattern: (required) Wildcard pattern used to find matching terms.
+ :arg analyzer: Analyzer used to analyze the `pattern`. Defaults to the
+ top-level field's analyzer.
+ :arg use_field: If specified, match intervals from this field rather
+ than the top-level field. The `pattern` is normalized using the
+ search analyzer from this field, unless `analyzer` is specified
+ separately.
"""
- format: Union[str, DefaultType]
- gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
- buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
+ pattern: Union[str, DefaultType]
+ analyzer: Union[str, DefaultType]
+ use_field: Union[str, InstrumentedField, DefaultType]
def __init__(
self,
*,
- format: Union[str, DefaultType] = DEFAULT,
- gap_policy: Union[
- Literal["skip", "insert_zeros", "keep_values"], DefaultType
- ] = DEFAULT,
- buckets_path: Union[
- str, Sequence[str], Mapping[str, str], DefaultType
- ] = DEFAULT,
+ pattern: Union[str, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if format is not DEFAULT:
- kwargs["format"] = format
- if gap_policy is not DEFAULT:
- kwargs["gap_policy"] = gap_policy
- if buckets_path is not DEFAULT:
- kwargs["buckets_path"] = buckets_path
- super().__init__(**kwargs)
+ if pattern is not DEFAULT:
+ kwargs["pattern"] = pattern
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if use_field is not DEFAULT:
+ kwargs["use_field"] = str(use_field)
+ super().__init__(kwargs)
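Again illustrative only: a wildcard rule resolved against a keyword sub-field via `use_field` (the sub-field name is an assumption).

# Illustration only: wildcard interval rule matched against `title.keyword`.
from elasticsearch_dsl import types

wildcard_rule = types.IntervalsWildcard(pattern="hot*", use_field="title.keyword")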
-class MovingAverageAggregationBase(PipelineAggregationBase):
+class IpRangeAggregationRange(AttrDict[Any]):
"""
- :arg minimize:
- :arg predict:
- :arg window:
- :arg format: `DecimalFormat` pattern for the output value. If
- specified, the formatted value is returned in the aggregation’s
- `value_as_string` property.
- :arg gap_policy: Policy to apply when gaps are found in the data.
- Defaults to `skip` if omitted.
- :arg buckets_path: Path to the buckets that contain one set of values
- to correlate.
+ :arg from: Start of the range.
+ :arg mask: IP range defined as a CIDR mask.
+ :arg to: End of the range.
"""
- minimize: Union[bool, DefaultType]
- predict: Union[int, DefaultType]
- window: Union[int, DefaultType]
- format: Union[str, DefaultType]
- gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
- buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
+ from_: Union[str, None, DefaultType]
+ mask: Union[str, DefaultType]
+ to: Union[str, None, DefaultType]
def __init__(
self,
*,
- minimize: Union[bool, DefaultType] = DEFAULT,
- predict: Union[int, DefaultType] = DEFAULT,
- window: Union[int, DefaultType] = DEFAULT,
- format: Union[str, DefaultType] = DEFAULT,
- gap_policy: Union[
- Literal["skip", "insert_zeros", "keep_values"], DefaultType
- ] = DEFAULT,
- buckets_path: Union[
- str, Sequence[str], Mapping[str, str], DefaultType
- ] = DEFAULT,
- **kwargs: Any,
- ):
- if minimize is not DEFAULT:
- kwargs["minimize"] = minimize
- if predict is not DEFAULT:
- kwargs["predict"] = predict
- if window is not DEFAULT:
- kwargs["window"] = window
- if format is not DEFAULT:
- kwargs["format"] = format
- if gap_policy is not DEFAULT:
- kwargs["gap_policy"] = gap_policy
- if buckets_path is not DEFAULT:
- kwargs["buckets_path"] = buckets_path
- super().__init__(**kwargs)
-
-
-class MultiTermLookup(AttrDict[Any]):
- """
- :arg field: (required) A fields from which to retrieve terms.
- :arg missing: The value to apply to documents that do not have a
- value. By default, documents without a value are ignored.
- """
-
- field: Union[str, InstrumentedField, DefaultType]
- missing: Union[str, int, float, bool, DefaultType]
-
- def __init__(
- self,
- *,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
+ from_: Union[str, None, DefaultType] = DEFAULT,
+ mask: Union[str, DefaultType] = DEFAULT,
+ to: Union[str, None, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
- if missing is not DEFAULT:
- kwargs["missing"] = missing
+ if from_ is not DEFAULT:
+ kwargs["from_"] = from_
+ if mask is not DEFAULT:
+ kwargs["mask"] = mask
+ if to is not DEFAULT:
+ kwargs["to"] = to
super().__init__(kwargs)
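An illustrative sketch (not part of this patch): feeding the new range helper into an `ip_range` aggregation through the `A` shortcut; the field name and CIDR masks are placeholders.

# Illustration only: CIDR-based buckets for an ip_range aggregation.
from elasticsearch_dsl import A, types

agg = A(
    "ip_range",
    field="client_ip",
    ranges=[
        types.IpRangeAggregationRange(mask="10.0.0.0/25"),
        types.IpRangeAggregationRange(mask="10.0.0.128/25"),
    ],
)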
-class MutualInformationHeuristic(AttrDict[Any]):
+class LatLonGeoLocation(AttrDict[Any]):
"""
- :arg background_is_superset: Set to `false` if you defined a custom
- background filter that represents a different set of documents
- that you want to compare to.
- :arg include_negatives: Set to `false` to filter out the terms that
- appear less often in the subset than in documents outside the
- subset.
+ :arg lat: (required) Latitude
+ :arg lon: (required) Longitude
"""
- background_is_superset: Union[bool, DefaultType]
- include_negatives: Union[bool, DefaultType]
+ lat: Union[float, DefaultType]
+ lon: Union[float, DefaultType]
def __init__(
self,
*,
- background_is_superset: Union[bool, DefaultType] = DEFAULT,
- include_negatives: Union[bool, DefaultType] = DEFAULT,
+ lat: Union[float, DefaultType] = DEFAULT,
+ lon: Union[float, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if background_is_superset is not DEFAULT:
- kwargs["background_is_superset"] = background_is_superset
- if include_negatives is not DEFAULT:
- kwargs["include_negatives"] = include_negatives
+ if lat is not DEFAULT:
+ kwargs["lat"] = lat
+ if lon is not DEFAULT:
+ kwargs["lon"] = lon
super().__init__(kwargs)
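Illustration only: the lat/lon helper used as the point of a `geo_distance` query; the `location` field name is a placeholder and the composition with `Q` is an assumption.

# Illustration only: filter documents within 2km of a fixed point.
from elasticsearch_dsl import Q, types

q = Q(
    "geo_distance",
    distance="2km",
    location=types.LatLonGeoLocation(lat=52.37, lon=4.89),
)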
-class PercentageScoreHeuristic(AttrDict[Any]):
- pass
-
-
-class PinnedDoc(AttrDict[Any]):
+class LikeDocument(AttrDict[Any]):
"""
- :arg _id: (required) The unique document ID.
- :arg _index: (required) The index that contains the document.
+ :arg doc: A document not present in the index.
+ :arg fields:
+ :arg _id: ID of a document.
+ :arg _index: Index of a document.
+ :arg per_field_analyzer: Overrides the default analyzer.
+ :arg routing:
+ :arg version:
+ :arg version_type: Defaults to `'internal'` if omitted.
"""
+ doc: Any
+ fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType]
_id: Union[str, DefaultType]
_index: Union[str, DefaultType]
+ per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType]
+ routing: Union[str, DefaultType]
+ version: Union[int, DefaultType]
+ version_type: Union[
+ Literal["internal", "external", "external_gte", "force"], DefaultType
+ ]
def __init__(
self,
*,
+ doc: Any = DEFAULT,
+ fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT,
_id: Union[str, DefaultType] = DEFAULT,
_index: Union[str, DefaultType] = DEFAULT,
+ per_field_analyzer: Union[
+ Mapping[Union[str, InstrumentedField], str], DefaultType
+ ] = DEFAULT,
+ routing: Union[str, DefaultType] = DEFAULT,
+ version: Union[int, DefaultType] = DEFAULT,
+ version_type: Union[
+ Literal["internal", "external", "external_gte", "force"], DefaultType
+ ] = DEFAULT,
**kwargs: Any,
):
+ if doc is not DEFAULT:
+ kwargs["doc"] = doc
+ if fields is not DEFAULT:
+ kwargs["fields"] = str(fields)
if _id is not DEFAULT:
kwargs["_id"] = _id
if _index is not DEFAULT:
kwargs["_index"] = _index
+ if per_field_analyzer is not DEFAULT:
+ kwargs["per_field_analyzer"] = str(per_field_analyzer)
+ if routing is not DEFAULT:
+ kwargs["routing"] = routing
+ if version is not DEFAULT:
+ kwargs["version"] = version
+ if version_type is not DEFAULT:
+ kwargs["version_type"] = version_type
super().__init__(kwargs)
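A hypothetical sketch (not part of this patch): LikeDocument items as the `like` clause of a `more_like_this` query; the index, id and field names are placeholders.

# Illustration only: one indexed document and one artificial document as
# "like" items of a more_like_this query.
from elasticsearch_dsl import Q, types

q = Q(
    "more_like_this",
    fields=["title", "body"],
    like=[
        types.LikeDocument(_index="blog", _id="1"),
        types.LikeDocument(doc={"title": "elasticsearch basics"}),
    ],
    min_term_freq=1,
)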
-class PrefixQuery(QueryBase):
+class MatchBoolPrefixQuery(QueryBase):
"""
- :arg value: (required) Beginning characters of terms you wish to find
- in the provided field.
- :arg rewrite: Method used to rewrite the query.
- :arg case_insensitive: Allows ASCII case insensitive matching of the
- value with the indexed field values when set to `true`. Default is
- `false` which means the case sensitivity of matching depends on
- the underlying field’s mapping.
+ :arg query: (required) Terms you wish to find in the provided field.
+ The last term is used in a prefix query.
+ :arg analyzer: Analyzer used to convert the text in the query value
+ into tokens.
+ :arg fuzziness: Maximum edit distance allowed for matching. Can be
+ applied to the term subqueries constructed for all terms but the
+ final term.
+ :arg fuzzy_rewrite: Method used to rewrite the query. Can be applied
+ to the term subqueries constructed for all terms but the final
+ term.
+ :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+ transpositions of two adjacent characters (for example, `ab` to
+ `ba`). Can be applied to the term subqueries constructed for all
+ terms but the final term. Defaults to `True` if omitted.
+ :arg max_expansions: Maximum number of terms to which the query will
+ expand. Can be applied to the term subqueries constructed for all
+ terms but the final term. Defaults to `50` if omitted.
+ :arg minimum_should_match: Minimum number of clauses that must match
+ for a document to be returned. Applied to the constructed bool
+ query.
+ :arg operator: Boolean logic used to interpret text in the query
+ value. Applied to the constructed bool query. Defaults to `'or'`
+ if omitted.
+ :arg prefix_length: Number of beginning characters left unchanged for
+ fuzzy matching. Can be applied to the term subqueries constructed
+ for all terms but the final term.
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -1912,28 +2401,52 @@ class PrefixQuery(QueryBase):
:arg _name:
"""
- value: Union[str, DefaultType]
- rewrite: Union[str, DefaultType]
- case_insensitive: Union[bool, DefaultType]
+ query: Union[str, DefaultType]
+ analyzer: Union[str, DefaultType]
+ fuzziness: Union[str, int, DefaultType]
+ fuzzy_rewrite: Union[str, DefaultType]
+ fuzzy_transpositions: Union[bool, DefaultType]
+ max_expansions: Union[int, DefaultType]
+ minimum_should_match: Union[int, str, DefaultType]
+ operator: Union[Literal["and", "or"], DefaultType]
+ prefix_length: Union[int, DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- value: Union[str, DefaultType] = DEFAULT,
- rewrite: Union[str, DefaultType] = DEFAULT,
- case_insensitive: Union[bool, DefaultType] = DEFAULT,
+ query: Union[str, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ fuzziness: Union[str, int, DefaultType] = DEFAULT,
+ fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
+ fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
+ max_expansions: Union[int, DefaultType] = DEFAULT,
+ minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+ operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+ prefix_length: Union[int, DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if value is not DEFAULT:
- kwargs["value"] = value
- if rewrite is not DEFAULT:
- kwargs["rewrite"] = rewrite
- if case_insensitive is not DEFAULT:
- kwargs["case_insensitive"] = case_insensitive
+ if query is not DEFAULT:
+ kwargs["query"] = query
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if fuzziness is not DEFAULT:
+ kwargs["fuzziness"] = fuzziness
+ if fuzzy_rewrite is not DEFAULT:
+ kwargs["fuzzy_rewrite"] = fuzzy_rewrite
+ if fuzzy_transpositions is not DEFAULT:
+ kwargs["fuzzy_transpositions"] = fuzzy_transpositions
+ if max_expansions is not DEFAULT:
+ kwargs["max_expansions"] = max_expansions
+ if minimum_should_match is not DEFAULT:
+ kwargs["minimum_should_match"] = minimum_should_match
+ if operator is not DEFAULT:
+ kwargs["operator"] = operator
+ if prefix_length is not DEFAULT:
+ kwargs["prefix_length"] = prefix_length
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -1941,97 +2454,144 @@ def __init__(
super().__init__(**kwargs)
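Illustration only, assuming a `match_bool_prefix` query is available through `Q`: the typed options object replaces the usual per-field dict; the `message` field is a placeholder.

# Illustration only: the final term ("f") runs as a prefix query, the earlier
# terms as regular term queries combined with AND semantics.
from elasticsearch_dsl import Q, types

q = Q(
    "match_bool_prefix",
    message=types.MatchBoolPrefixQuery(query="quick brown f", operator="and"),
)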
-class QueryVectorBuilder(AttrDict[Any]):
+class MatchPhrasePrefixQuery(QueryBase):
"""
- :arg text_embedding:
+ :arg query: (required) Text you wish to find in the provided field.
+ :arg analyzer: Analyzer used to convert text in the query value into
+ tokens.
+ :arg max_expansions: Maximum number of terms to which the last
+ provided term of the query value will expand. Defaults to `50` if
+ omitted.
+ :arg slop: Maximum number of positions allowed between matching
+ tokens.
+ :arg zero_terms_query: Indicates whether no documents are returned if
+ the analyzer removes all tokens, such as when using a `stop`
+ filter. Defaults to `none` if omitted.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType]
+ query: Union[str, DefaultType]
+ analyzer: Union[str, DefaultType]
+ max_expansions: Union[int, DefaultType]
+ slop: Union[int, DefaultType]
+ zero_terms_query: Union[Literal["all", "none"], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] = DEFAULT,
+ query: Union[str, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ max_expansions: Union[int, DefaultType] = DEFAULT,
+ slop: Union[int, DefaultType] = DEFAULT,
+ zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if text_embedding is not DEFAULT:
- kwargs["text_embedding"] = text_embedding
- super().__init__(kwargs)
-
-
-class RankFeatureFunction(AttrDict[Any]):
- pass
-
-
-class RankFeatureFunctionLinear(RankFeatureFunction):
- pass
-
-
-class RankFeatureFunctionLogarithm(RankFeatureFunction):
- """
- :arg scaling_factor: (required) Configurable scaling factor.
- """
-
- scaling_factor: Union[float, DefaultType]
-
- def __init__(
- self, *, scaling_factor: Union[float, DefaultType] = DEFAULT, **kwargs: Any
- ):
- if scaling_factor is not DEFAULT:
- kwargs["scaling_factor"] = scaling_factor
- super().__init__(**kwargs)
-
-
-class RankFeatureFunctionSaturation(RankFeatureFunction):
- """
- :arg pivot: Configurable pivot value so that the result will be less
- than 0.5.
- """
-
- pivot: Union[float, DefaultType]
-
- def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any):
- if pivot is not DEFAULT:
- kwargs["pivot"] = pivot
+ if query is not DEFAULT:
+ kwargs["query"] = query
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if max_expansions is not DEFAULT:
+ kwargs["max_expansions"] = max_expansions
+ if slop is not DEFAULT:
+ kwargs["slop"] = slop
+ if zero_terms_query is not DEFAULT:
+ kwargs["zero_terms_query"] = zero_terms_query
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
super().__init__(**kwargs)
-class RankFeatureFunctionSigmoid(RankFeatureFunction):
+class MatchPhraseQuery(QueryBase):
"""
- :arg pivot: (required) Configurable pivot value so that the result
- will be less than 0.5.
- :arg exponent: (required) Configurable Exponent.
+ :arg query: (required) Query terms that are analyzed and turned into a
+ phrase query.
+ :arg analyzer: Analyzer used to convert the text in the query value
+ into tokens.
+ :arg slop: Maximum number of positions allowed between matching
+ tokens.
+ :arg zero_terms_query: Indicates whether no documents are returned if
+ the `analyzer` removes all tokens, such as when using a `stop`
+ filter. Defaults to `'none'` if omitted.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- pivot: Union[float, DefaultType]
- exponent: Union[float, DefaultType]
+ query: Union[str, DefaultType]
+ analyzer: Union[str, DefaultType]
+ slop: Union[int, DefaultType]
+ zero_terms_query: Union[Literal["all", "none"], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- pivot: Union[float, DefaultType] = DEFAULT,
- exponent: Union[float, DefaultType] = DEFAULT,
+ query: Union[str, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ slop: Union[int, DefaultType] = DEFAULT,
+ zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if pivot is not DEFAULT:
- kwargs["pivot"] = pivot
- if exponent is not DEFAULT:
- kwargs["exponent"] = exponent
+ if query is not DEFAULT:
+ kwargs["query"] = query
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if slop is not DEFAULT:
+ kwargs["slop"] = slop
+ if zero_terms_query is not DEFAULT:
+ kwargs["zero_terms_query"] = zero_terms_query
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
super().__init__(**kwargs)
-class RegexpQuery(QueryBase):
+class MatchQuery(QueryBase):
"""
- :arg value: (required) Regular expression for terms you wish to find
- in the provided field.
- :arg case_insensitive: Allows case insensitive matching of the regular
- expression value with the indexed field values when set to `true`.
- When `false`, case sensitivity of matching depends on the
- underlying field’s mapping.
- :arg flags: Enables optional operators for the regular expression.
- :arg max_determinized_states: Maximum number of automaton states
- required for the query. Defaults to `10000` if omitted.
- :arg rewrite: Method used to rewrite the query.
+ :arg query: (required) Text, number, boolean value or date you wish to
+ find in the provided field.
+ :arg analyzer: Analyzer used to convert the text in the query value
+ into tokens.
+ :arg auto_generate_synonyms_phrase_query: If `true`, match phrase
+ queries are automatically created for multi-term synonyms.
+ Defaults to `True` if omitted.
+ :arg cutoff_frequency:
+ :arg fuzziness: Maximum edit distance allowed for matching.
+ :arg fuzzy_rewrite: Method used to rewrite the query.
+ :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+ transpositions of two adjacent characters (for example, `ab` to
+ `ba`). Defaults to `True` if omitted.
+ :arg lenient: If `true`, format-based errors, such as providing a text
+ query value for a numeric field, are ignored.
+ :arg max_expansions: Maximum number of terms to which the query will
+ expand. Defaults to `50` if omitted.
+ :arg minimum_should_match: Minimum number of clauses that must match
+ for a document to be returned.
+ :arg operator: Boolean logic used to interpret text in the query
+ value. Defaults to `'or'` if omitted.
+ :arg prefix_length: Number of beginning characters left unchanged for
+ fuzzy matching.
+ :arg zero_terms_query: Indicates whether no documents are returned if
+ the `analyzer` removes all tokens, such as when using a `stop`
+ filter. Defaults to `'none'` if omitted.
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -2040,36 +2600,70 @@ class RegexpQuery(QueryBase):
:arg _name:
"""
- value: Union[str, DefaultType]
- case_insensitive: Union[bool, DefaultType]
- flags: Union[str, DefaultType]
- max_determinized_states: Union[int, DefaultType]
- rewrite: Union[str, DefaultType]
+ query: Union[str, float, bool, DefaultType]
+ analyzer: Union[str, DefaultType]
+ auto_generate_synonyms_phrase_query: Union[bool, DefaultType]
+ cutoff_frequency: Union[float, DefaultType]
+ fuzziness: Union[str, int, DefaultType]
+ fuzzy_rewrite: Union[str, DefaultType]
+ fuzzy_transpositions: Union[bool, DefaultType]
+ lenient: Union[bool, DefaultType]
+ max_expansions: Union[int, DefaultType]
+ minimum_should_match: Union[int, str, DefaultType]
+ operator: Union[Literal["and", "or"], DefaultType]
+ prefix_length: Union[int, DefaultType]
+ zero_terms_query: Union[Literal["all", "none"], DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- value: Union[str, DefaultType] = DEFAULT,
- case_insensitive: Union[bool, DefaultType] = DEFAULT,
- flags: Union[str, DefaultType] = DEFAULT,
- max_determinized_states: Union[int, DefaultType] = DEFAULT,
- rewrite: Union[str, DefaultType] = DEFAULT,
+ query: Union[str, float, bool, DefaultType] = DEFAULT,
+ analyzer: Union[str, DefaultType] = DEFAULT,
+ auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT,
+ cutoff_frequency: Union[float, DefaultType] = DEFAULT,
+ fuzziness: Union[str, int, DefaultType] = DEFAULT,
+ fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
+ fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
+ lenient: Union[bool, DefaultType] = DEFAULT,
+ max_expansions: Union[int, DefaultType] = DEFAULT,
+ minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+ operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+ prefix_length: Union[int, DefaultType] = DEFAULT,
+ zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if value is not DEFAULT:
- kwargs["value"] = value
- if case_insensitive is not DEFAULT:
- kwargs["case_insensitive"] = case_insensitive
- if flags is not DEFAULT:
- kwargs["flags"] = flags
- if max_determinized_states is not DEFAULT:
- kwargs["max_determinized_states"] = max_determinized_states
- if rewrite is not DEFAULT:
- kwargs["rewrite"] = rewrite
+ if query is not DEFAULT:
+ kwargs["query"] = query
+ if analyzer is not DEFAULT:
+ kwargs["analyzer"] = analyzer
+ if auto_generate_synonyms_phrase_query is not DEFAULT:
+ kwargs["auto_generate_synonyms_phrase_query"] = (
+ auto_generate_synonyms_phrase_query
+ )
+ if cutoff_frequency is not DEFAULT:
+ kwargs["cutoff_frequency"] = cutoff_frequency
+ if fuzziness is not DEFAULT:
+ kwargs["fuzziness"] = fuzziness
+ if fuzzy_rewrite is not DEFAULT:
+ kwargs["fuzzy_rewrite"] = fuzzy_rewrite
+ if fuzzy_transpositions is not DEFAULT:
+ kwargs["fuzzy_transpositions"] = fuzzy_transpositions
+ if lenient is not DEFAULT:
+ kwargs["lenient"] = lenient
+ if max_expansions is not DEFAULT:
+ kwargs["max_expansions"] = max_expansions
+ if minimum_should_match is not DEFAULT:
+ kwargs["minimum_should_match"] = minimum_should_match
+ if operator is not DEFAULT:
+ kwargs["operator"] = operator
+ if prefix_length is not DEFAULT:
+ kwargs["prefix_length"] = prefix_length
+ if zero_terms_query is not DEFAULT:
+ kwargs["zero_terms_query"] = zero_terms_query
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -2077,304 +2671,224 @@ def __init__(
super().__init__(**kwargs)
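Illustration only: the full-text match options expressed with the typed helper instead of a raw dict; the `title` field name is a placeholder.

# Illustration only: fuzzy match requiring all terms to be present.
from elasticsearch_dsl import Q, types

q = Q(
    "match",
    title=types.MatchQuery(query="quick brwn fox", operator="and", fuzziness="AUTO"),
)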
-class Script(AttrDict[Any]):
+class PipelineAggregationBase(BucketPathAggregation):
"""
- :arg source: The script source.
- :arg id: The `id` for a stored script.
- :arg params: Specifies any named parameters that are passed into the
- script as variables. Use parameters instead of hard-coded values
- to decrease compile time.
- :arg lang: Specifies the language the script is written in. Defaults
- to `painless` if omitted.
- :arg options:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
"""
- source: Union[str, DefaultType]
- id: Union[str, DefaultType]
- params: Union[Mapping[str, Any], DefaultType]
- lang: Union[Literal["painless", "expression", "mustache", "java"], DefaultType]
- options: Union[Mapping[str, str], DefaultType]
+ format: Union[str, DefaultType]
+ gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
+ buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
def __init__(
self,
*,
- source: Union[str, DefaultType] = DEFAULT,
- id: Union[str, DefaultType] = DEFAULT,
- params: Union[Mapping[str, Any], DefaultType] = DEFAULT,
- lang: Union[
- Literal["painless", "expression", "mustache", "java"], DefaultType
+ format: Union[str, DefaultType] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], DefaultType
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], DefaultType
] = DEFAULT,
- options: Union[Mapping[str, str], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if source is not DEFAULT:
- kwargs["source"] = source
- if id is not DEFAULT:
- kwargs["id"] = id
- if params is not DEFAULT:
- kwargs["params"] = params
- if lang is not DEFAULT:
- kwargs["lang"] = lang
- if options is not DEFAULT:
- kwargs["options"] = options
- super().__init__(kwargs)
-
-
-class ScriptField(AttrDict[Any]):
+ if format is not DEFAULT:
+ kwargs["format"] = format
+ if gap_policy is not DEFAULT:
+ kwargs["gap_policy"] = gap_policy
+ if buckets_path is not DEFAULT:
+ kwargs["buckets_path"] = buckets_path
+ super().__init__(**kwargs)
+
+
+class MovingAverageAggregationBase(PipelineAggregationBase):
"""
- :arg script: (required)
- :arg ignore_failure:
+ :arg minimize:
+ :arg predict:
+ :arg window:
+ :arg format: `DecimalFormat` pattern for the output value. If
+ specified, the formatted value is returned in the aggregation’s
+ `value_as_string` property.
+ :arg gap_policy: Policy to apply when gaps are found in the data.
+ Defaults to `skip` if omitted.
+ :arg buckets_path: Path to the buckets that contain one set of values
+ to correlate.
"""
- script: Union["Script", Dict[str, Any], DefaultType]
- ignore_failure: Union[bool, DefaultType]
+ minimize: Union[bool, DefaultType]
+ predict: Union[int, DefaultType]
+ window: Union[int, DefaultType]
+ format: Union[str, DefaultType]
+ gap_policy: Union[Literal["skip", "insert_zeros", "keep_values"], DefaultType]
+ buckets_path: Union[str, Sequence[str], Mapping[str, str], DefaultType]
def __init__(
self,
*,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
- ignore_failure: Union[bool, DefaultType] = DEFAULT,
+ minimize: Union[bool, DefaultType] = DEFAULT,
+ predict: Union[int, DefaultType] = DEFAULT,
+ window: Union[int, DefaultType] = DEFAULT,
+ format: Union[str, DefaultType] = DEFAULT,
+ gap_policy: Union[
+ Literal["skip", "insert_zeros", "keep_values"], DefaultType
+ ] = DEFAULT,
+ buckets_path: Union[
+ str, Sequence[str], Mapping[str, str], DefaultType
+ ] = DEFAULT,
**kwargs: Any,
):
- if script is not DEFAULT:
- kwargs["script"] = script
- if ignore_failure is not DEFAULT:
- kwargs["ignore_failure"] = ignore_failure
- super().__init__(kwargs)
+ if minimize is not DEFAULT:
+ kwargs["minimize"] = minimize
+ if predict is not DEFAULT:
+ kwargs["predict"] = predict
+ if window is not DEFAULT:
+ kwargs["window"] = window
+ if format is not DEFAULT:
+ kwargs["format"] = format
+ if gap_policy is not DEFAULT:
+ kwargs["gap_policy"] = gap_policy
+ if buckets_path is not DEFAULT:
+ kwargs["buckets_path"] = buckets_path
+ super().__init__(**kwargs)
-class ScriptedHeuristic(AttrDict[Any]):
+class MultiTermLookup(AttrDict[Any]):
"""
- :arg script: (required)
+ :arg field: (required) The field from which to retrieve terms.
+ :arg missing: The value to apply to documents that do not have a
+ value. By default, documents without a value are ignored.
"""
- script: Union["Script", Dict[str, Any], DefaultType]
+ field: Union[str, InstrumentedField, DefaultType]
+ missing: Union[str, int, float, bool, DefaultType]
def __init__(
self,
*,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if script is not DEFAULT:
- kwargs["script"] = script
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if missing is not DEFAULT:
+ kwargs["missing"] = missing
super().__init__(kwargs)
-class ShapeFieldQuery(AttrDict[Any]):
+class MutualInformationHeuristic(AttrDict[Any]):
"""
- :arg indexed_shape: Queries using a pre-indexed shape.
- :arg relation: Spatial relation between the query shape and the
- document shape.
- :arg shape: Queries using an inline shape definition in GeoJSON or
- Well Known Text (WKT) format.
+ :arg background_is_superset: Set to `false` if you defined a custom
+ background filter that represents a different set of documents
+ that you want to compare to.
+ :arg include_negatives: Set to `false` to filter out the terms that
+ appear less often in the subset than in documents outside the
+ subset.
"""
- indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType]
- relation: Union[
- Literal["intersects", "disjoint", "within", "contains"], DefaultType
- ]
- shape: Any
+ background_is_superset: Union[bool, DefaultType]
+ include_negatives: Union[bool, DefaultType]
def __init__(
self,
*,
- indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT,
- relation: Union[
- Literal["intersects", "disjoint", "within", "contains"], DefaultType
- ] = DEFAULT,
- shape: Any = DEFAULT,
+ background_is_superset: Union[bool, DefaultType] = DEFAULT,
+ include_negatives: Union[bool, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if indexed_shape is not DEFAULT:
- kwargs["indexed_shape"] = indexed_shape
- if relation is not DEFAULT:
- kwargs["relation"] = relation
- if shape is not DEFAULT:
- kwargs["shape"] = shape
+ if background_is_superset is not DEFAULT:
+ kwargs["background_is_superset"] = background_is_superset
+ if include_negatives is not DEFAULT:
+ kwargs["include_negatives"] = include_negatives
super().__init__(kwargs)
-class SortOptions(AttrDict[Any]):
+class NestedSortValue(AttrDict[Any]):
"""
- :arg _field: The field to use in this query.
- :arg _value: The query value for the field.
- :arg _score:
- :arg _doc:
- :arg _geo_distance:
- :arg _script:
+ :arg path: (required)
+ :arg filter:
+ :arg max_children:
+ :arg nested:
"""
- _field: Union[str, "InstrumentedField", "DefaultType"]
- _value: Union["FieldSort", Dict[str, Any], "DefaultType"]
- _score: Union["ScoreSort", Dict[str, Any], DefaultType]
- _doc: Union["ScoreSort", Dict[str, Any], DefaultType]
- _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType]
- _script: Union["ScriptSort", Dict[str, Any], DefaultType]
+ path: Union[str, InstrumentedField, DefaultType]
+ filter: Union[Query, DefaultType]
+ max_children: Union[int, DefaultType]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
def __init__(
self,
- _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
- _value: Union["FieldSort", Dict[str, Any], "DefaultType"] = DEFAULT,
*,
- _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
- _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
- _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT,
- _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT,
+ path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ filter: Union[Query, DefaultType] = DEFAULT,
+ max_children: Union[int, DefaultType] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if _field is not DEFAULT:
- kwargs[str(_field)] = _value
- if _score is not DEFAULT:
- kwargs["_score"] = _score
- if _doc is not DEFAULT:
- kwargs["_doc"] = _doc
- if _geo_distance is not DEFAULT:
- kwargs["_geo_distance"] = _geo_distance
- if _script is not DEFAULT:
- kwargs["_script"] = _script
+ if path is not DEFAULT:
+ kwargs["path"] = str(path)
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
+ if max_children is not DEFAULT:
+ kwargs["max_children"] = max_children
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
super().__init__(kwargs)
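A hedged sketch (not part of this patch): sorting on a field inside a nested object, restricted to a subset of the nested documents. The index layout, field names and the Search.sort call are assumptions.

# Illustration only: order products by the price of offers from one reseller.
from elasticsearch_dsl import Q, Search, types

s = Search(index="products").sort(
    {
        "offers.price": {
            "order": "asc",
            "nested": types.NestedSortValue(
                path="offers",
                filter=Q("term", **{"offers.reseller": "acme"}),
            ),
        }
    }
)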
-class SourceFilter(AttrDict[Any]):
+class PercentageScoreHeuristic(AttrDict[Any]):
+ pass
+
+
+class PinnedDoc(AttrDict[Any]):
"""
- :arg excludes:
- :arg includes:
+ :arg _id: (required) The unique document ID.
+ :arg _index: (required) The index that contains the document.
"""
- excludes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
- includes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
+ _id: Union[str, DefaultType]
+ _index: Union[str, DefaultType]
def __init__(
self,
*,
- excludes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ] = DEFAULT,
- includes: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ] = DEFAULT,
+ _id: Union[str, DefaultType] = DEFAULT,
+ _index: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if excludes is not DEFAULT:
- kwargs["excludes"] = str(excludes)
- if includes is not DEFAULT:
- kwargs["includes"] = str(includes)
+ if _id is not DEFAULT:
+ kwargs["_id"] = _id
+ if _index is not DEFAULT:
+ kwargs["_index"] = _index
super().__init__(kwargs)
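Illustration only, assuming the `pinned` query type is reachable through `Q`: promoting specific documents ahead of the organic results; ids and index names are placeholders.

# Illustration only: pin two documents on top of a match query.
from elasticsearch_dsl import Q, types

q = Q(
    "pinned",
    organic=Q("match", title="elasticsearch"),
    docs=[
        types.PinnedDoc(_id="42", _index="blog"),
        types.PinnedDoc(_id="7", _index="blog"),
    ],
)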
-class SpanQuery(AttrDict[Any]):
+class PrefixQuery(QueryBase):
"""
- :arg span_containing: Accepts a list of span queries, but only returns
- those spans which also match a second span query.
- :arg span_field_masking: Allows queries like `span_near` or `span_or`
- across different fields.
- :arg span_first: Accepts another span query whose matches must appear
- within the first N positions of the field.
- :arg span_gap:
- :arg span_multi: Wraps a `term`, `range`, `prefix`, `wildcard`,
- `regexp`, or `fuzzy` query.
- :arg span_near: Accepts multiple span queries whose matches must be
- within the specified distance of each other, and possibly in the
- same order.
- :arg span_not: Wraps another span query, and excludes any documents
- which match that query.
- :arg span_or: Combines multiple span queries and returns documents
- which match any of the specified queries.
- :arg span_term: The equivalent of the `term` query but for use with
- other span queries.
- :arg span_within: The result from a single span query is returned as
- long is its span falls within the spans returned by a list of
- other span queries.
- """
-
- span_containing: Union["SpanContainingQuery", Dict[str, Any], DefaultType]
- span_field_masking: Union["SpanFieldMaskingQuery", Dict[str, Any], DefaultType]
- span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType]
- span_gap: Union[Mapping[Union[str, InstrumentedField], int], DefaultType]
- span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType]
- span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType]
- span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType]
- span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType]
- span_term: Union[
- Mapping[Union[str, InstrumentedField], "SpanTermQuery"],
- Dict[str, Any],
- DefaultType,
- ]
- span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType]
-
- def __init__(
- self,
- *,
- span_containing: Union[
- "SpanContainingQuery", Dict[str, Any], DefaultType
- ] = DEFAULT,
- span_field_masking: Union[
- "SpanFieldMaskingQuery", Dict[str, Any], DefaultType
- ] = DEFAULT,
- span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] = DEFAULT,
- span_gap: Union[
- Mapping[Union[str, InstrumentedField], int], DefaultType
- ] = DEFAULT,
- span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] = DEFAULT,
- span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] = DEFAULT,
- span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] = DEFAULT,
- span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] = DEFAULT,
- span_term: Union[
- Mapping[Union[str, InstrumentedField], "SpanTermQuery"],
- Dict[str, Any],
- DefaultType,
- ] = DEFAULT,
- span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if span_containing is not DEFAULT:
- kwargs["span_containing"] = span_containing
- if span_field_masking is not DEFAULT:
- kwargs["span_field_masking"] = span_field_masking
- if span_first is not DEFAULT:
- kwargs["span_first"] = span_first
- if span_gap is not DEFAULT:
- kwargs["span_gap"] = str(span_gap)
- if span_multi is not DEFAULT:
- kwargs["span_multi"] = span_multi
- if span_near is not DEFAULT:
- kwargs["span_near"] = span_near
- if span_not is not DEFAULT:
- kwargs["span_not"] = span_not
- if span_or is not DEFAULT:
- kwargs["span_or"] = span_or
- if span_term is not DEFAULT:
- kwargs["span_term"] = str(span_term)
- if span_within is not DEFAULT:
- kwargs["span_within"] = span_within
- super().__init__(kwargs)
-
-
-class SpanTermQuery(QueryBase):
- """
- :arg value: (required)
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg value: (required) Beginning characters of terms you wish to find
+ in the provided field.
+ :arg rewrite: Method used to rewrite the query.
+ :arg case_insensitive: Allows ASCII case insensitive matching of the
+ value with the indexed field values when set to `true`. Default is
+ `false` which means the case sensitivity of matching depends on
+ the underlying field’s mapping.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
value: Union[str, DefaultType]
+ rewrite: Union[str, DefaultType]
+ case_insensitive: Union[bool, DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
@@ -2382,12 +2896,18 @@ def __init__(
self,
*,
value: Union[str, DefaultType] = DEFAULT,
+ rewrite: Union[str, DefaultType] = DEFAULT,
+ case_insensitive: Union[bool, DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
if value is not DEFAULT:
kwargs["value"] = value
+ if rewrite is not DEFAULT:
+ kwargs["rewrite"] = rewrite
+ if case_insensitive is not DEFAULT:
+ kwargs["case_insensitive"] = case_insensitive
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -2395,30 +2915,97 @@ def __init__(
super().__init__(**kwargs)
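One more brief illustration (not part of this patch); the `user` field name is a placeholder.

# Illustration only: case-insensitive prefix match on a keyword field.
from elasticsearch_dsl import Q, types

q = Q("prefix", user=types.PrefixQuery(value="ki", case_insensitive=True))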
-class TDigest(AttrDict[Any]):
+class QueryVectorBuilder(AttrDict[Any]):
"""
- :arg compression: Limits the maximum number of nodes used by the
- underlying TDigest algorithm to `20 * compression`, enabling
- control of memory usage and approximation error.
+ :arg text_embedding:
"""
- compression: Union[int, DefaultType]
+ text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType]
def __init__(
- self, *, compression: Union[int, DefaultType] = DEFAULT, **kwargs: Any
+ self,
+ *,
+ text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] = DEFAULT,
+ **kwargs: Any,
):
- if compression is not DEFAULT:
- kwargs["compression"] = compression
+ if text_embedding is not DEFAULT:
+ kwargs["text_embedding"] = text_embedding
super().__init__(kwargs)
-class TermQuery(QueryBase):
+class RankFeatureFunction(AttrDict[Any]):
+ pass
+
+
+class RankFeatureFunctionLinear(RankFeatureFunction):
+ pass
+
+
+class RankFeatureFunctionLogarithm(RankFeatureFunction):
"""
- :arg value: (required) Term you wish to find in the provided field.
- :arg case_insensitive: Allows ASCII case insensitive matching of the
- value with the indexed field values when set to `true`. When
- `false`, the case sensitivity of matching depends on the
+ :arg scaling_factor: (required) Configurable scaling factor.
+ """
+
+ scaling_factor: Union[float, DefaultType]
+
+ def __init__(
+ self, *, scaling_factor: Union[float, DefaultType] = DEFAULT, **kwargs: Any
+ ):
+ if scaling_factor is not DEFAULT:
+ kwargs["scaling_factor"] = scaling_factor
+ super().__init__(**kwargs)
+
+
+class RankFeatureFunctionSaturation(RankFeatureFunction):
+ """
+ :arg pivot: Configurable pivot value so that the result will be less
+ than 0.5.
+ """
+
+ pivot: Union[float, DefaultType]
+
+ def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any):
+ if pivot is not DEFAULT:
+ kwargs["pivot"] = pivot
+ super().__init__(**kwargs)
+
+
+class RankFeatureFunctionSigmoid(RankFeatureFunction):
+ """
+ :arg pivot: (required) Configurable pivot value so that the result
+ will be less than 0.5.
+ :arg exponent: (required) Configurable Exponent.
+ """
+
+ pivot: Union[float, DefaultType]
+ exponent: Union[float, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ pivot: Union[float, DefaultType] = DEFAULT,
+ exponent: Union[float, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if pivot is not DEFAULT:
+ kwargs["pivot"] = pivot
+ if exponent is not DEFAULT:
+ kwargs["exponent"] = exponent
+ super().__init__(**kwargs)
+
+
+class RegexpQuery(QueryBase):
+ """
+ :arg value: (required) Regular expression for terms you wish to find
+ in the provided field.
+ :arg case_insensitive: Allows case insensitive matching of the regular
+ expression value with the indexed field values when set to `true`.
+ When `false`, case sensitivity of matching depends on the
underlying field’s mapping.
+ :arg flags: Enables optional operators for the regular expression.
+ :arg max_determinized_states: Maximum number of automaton states
+ required for the query. Defaults to `10000` if omitted.
+ :arg rewrite: Method used to rewrite the query.
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -2427,16 +3014,22 @@ class TermQuery(QueryBase):
:arg _name:
"""
- value: Union[int, float, str, bool, None, Any, DefaultType]
+ value: Union[str, DefaultType]
case_insensitive: Union[bool, DefaultType]
+ flags: Union[str, DefaultType]
+ max_determinized_states: Union[int, DefaultType]
+ rewrite: Union[str, DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- value: Union[int, float, str, bool, None, Any, DefaultType] = DEFAULT,
+ value: Union[str, DefaultType] = DEFAULT,
case_insensitive: Union[bool, DefaultType] = DEFAULT,
+ flags: Union[str, DefaultType] = DEFAULT,
+ max_determinized_states: Union[int, DefaultType] = DEFAULT,
+ rewrite: Union[str, DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
@@ -2445,6 +3038,12 @@ def __init__(
kwargs["value"] = value
if case_insensitive is not DEFAULT:
kwargs["case_insensitive"] = case_insensitive
+ if flags is not DEFAULT:
+ kwargs["flags"] = flags
+ if max_determinized_states is not DEFAULT:
+ kwargs["max_determinized_states"] = max_determinized_states
+ if rewrite is not DEFAULT:
+ kwargs["rewrite"] = rewrite
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -2452,376 +3051,332 @@ def __init__(
super().__init__(**kwargs)
-class TermsLookup(AttrDict[Any]):
+class RegressionInferenceOptions(AttrDict[Any]):
"""
- :arg index: (required)
- :arg id: (required)
- :arg path: (required)
- :arg routing:
+ :arg results_field: The field that is added to incoming documents to
+ contain the inference prediction. Defaults to predicted_value.
+ :arg num_top_feature_importance_values: Specifies the maximum number
+ of feature importance values per document.
"""
- index: Union[str, DefaultType]
- id: Union[str, DefaultType]
- path: Union[str, InstrumentedField, DefaultType]
- routing: Union[str, DefaultType]
+ results_field: Union[str, InstrumentedField, DefaultType]
+ num_top_feature_importance_values: Union[int, DefaultType]
def __init__(
self,
*,
- index: Union[str, DefaultType] = DEFAULT,
- id: Union[str, DefaultType] = DEFAULT,
- path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- routing: Union[str, DefaultType] = DEFAULT,
+ results_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if index is not DEFAULT:
- kwargs["index"] = index
- if id is not DEFAULT:
- kwargs["id"] = id
- if path is not DEFAULT:
- kwargs["path"] = str(path)
- if routing is not DEFAULT:
- kwargs["routing"] = routing
+ if results_field is not DEFAULT:
+ kwargs["results_field"] = str(results_field)
+ if num_top_feature_importance_values is not DEFAULT:
+ kwargs["num_top_feature_importance_values"] = (
+ num_top_feature_importance_values
+ )
super().__init__(kwargs)
-class TermsPartition(AttrDict[Any]):
+class ScoreSort(AttrDict[Any]):
"""
- :arg num_partitions: (required) The number of partitions.
- :arg partition: (required) The partition number for this request.
+ :arg order:
"""
- num_partitions: Union[int, DefaultType]
- partition: Union[int, DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
def __init__(
self,
*,
- num_partitions: Union[int, DefaultType] = DEFAULT,
- partition: Union[int, DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if num_partitions is not DEFAULT:
- kwargs["num_partitions"] = num_partitions
- if partition is not DEFAULT:
- kwargs["partition"] = partition
+ if order is not DEFAULT:
+ kwargs["order"] = order
super().__init__(kwargs)
-class TermsSetQuery(QueryBase):
+class Script(AttrDict[Any]):
"""
- :arg terms: (required) Array of terms you wish to find in the provided
- field.
- :arg minimum_should_match: Specification describing number of matching
- terms required to return a document.
- :arg minimum_should_match_field: Numeric field containing the number
- of matching terms required to return a document.
- :arg minimum_should_match_script: Custom script containing the number
- of matching terms required to return a document.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg source: The script source.
+ :arg id: The `id` for a stored script.
+ :arg params: Specifies any named parameters that are passed into the
+ script as variables. Use parameters instead of hard-coded values
+ to decrease compile time.
+ :arg lang: Specifies the language the script is written in. Defaults
+ to `painless` if omitted.
+ :arg options:
"""
- terms: Union[Sequence[str], DefaultType]
- minimum_should_match: Union[int, str, DefaultType]
- minimum_should_match_field: Union[str, InstrumentedField, DefaultType]
- minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ source: Union[str, DefaultType]
+ id: Union[str, DefaultType]
+ params: Union[Mapping[str, Any], DefaultType]
+ lang: Union[Literal["painless", "expression", "mustache", "java"], DefaultType]
+ options: Union[Mapping[str, str], DefaultType]
def __init__(
self,
*,
- terms: Union[Sequence[str], DefaultType] = DEFAULT,
- minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
- minimum_should_match_field: Union[
- str, InstrumentedField, DefaultType
- ] = DEFAULT,
- minimum_should_match_script: Union[
- "Script", Dict[str, Any], DefaultType
+ source: Union[str, DefaultType] = DEFAULT,
+ id: Union[str, DefaultType] = DEFAULT,
+ params: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+ lang: Union[
+ Literal["painless", "expression", "mustache", "java"], DefaultType
] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ options: Union[Mapping[str, str], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if terms is not DEFAULT:
- kwargs["terms"] = terms
- if minimum_should_match is not DEFAULT:
- kwargs["minimum_should_match"] = minimum_should_match
- if minimum_should_match_field is not DEFAULT:
- kwargs["minimum_should_match_field"] = str(minimum_should_match_field)
- if minimum_should_match_script is not DEFAULT:
- kwargs["minimum_should_match_script"] = minimum_should_match_script
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if source is not DEFAULT:
+ kwargs["source"] = source
+ if id is not DEFAULT:
+ kwargs["id"] = id
+ if params is not DEFAULT:
+ kwargs["params"] = params
+ if lang is not DEFAULT:
+ kwargs["lang"] = lang
+ if options is not DEFAULT:
+ kwargs["options"] = options
+ super().__init__(kwargs)
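An illustrative sketch: a parametrized inline script built with the new helper and plugged into a `script_score` query; the scoring formula and field name are assumptions.

# Illustration only: boost scores by a likes-based factor.
from elasticsearch_dsl import Q, types

script = types.Script(
    source="doc['likes'].value * params.factor",
    params={"factor": 1.2},
    lang="painless",
)
q = Q("script_score", query=Q("match_all"), script=script)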
-class TestPopulation(AttrDict[Any]):
+class ScriptField(AttrDict[Any]):
"""
- :arg field: (required) The field to aggregate.
- :arg script:
- :arg filter: A filter used to define a set of records to run unpaired
- t-test on.
+ :arg script: (required)
+ :arg ignore_failure:
"""
- field: Union[str, InstrumentedField, DefaultType]
script: Union["Script", Dict[str, Any], DefaultType]
- filter: Union[Query, DefaultType]
+ ignore_failure: Union[bool, DefaultType]
def __init__(
self,
*,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
- filter: Union[Query, DefaultType] = DEFAULT,
+ ignore_failure: Union[bool, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
if script is not DEFAULT:
kwargs["script"] = script
- if filter is not DEFAULT:
- kwargs["filter"] = filter
+ if ignore_failure is not DEFAULT:
+ kwargs["ignore_failure"] = ignore_failure
super().__init__(kwargs)
-class TextExpansionQuery(QueryBase):
+class ScriptSort(AttrDict[Any]):
"""
- :arg model_id: (required) The text expansion NLP model to use
- :arg model_text: (required) The query text
- :arg pruning_config: Token pruning configurations
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg script: (required)
+ :arg order:
+ :arg type:
+ :arg mode:
+ :arg nested:
"""
- model_id: Union[str, DefaultType]
- model_text: Union[str, DefaultType]
- pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
+ order: Union[Literal["asc", "desc"], DefaultType]
+ type: Union[Literal["string", "number", "version"], DefaultType]
+ mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
def __init__(
self,
*,
- model_id: Union[str, DefaultType] = DEFAULT,
- model_text: Union[str, DefaultType] = DEFAULT,
- pruning_config: Union[
- "TokenPruningConfig", Dict[str, Any], DefaultType
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT,
+ mode: Union[
+ Literal["min", "max", "sum", "avg", "median"], DefaultType
] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if model_id is not DEFAULT:
- kwargs["model_id"] = model_id
- if model_text is not DEFAULT:
- kwargs["model_text"] = model_text
- if pruning_config is not DEFAULT:
- kwargs["pruning_config"] = pruning_config
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ if order is not DEFAULT:
+ kwargs["order"] = order
+ if type is not DEFAULT:
+ kwargs["type"] = type
+ if mode is not DEFAULT:
+ kwargs["mode"] = mode
+ if nested is not DEFAULT:
+ kwargs["nested"] = nested
+ super().__init__(kwargs)
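# Illustrative usage sketch (hypothetical script source): sorting numerically
# by a computed value, built from the Script and ScriptSort types added above.
from elasticsearch_dsl.types import Script, ScriptSort

sort_by_votes = ScriptSort(
    script=Script(source="doc['votes'].value * 2", lang="painless"),
    type="number",
    order="desc",
)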
-class TokenPruningConfig(AttrDict[Any]):
+class ScriptedHeuristic(AttrDict[Any]):
"""
- :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than
- this threshold times the average frequency of all tokens in the
- specified field are considered outliers and pruned. Defaults to
- `5` if omitted.
- :arg tokens_weight_threshold: Tokens whose weight is less than this
- threshold are considered nonsignificant and pruned. Defaults to
- `0.4` if omitted.
- :arg only_score_pruned_tokens: Whether to only score pruned tokens, vs
- only scoring kept tokens.
+ :arg script: (required)
"""
- tokens_freq_ratio_threshold: Union[int, DefaultType]
- tokens_weight_threshold: Union[float, DefaultType]
- only_score_pruned_tokens: Union[bool, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
def __init__(
self,
*,
- tokens_freq_ratio_threshold: Union[int, DefaultType] = DEFAULT,
- tokens_weight_threshold: Union[float, DefaultType] = DEFAULT,
- only_score_pruned_tokens: Union[bool, DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if tokens_freq_ratio_threshold is not DEFAULT:
- kwargs["tokens_freq_ratio_threshold"] = tokens_freq_ratio_threshold
- if tokens_weight_threshold is not DEFAULT:
- kwargs["tokens_weight_threshold"] = tokens_weight_threshold
- if only_score_pruned_tokens is not DEFAULT:
- kwargs["only_score_pruned_tokens"] = only_score_pruned_tokens
+ if script is not DEFAULT:
+ kwargs["script"] = script
super().__init__(kwargs)
-class TopLeftBottomRightGeoBounds(AttrDict[Any]):
+class ShapeFieldQuery(AttrDict[Any]):
"""
- :arg top_left: (required)
- :arg bottom_right: (required)
+ :arg indexed_shape: Queries using a pre-indexed shape.
+ :arg relation: Spatial relation between the query shape and the
+ document shape.
+ :arg shape: Queries using an inline shape definition in GeoJSON or
+ Well Known Text (WKT) format.
"""
- top_left: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
- DefaultType,
- ]
- bottom_right: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
- DefaultType,
+ indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType]
+ relation: Union[
+ Literal["intersects", "disjoint", "within", "contains"], DefaultType
]
+ shape: Any
def __init__(
self,
*,
- top_left: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
- DefaultType,
- ] = DEFAULT,
- bottom_right: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
- DefaultType,
+ indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT,
+ relation: Union[
+ Literal["intersects", "disjoint", "within", "contains"], DefaultType
] = DEFAULT,
+ shape: Any = DEFAULT,
**kwargs: Any,
):
- if top_left is not DEFAULT:
- kwargs["top_left"] = top_left
- if bottom_right is not DEFAULT:
- kwargs["bottom_right"] = bottom_right
+ if indexed_shape is not DEFAULT:
+ kwargs["indexed_shape"] = indexed_shape
+ if relation is not DEFAULT:
+ kwargs["relation"] = relation
+ if shape is not DEFAULT:
+ kwargs["shape"] = shape
super().__init__(kwargs)
-class TopMetricsValue(AttrDict[Any]):
+class SortOptions(AttrDict[Any]):
"""
- :arg field: (required) A field to return as a metric.
+ :arg _field: The field to use in this query.
+ :arg _value: The query value for the field.
+ :arg _score:
+ :arg _doc:
+ :arg _geo_distance:
+ :arg _script:
"""
- field: Union[str, InstrumentedField, DefaultType]
+ _field: Union[str, "InstrumentedField", "DefaultType"]
+ _value: Union["FieldSort", Dict[str, Any], "DefaultType"]
+ _score: Union["ScoreSort", Dict[str, Any], DefaultType]
+ _doc: Union["ScoreSort", Dict[str, Any], DefaultType]
+ _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType]
+ _script: Union["ScriptSort", Dict[str, Any], DefaultType]
def __init__(
self,
+ _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+ _value: Union["FieldSort", Dict[str, Any], "DefaultType"] = DEFAULT,
*,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
+ _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
+ _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT,
+ _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
+ if _field is not DEFAULT:
+ kwargs[str(_field)] = _value
+ if _score is not DEFAULT:
+ kwargs["_score"] = _score
+ if _doc is not DEFAULT:
+ kwargs["_doc"] = _doc
+ if _geo_distance is not DEFAULT:
+ kwargs["_geo_distance"] = _geo_distance
+ if _script is not DEFAULT:
+ kwargs["_script"] = _script
super().__init__(kwargs)
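# Illustrative usage sketch: SortOptions keys its body by the name passed as
# `_field`; "publish_date" and the inner sort options are hypothetical.
from elasticsearch_dsl.types import SortOptions

sort_option = SortOptions("publish_date", {"order": "desc", "missing": "_last"})
# stored internally as {"publish_date": {"order": "desc", "missing": "_last"}}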
-class TopRightBottomLeftGeoBounds(AttrDict[Any]):
+class SourceFilter(AttrDict[Any]):
"""
- :arg top_right: (required)
- :arg bottom_left: (required)
+ :arg excludes:
+ :arg includes:
"""
- top_right: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
+ excludes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
]
- bottom_left: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
+ includes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
]
def __init__(
self,
*,
- top_right: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
+ excludes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
] = DEFAULT,
- bottom_left: Union[
- "LatLonGeoLocation",
- "GeoHashLocation",
- Sequence[float],
- str,
- Dict[str, Any],
+ includes: Union[
+ Union[str, InstrumentedField],
+ Sequence[Union[str, InstrumentedField]],
DefaultType,
] = DEFAULT,
**kwargs: Any,
):
- if top_right is not DEFAULT:
- kwargs["top_right"] = top_right
- if bottom_left is not DEFAULT:
- kwargs["bottom_left"] = bottom_left
+ if excludes is not DEFAULT:
+ kwargs["excludes"] = str(excludes)
+ if includes is not DEFAULT:
+ kwargs["includes"] = str(includes)
super().__init__(kwargs)
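# Illustrative usage sketch: because the constructor above passes the values
# through str(), a single (hypothetical) field pattern is used for each side.
from elasticsearch_dsl.types import SourceFilter

source_filter = SourceFilter(includes="user.*", excludes="user.secret")
# stored internally as {"includes": "user.*", "excludes": "user.secret"}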
-class WeightedAverageValue(AttrDict[Any]):
+class SpanContainingQuery(QueryBase):
"""
- :arg field: The field from which to extract the values or weights.
- :arg missing: A value or weight to use if the field is missing.
- :arg script:
+ :arg big: (required) Can be any span query. Matching spans from `big`
+ that contain matches from `little` are returned.
+ :arg little: (required) Can be any span query. Matching spans from
+ `big` that contain matches from `little` are returned.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- field: Union[str, InstrumentedField, DefaultType]
- missing: Union[float, DefaultType]
- script: Union["Script", Dict[str, Any], DefaultType]
+ big: Union["SpanQuery", Dict[str, Any], DefaultType]
+ little: Union["SpanQuery", Dict[str, Any], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- missing: Union[float, DefaultType] = DEFAULT,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
- if missing is not DEFAULT:
- kwargs["missing"] = missing
- if script is not DEFAULT:
- kwargs["script"] = script
- super().__init__(kwargs)
+ if big is not DEFAULT:
+ kwargs["big"] = big
+ if little is not DEFAULT:
+ kwargs["little"] = little
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class WeightedTokensQuery(QueryBase):
+class SpanFieldMaskingQuery(QueryBase):
"""
- :arg tokens: (required) The tokens representing this query
- :arg pruning_config: Token pruning configurations
+ :arg field: (required)
+ :arg query: (required)
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -2830,26 +3385,24 @@ class WeightedTokensQuery(QueryBase):
:arg _name:
"""
- tokens: Union[Mapping[str, float], DefaultType]
- pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
+ field: Union[str, InstrumentedField, DefaultType]
+ query: Union["SpanQuery", Dict[str, Any], DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- tokens: Union[Mapping[str, float], DefaultType] = DEFAULT,
- pruning_config: Union[
- "TokenPruningConfig", Dict[str, Any], DefaultType
- ] = DEFAULT,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ query: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if tokens is not DEFAULT:
- kwargs["tokens"] = tokens
- if pruning_config is not DEFAULT:
- kwargs["pruning_config"] = pruning_config
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if query is not DEFAULT:
+ kwargs["query"] = query
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -2857,17 +3410,11 @@ def __init__(
super().__init__(**kwargs)
-class WildcardQuery(QueryBase):
+class SpanFirstQuery(QueryBase):
"""
- :arg case_insensitive: Allows case insensitive matching of the pattern
- with the indexed field values when set to true. Default is false
- which means the case sensitivity of matching depends on the
- underlying field’s mapping.
- :arg rewrite: Method used to rewrite the query.
- :arg value: Wildcard pattern for terms you wish to find in the
- provided field. Required, when wildcard is not set.
- :arg wildcard: Wildcard pattern for terms you wish to find in the
- provided field. Required, when value is not set.
+ :arg end: (required) Controls the maximum end position permitted in a
+ match.
+ :arg match: (required) Can be any other span type query.
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -2876,32 +3423,24 @@ class WildcardQuery(QueryBase):
:arg _name:
"""
- case_insensitive: Union[bool, DefaultType]
- rewrite: Union[str, DefaultType]
- value: Union[str, DefaultType]
- wildcard: Union[str, DefaultType]
+ end: Union[int, DefaultType]
+ match: Union["SpanQuery", Dict[str, Any], DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- case_insensitive: Union[bool, DefaultType] = DEFAULT,
- rewrite: Union[str, DefaultType] = DEFAULT,
- value: Union[str, DefaultType] = DEFAULT,
- wildcard: Union[str, DefaultType] = DEFAULT,
+ end: Union[int, DefaultType] = DEFAULT,
+ match: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if case_insensitive is not DEFAULT:
- kwargs["case_insensitive"] = case_insensitive
- if rewrite is not DEFAULT:
- kwargs["rewrite"] = rewrite
- if value is not DEFAULT:
- kwargs["value"] = value
- if wildcard is not DEFAULT:
- kwargs["wildcard"] = wildcard
+ if end is not DEFAULT:
+ kwargs["end"] = end
+ if match is not DEFAULT:
+ kwargs["match"] = match
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -2909,606 +3448,528 @@ def __init__(
super().__init__(**kwargs)
-class WktGeoBounds(AttrDict[Any]):
+class SpanMultiTermQuery(QueryBase):
"""
- :arg wkt: (required)
+ :arg match: (required) Should be a multi term query (one of
+ `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query).
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- wkt: Union[str, DefaultType]
+ match: Union[Query, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
- def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
- if wkt is not DEFAULT:
- kwargs["wkt"] = wkt
- super().__init__(kwargs)
+ def __init__(
+ self,
+ *,
+ match: Union[Query, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if match is not DEFAULT:
+ kwargs["match"] = match
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class BucketCorrelationFunctionCountCorrelation(AttrDict[Any]):
+class SpanNearQuery(QueryBase):
"""
- :arg indicator: (required) The indicator with which to correlate the
- configured `bucket_path` values.
+ :arg clauses: (required) Array of one or more other span type queries.
+ :arg in_order: Controls whether matches are required to be in-order.
+ :arg slop: Controls the maximum number of intervening unmatched
+ positions permitted.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- indicator: Union[
- "BucketCorrelationFunctionCountCorrelationIndicator",
- Dict[str, Any],
- DefaultType,
- ]
+ clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
+ in_order: Union[bool, DefaultType]
+ slop: Union[int, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- indicator: Union[
- "BucketCorrelationFunctionCountCorrelationIndicator",
- Dict[str, Any],
- DefaultType,
+ clauses: Union[
+ Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
] = DEFAULT,
+ in_order: Union[bool, DefaultType] = DEFAULT,
+ slop: Union[int, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if indicator is not DEFAULT:
- kwargs["indicator"] = indicator
- super().__init__(kwargs)
+ if clauses is not DEFAULT:
+ kwargs["clauses"] = clauses
+ if in_order is not DEFAULT:
+ kwargs["in_order"] = in_order
+ if slop is not DEFAULT:
+ kwargs["slop"] = slop
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
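# Illustrative usage sketch: span clauses may be supplied as plain dicts per
# the `clauses` annotation; the field name and terms are hypothetical.
from elasticsearch_dsl.types import SpanNearQuery

near = SpanNearQuery(
    clauses=[
        {"span_term": {"title": {"value": "quick"}}},
        {"span_term": {"title": {"value": "fox"}}},
    ],
    slop=2,         # at most two intervening unmatched positions
    in_order=True,  # clauses must match in the given order
)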
-class FieldLookup(AttrDict[Any]):
+class SpanNotQuery(QueryBase):
"""
- :arg id: (required) `id` of the document.
- :arg index: Index from which to retrieve the document.
- :arg path: Name of the field.
- :arg routing: Custom routing value.
+ :arg exclude: (required) Span query whose matches must not overlap
+ those returned.
+ :arg include: (required) Span query whose matches are filtered.
+ :arg dist: The number of tokens from within the include span that
+ can’t have overlap with the exclude span. Equivalent to setting
+ both `pre` and `post`.
+ :arg post: The number of tokens after the include span that can’t have
+ overlap with the exclude span.
+ :arg pre: The number of tokens before the include span that can’t have
+ overlap with the exclude span.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- id: Union[str, DefaultType]
- index: Union[str, DefaultType]
- path: Union[str, InstrumentedField, DefaultType]
- routing: Union[str, DefaultType]
+ exclude: Union["SpanQuery", Dict[str, Any], DefaultType]
+ include: Union["SpanQuery", Dict[str, Any], DefaultType]
+ dist: Union[int, DefaultType]
+ post: Union[int, DefaultType]
+ pre: Union[int, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- id: Union[str, DefaultType] = DEFAULT,
- index: Union[str, DefaultType] = DEFAULT,
- path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- routing: Union[str, DefaultType] = DEFAULT,
+ exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ dist: Union[int, DefaultType] = DEFAULT,
+ post: Union[int, DefaultType] = DEFAULT,
+ pre: Union[int, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if id is not DEFAULT:
- kwargs["id"] = id
- if index is not DEFAULT:
- kwargs["index"] = index
- if path is not DEFAULT:
- kwargs["path"] = str(path)
- if routing is not DEFAULT:
- kwargs["routing"] = routing
- super().__init__(kwargs)
+ if exclude is not DEFAULT:
+ kwargs["exclude"] = exclude
+ if include is not DEFAULT:
+ kwargs["include"] = include
+ if dist is not DEFAULT:
+ kwargs["dist"] = dist
+ if post is not DEFAULT:
+ kwargs["post"] = post
+ if pre is not DEFAULT:
+ kwargs["pre"] = pre
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class HighlightField(HighlightBase):
+class SpanOrQuery(QueryBase):
"""
- :arg fragment_offset:
- :arg matched_fields:
- :arg analyzer:
- :arg type:
- :arg boundary_chars: A string that contains each boundary character.
- Defaults to `.,!? \t\n` if omitted.
- :arg boundary_max_scan: How far to scan for boundary characters.
- Defaults to `20` if omitted.
- :arg boundary_scanner: Specifies how to break the highlighted
- fragments: chars, sentence, or word. Only valid for the unified
- and fvh highlighters. Defaults to `sentence` for the `unified`
- highlighter. Defaults to `chars` for the `fvh` highlighter.
- :arg boundary_scanner_locale: Controls which locale is used to search
- for sentence and word boundaries. This parameter takes a form of a
- language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
- Defaults to `Locale.ROOT` if omitted.
- :arg force_source:
- :arg fragmenter: Specifies how text should be broken up in highlight
- snippets: `simple` or `span`. Only valid for the `plain`
- highlighter. Defaults to `span` if omitted.
- :arg fragment_size: The size of the highlighted fragment in
- characters. Defaults to `100` if omitted.
- :arg highlight_filter:
- :arg highlight_query: Highlight matches for a query other than the
- search query. This is especially useful if you use a rescore query
- because those are not taken into account by highlighting by
- default.
- :arg max_fragment_length:
- :arg max_analyzed_offset: If set to a non-negative value, highlighting
- stops at this defined maximum limit. The rest of the text is not
- processed, thus not highlighted and no error is returned The
- `max_analyzed_offset` query setting does not override the
- `index.highlight.max_analyzed_offset` setting, which prevails when
- it’s set to lower value than the query setting.
- :arg no_match_size: The amount of text you want to return from the
- beginning of the field if there are no matching fragments to
- highlight.
- :arg number_of_fragments: The maximum number of fragments to return.
- If the number of fragments is set to `0`, no fragments are
- returned. Instead, the entire field contents are highlighted and
- returned. This can be handy when you need to highlight short texts
- such as a title or address, but fragmentation is not required. If
- `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
- to `5` if omitted.
- :arg options:
- :arg order: Sorts highlighted fragments by score when set to `score`.
- By default, fragments will be output in the order they appear in
- the field (order: `none`). Setting this option to `score` will
- output the most relevant fragments first. Each highlighter applies
- its own logic to compute relevancy scores. Defaults to `none` if
- omitted.
- :arg phrase_limit: Controls the number of matching phrases in a
- document that are considered. Prevents the `fvh` highlighter from
- analyzing too many phrases and consuming too much memory. When
- using `matched_fields`, `phrase_limit` phrases per matched field
- are considered. Raising the limit increases query time and
- consumes more memory. Only supported by the `fvh` highlighter.
- Defaults to `256` if omitted.
- :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
- tags to use for the highlighted text. By default, highlighted text
- is wrapped in `` and `` tags.
- :arg require_field_match: By default, only fields that contains a
- query match are highlighted. Set to `false` to highlight all
- fields. Defaults to `True` if omitted.
- :arg tags_schema: Set to `styled` to use the built-in tag schema.
+ :arg clauses: (required) Array of one or more other span type queries.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- fragment_offset: Union[int, DefaultType]
- matched_fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ]
- analyzer: Union[str, Dict[str, Any], DefaultType]
- type: Union[Literal["plain", "fvh", "unified"], DefaultType]
- boundary_chars: Union[str, DefaultType]
- boundary_max_scan: Union[int, DefaultType]
- boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
- boundary_scanner_locale: Union[str, DefaultType]
- force_source: Union[bool, DefaultType]
- fragmenter: Union[Literal["simple", "span"], DefaultType]
- fragment_size: Union[int, DefaultType]
- highlight_filter: Union[bool, DefaultType]
- highlight_query: Union[Query, DefaultType]
- max_fragment_length: Union[int, DefaultType]
- max_analyzed_offset: Union[int, DefaultType]
- no_match_size: Union[int, DefaultType]
- number_of_fragments: Union[int, DefaultType]
- options: Union[Mapping[str, Any], DefaultType]
- order: Union[Literal["score"], DefaultType]
- phrase_limit: Union[int, DefaultType]
- post_tags: Union[Sequence[str], DefaultType]
- pre_tags: Union[Sequence[str], DefaultType]
- require_field_match: Union[bool, DefaultType]
- tags_schema: Union[Literal["styled"], DefaultType]
+ clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- fragment_offset: Union[int, DefaultType] = DEFAULT,
- matched_fields: Union[
- Union[str, InstrumentedField],
- Sequence[Union[str, InstrumentedField]],
- DefaultType,
- ] = DEFAULT,
- analyzer: Union[str, Dict[str, Any], DefaultType] = DEFAULT,
- type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
- boundary_chars: Union[str, DefaultType] = DEFAULT,
- boundary_max_scan: Union[int, DefaultType] = DEFAULT,
- boundary_scanner: Union[
- Literal["chars", "sentence", "word"], DefaultType
+ clauses: Union[
+ Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
] = DEFAULT,
- boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
- force_source: Union[bool, DefaultType] = DEFAULT,
- fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
- fragment_size: Union[int, DefaultType] = DEFAULT,
- highlight_filter: Union[bool, DefaultType] = DEFAULT,
- highlight_query: Union[Query, DefaultType] = DEFAULT,
- max_fragment_length: Union[int, DefaultType] = DEFAULT,
- max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
- no_match_size: Union[int, DefaultType] = DEFAULT,
- number_of_fragments: Union[int, DefaultType] = DEFAULT,
- options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["score"], DefaultType] = DEFAULT,
- phrase_limit: Union[int, DefaultType] = DEFAULT,
- post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
- require_field_match: Union[bool, DefaultType] = DEFAULT,
- tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if fragment_offset is not DEFAULT:
- kwargs["fragment_offset"] = fragment_offset
- if matched_fields is not DEFAULT:
- kwargs["matched_fields"] = str(matched_fields)
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if type is not DEFAULT:
- kwargs["type"] = type
- if boundary_chars is not DEFAULT:
- kwargs["boundary_chars"] = boundary_chars
- if boundary_max_scan is not DEFAULT:
- kwargs["boundary_max_scan"] = boundary_max_scan
- if boundary_scanner is not DEFAULT:
- kwargs["boundary_scanner"] = boundary_scanner
- if boundary_scanner_locale is not DEFAULT:
- kwargs["boundary_scanner_locale"] = boundary_scanner_locale
- if force_source is not DEFAULT:
- kwargs["force_source"] = force_source
- if fragmenter is not DEFAULT:
- kwargs["fragmenter"] = fragmenter
- if fragment_size is not DEFAULT:
- kwargs["fragment_size"] = fragment_size
- if highlight_filter is not DEFAULT:
- kwargs["highlight_filter"] = highlight_filter
- if highlight_query is not DEFAULT:
- kwargs["highlight_query"] = highlight_query
- if max_fragment_length is not DEFAULT:
- kwargs["max_fragment_length"] = max_fragment_length
- if max_analyzed_offset is not DEFAULT:
- kwargs["max_analyzed_offset"] = max_analyzed_offset
- if no_match_size is not DEFAULT:
- kwargs["no_match_size"] = no_match_size
- if number_of_fragments is not DEFAULT:
- kwargs["number_of_fragments"] = number_of_fragments
- if options is not DEFAULT:
- kwargs["options"] = options
- if order is not DEFAULT:
- kwargs["order"] = order
- if phrase_limit is not DEFAULT:
- kwargs["phrase_limit"] = phrase_limit
- if post_tags is not DEFAULT:
- kwargs["post_tags"] = post_tags
- if pre_tags is not DEFAULT:
- kwargs["pre_tags"] = pre_tags
- if require_field_match is not DEFAULT:
- kwargs["require_field_match"] = require_field_match
- if tags_schema is not DEFAULT:
- kwargs["tags_schema"] = tags_schema
+ if clauses is not DEFAULT:
+ kwargs["clauses"] = clauses
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
super().__init__(**kwargs)
-class RegressionInferenceOptions(AttrDict[Any]):
- """
- :arg results_field: The field that is added to incoming documents to
- contain the inference prediction. Defaults to predicted_value.
- :arg num_top_feature_importance_values: Specifies the maximum number
- of feature importance values per document.
- """
-
- results_field: Union[str, InstrumentedField, DefaultType]
- num_top_feature_importance_values: Union[int, DefaultType]
-
- def __init__(
- self,
- *,
- results_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if results_field is not DEFAULT:
- kwargs["results_field"] = str(results_field)
- if num_top_feature_importance_values is not DEFAULT:
- kwargs["num_top_feature_importance_values"] = (
- num_top_feature_importance_values
- )
- super().__init__(kwargs)
-
-
-class ClassificationInferenceOptions(AttrDict[Any]):
+class SpanQuery(AttrDict[Any]):
"""
- :arg num_top_classes: Specifies the number of top class predictions to
- return. Defaults to 0.
- :arg num_top_feature_importance_values: Specifies the maximum number
- of feature importance values per document.
- :arg prediction_field_type: Specifies the type of the predicted field
- to write. Acceptable values are: string, number, boolean. When
- boolean is provided 1.0 is transformed to true and 0.0 to false.
- :arg results_field: The field that is added to incoming documents to
- contain the inference prediction. Defaults to predicted_value.
- :arg top_classes_results_field: Specifies the field to which the top
- classes are written. Defaults to top_classes.
+ :arg span_containing: Accepts a list of span queries, but only returns
+ those spans which also match a second span query.
+ :arg span_field_masking: Allows queries like `span_near` or `span_or`
+ across different fields.
+ :arg span_first: Accepts another span query whose matches must appear
+ within the first N positions of the field.
+ :arg span_gap:
+ :arg span_multi: Wraps a `term`, `range`, `prefix`, `wildcard`,
+ `regexp`, or `fuzzy` query.
+ :arg span_near: Accepts multiple span queries whose matches must be
+ within the specified distance of each other, and possibly in the
+ same order.
+ :arg span_not: Wraps another span query, and excludes any documents
+ which match that query.
+ :arg span_or: Combines multiple span queries and returns documents
+ which match any of the specified queries.
+ :arg span_term: The equivalent of the `term` query but for use with
+ other span queries.
+ :arg span_within: The result from a single span query is returned as
+ long as its span falls within the spans returned by a list of
+ other span queries.
"""
- num_top_classes: Union[int, DefaultType]
- num_top_feature_importance_values: Union[int, DefaultType]
- prediction_field_type: Union[str, DefaultType]
- results_field: Union[str, DefaultType]
- top_classes_results_field: Union[str, DefaultType]
+ span_containing: Union["SpanContainingQuery", Dict[str, Any], DefaultType]
+ span_field_masking: Union["SpanFieldMaskingQuery", Dict[str, Any], DefaultType]
+ span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType]
+ span_gap: Union[Mapping[Union[str, InstrumentedField], int], DefaultType]
+ span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType]
+ span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType]
+ span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType]
+ span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType]
+ span_term: Union[
+ Mapping[Union[str, InstrumentedField], "SpanTermQuery"],
+ Dict[str, Any],
+ DefaultType,
+ ]
+ span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType]
def __init__(
self,
*,
- num_top_classes: Union[int, DefaultType] = DEFAULT,
- num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
- prediction_field_type: Union[str, DefaultType] = DEFAULT,
- results_field: Union[str, DefaultType] = DEFAULT,
- top_classes_results_field: Union[str, DefaultType] = DEFAULT,
+ span_containing: Union[
+ "SpanContainingQuery", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ span_field_masking: Union[
+ "SpanFieldMaskingQuery", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ span_gap: Union[
+ Mapping[Union[str, InstrumentedField], int], DefaultType
+ ] = DEFAULT,
+ span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ span_term: Union[
+ Mapping[Union[str, InstrumentedField], "SpanTermQuery"],
+ Dict[str, Any],
+ DefaultType,
+ ] = DEFAULT,
+ span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if num_top_classes is not DEFAULT:
- kwargs["num_top_classes"] = num_top_classes
- if num_top_feature_importance_values is not DEFAULT:
- kwargs["num_top_feature_importance_values"] = (
- num_top_feature_importance_values
- )
- if prediction_field_type is not DEFAULT:
- kwargs["prediction_field_type"] = prediction_field_type
- if results_field is not DEFAULT:
- kwargs["results_field"] = results_field
- if top_classes_results_field is not DEFAULT:
- kwargs["top_classes_results_field"] = top_classes_results_field
+ if span_containing is not DEFAULT:
+ kwargs["span_containing"] = span_containing
+ if span_field_masking is not DEFAULT:
+ kwargs["span_field_masking"] = span_field_masking
+ if span_first is not DEFAULT:
+ kwargs["span_first"] = span_first
+ if span_gap is not DEFAULT:
+ kwargs["span_gap"] = str(span_gap)
+ if span_multi is not DEFAULT:
+ kwargs["span_multi"] = span_multi
+ if span_near is not DEFAULT:
+ kwargs["span_near"] = span_near
+ if span_not is not DEFAULT:
+ kwargs["span_not"] = span_not
+ if span_or is not DEFAULT:
+ kwargs["span_or"] = span_or
+ if span_term is not DEFAULT:
+ kwargs["span_term"] = str(span_term)
+ if span_within is not DEFAULT:
+ kwargs["span_within"] = span_within
super().__init__(kwargs)
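# Illustrative usage sketch: SpanQuery is the container accepted wherever a
# span clause is expected; here it wraps a span_near body given as a plain
# dict (hypothetical field and value).
from elasticsearch_dsl.types import SpanQuery

clause = SpanQuery(
    span_near={
        "clauses": [{"span_term": {"title": {"value": "quick"}}}],
        "slop": 0,
    }
)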
-class FieldCollapse(AttrDict[Any]):
+class SpanTermQuery(QueryBase):
"""
- :arg field: (required) The field to collapse the result set on
- :arg inner_hits: The number of inner hits and their sort order
- :arg max_concurrent_group_searches: The number of concurrent requests
- allowed to retrieve the inner_hits per group
- :arg collapse:
+ :arg value: (required)
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- field: Union[str, InstrumentedField, DefaultType]
- inner_hits: Union[
- "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
- ]
- max_concurrent_group_searches: Union[int, DefaultType]
- collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
+ value: Union[str, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- inner_hits: Union[
- "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
- ] = DEFAULT,
- max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT,
- collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
+ value: Union[str, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if field is not DEFAULT:
- kwargs["field"] = str(field)
- if inner_hits is not DEFAULT:
- kwargs["inner_hits"] = inner_hits
- if max_concurrent_group_searches is not DEFAULT:
- kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches
- if collapse is not DEFAULT:
- kwargs["collapse"] = collapse
- super().__init__(kwargs)
+ if value is not DEFAULT:
+ kwargs["value"] = value
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class IntervalsAllOf(AttrDict[Any]):
+class SpanWithinQuery(QueryBase):
"""
- :arg intervals: (required) An array of rules to combine. All rules
- must produce a match in a document for the overall source to
- match.
- :arg max_gaps: Maximum number of positions between the matching terms.
- Intervals produced by the rules further apart than this are not
- considered matches. Defaults to `-1` if omitted.
- :arg ordered: If `true`, intervals produced by the rules should appear
- in the order in which they are specified.
- :arg filter: Rule used to filter returned intervals.
+ :arg big: (required) Can be any span query. Matching spans from
+ `little` that are enclosed within `big` are returned.
+ :arg little: (required) Can be any span query. Matching spans from
+ `little` that are enclosed within `big` are returned.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- intervals: Union[
- Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
- ]
- max_gaps: Union[int, DefaultType]
- ordered: Union[bool, DefaultType]
- filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+ big: Union["SpanQuery", Dict[str, Any], DefaultType]
+ little: Union["SpanQuery", Dict[str, Any], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- intervals: Union[
- Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
- ] = DEFAULT,
- max_gaps: Union[int, DefaultType] = DEFAULT,
- ordered: Union[bool, DefaultType] = DEFAULT,
- filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+ big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if intervals is not DEFAULT:
- kwargs["intervals"] = intervals
- if max_gaps is not DEFAULT:
- kwargs["max_gaps"] = max_gaps
- if ordered is not DEFAULT:
- kwargs["ordered"] = ordered
- if filter is not DEFAULT:
- kwargs["filter"] = filter
- super().__init__(kwargs)
+ if big is not DEFAULT:
+ kwargs["big"] = big
+ if little is not DEFAULT:
+ kwargs["little"] = little
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class IntervalsAnyOf(AttrDict[Any]):
+class TDigest(AttrDict[Any]):
"""
- :arg intervals: (required) An array of rules to match.
- :arg filter: Rule used to filter returned intervals.
+ :arg compression: Limits the maximum number of nodes used by the
+ underlying TDigest algorithm to `20 * compression`, enabling
+ control of memory usage and approximation error.
"""
- intervals: Union[
- Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
- ]
- filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+ compression: Union[int, DefaultType]
def __init__(
- self,
- *,
- intervals: Union[
- Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
- ] = DEFAULT,
- filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
- **kwargs: Any,
+ self, *, compression: Union[int, DefaultType] = DEFAULT, **kwargs: Any
):
- if intervals is not DEFAULT:
- kwargs["intervals"] = intervals
- if filter is not DEFAULT:
- kwargs["filter"] = filter
+ if compression is not DEFAULT:
+ kwargs["compression"] = compression
super().__init__(kwargs)
-class IntervalsFuzzy(AttrDict[Any]):
+class TermQuery(QueryBase):
"""
- :arg term: (required) The term to match.
- :arg analyzer: Analyzer used to normalize the term.
- :arg fuzziness: Maximum edit distance allowed for matching. Defaults
- to `auto` if omitted.
- :arg prefix_length: Number of beginning characters left unchanged when
- creating expansions.
- :arg transpositions: Indicates whether edits include transpositions of
- two adjacent characters (for example, `ab` to `ba`). Defaults to
- `True` if omitted.
- :arg use_field: If specified, match intervals from this field rather
- than the top-level field. The `term` is normalized using the
- search analyzer from this field, unless `analyzer` is specified
- separately.
+ :arg value: (required) Term you wish to find in the provided field.
+ :arg case_insensitive: Allows ASCII case insensitive matching of the
+ value with the indexed field values when set to `true`. When
+ `false`, the case sensitivity of matching depends on the
+ underlying field’s mapping.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- term: Union[str, DefaultType]
- analyzer: Union[str, DefaultType]
- fuzziness: Union[str, int, DefaultType]
- prefix_length: Union[int, DefaultType]
- transpositions: Union[bool, DefaultType]
- use_field: Union[str, InstrumentedField, DefaultType]
+ value: Union[int, float, str, bool, None, Any, DefaultType]
+ case_insensitive: Union[bool, DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- term: Union[str, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- fuzziness: Union[str, int, DefaultType] = DEFAULT,
- prefix_length: Union[int, DefaultType] = DEFAULT,
- transpositions: Union[bool, DefaultType] = DEFAULT,
- use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ value: Union[int, float, str, bool, None, Any, DefaultType] = DEFAULT,
+ case_insensitive: Union[bool, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if term is not DEFAULT:
- kwargs["term"] = term
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if fuzziness is not DEFAULT:
- kwargs["fuzziness"] = fuzziness
- if prefix_length is not DEFAULT:
- kwargs["prefix_length"] = prefix_length
- if transpositions is not DEFAULT:
- kwargs["transpositions"] = transpositions
- if use_field is not DEFAULT:
- kwargs["use_field"] = str(use_field)
- super().__init__(kwargs)
+ if value is not DEFAULT:
+ kwargs["value"] = value
+ if case_insensitive is not DEFAULT:
+ kwargs["case_insensitive"] = case_insensitive
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
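# Illustrative usage sketch: an exact-value term query with a hypothetical
# value; `boost` and `_name` remain optional.
from elasticsearch_dsl.types import TermQuery

exact_match = TermQuery(value="search-engine", case_insensitive=True, boost=1.5)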
-class IntervalsMatch(AttrDict[Any]):
+class TermsLookup(AttrDict[Any]):
"""
- :arg query: (required) Text you wish to find in the provided field.
- :arg analyzer: Analyzer used to analyze terms in the query.
- :arg max_gaps: Maximum number of positions between the matching terms.
- Terms further apart than this are not considered matches. Defaults
- to `-1` if omitted.
- :arg ordered: If `true`, matching terms must appear in their specified
- order.
- :arg use_field: If specified, match intervals from this field rather
- than the top-level field. The `term` is normalized using the
- search analyzer from this field, unless `analyzer` is specified
- separately.
- :arg filter: An optional interval filter.
+ :arg index: (required)
+ :arg id: (required)
+ :arg path: (required)
+ :arg routing:
"""
- query: Union[str, DefaultType]
- analyzer: Union[str, DefaultType]
- max_gaps: Union[int, DefaultType]
- ordered: Union[bool, DefaultType]
- use_field: Union[str, InstrumentedField, DefaultType]
- filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+ index: Union[str, DefaultType]
+ id: Union[str, DefaultType]
+ path: Union[str, InstrumentedField, DefaultType]
+ routing: Union[str, DefaultType]
def __init__(
self,
*,
- query: Union[str, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- max_gaps: Union[int, DefaultType] = DEFAULT,
- ordered: Union[bool, DefaultType] = DEFAULT,
- use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+ index: Union[str, DefaultType] = DEFAULT,
+ id: Union[str, DefaultType] = DEFAULT,
+ path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ routing: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if query is not DEFAULT:
- kwargs["query"] = query
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if max_gaps is not DEFAULT:
- kwargs["max_gaps"] = max_gaps
- if ordered is not DEFAULT:
- kwargs["ordered"] = ordered
- if use_field is not DEFAULT:
- kwargs["use_field"] = str(use_field)
- if filter is not DEFAULT:
- kwargs["filter"] = filter
+ if index is not DEFAULT:
+ kwargs["index"] = index
+ if id is not DEFAULT:
+ kwargs["id"] = id
+ if path is not DEFAULT:
+ kwargs["path"] = str(path)
+ if routing is not DEFAULT:
+ kwargs["routing"] = routing
+ super().__init__(kwargs)
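# Illustrative usage sketch: a terms lookup pointing at a hypothetical
# document whose `followers` field supplies the terms.
from elasticsearch_dsl.types import TermsLookup

lookup = TermsLookup(index="users", id="u-42", path="followers")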
+
+
+class TermsPartition(AttrDict[Any]):
+ """
+ :arg num_partitions: (required) The number of partitions.
+ :arg partition: (required) The partition number for this request.
+ """
+
+ num_partitions: Union[int, DefaultType]
+ partition: Union[int, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ num_partitions: Union[int, DefaultType] = DEFAULT,
+ partition: Union[int, DefaultType] = DEFAULT,
+ **kwargs: Any,
+ ):
+ if num_partitions is not DEFAULT:
+ kwargs["num_partitions"] = num_partitions
+ if partition is not DEFAULT:
+ kwargs["partition"] = partition
super().__init__(kwargs)
-class IntervalsPrefix(AttrDict[Any]):
+class TermsSetQuery(QueryBase):
"""
- :arg prefix: (required) Beginning characters of terms you wish to find
- in the top-level field.
- :arg analyzer: Analyzer used to analyze the `prefix`.
- :arg use_field: If specified, match intervals from this field rather
- than the top-level field. The `prefix` is normalized using the
- search analyzer from this field, unless `analyzer` is specified
- separately.
+ :arg terms: (required) Array of terms you wish to find in the provided
+ field.
+ :arg minimum_should_match: Specification describing number of matching
+ terms required to return a document.
+ :arg minimum_should_match_field: Numeric field containing the number
+ of matching terms required to return a document.
+ :arg minimum_should_match_script: Custom script containing the number
+ of matching terms required to return a document.
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
"""
- prefix: Union[str, DefaultType]
- analyzer: Union[str, DefaultType]
- use_field: Union[str, InstrumentedField, DefaultType]
+ terms: Union[Sequence[str], DefaultType]
+ minimum_should_match: Union[int, str, DefaultType]
+ minimum_should_match_field: Union[str, InstrumentedField, DefaultType]
+ minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
def __init__(
self,
*,
- prefix: Union[str, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ terms: Union[Sequence[str], DefaultType] = DEFAULT,
+ minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+ minimum_should_match_field: Union[
+ str, InstrumentedField, DefaultType
+ ] = DEFAULT,
+ minimum_should_match_script: Union[
+ "Script", Dict[str, Any], DefaultType
+ ] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if prefix is not DEFAULT:
- kwargs["prefix"] = prefix
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if use_field is not DEFAULT:
- kwargs["use_field"] = str(use_field)
- super().__init__(kwargs)
+ if terms is not DEFAULT:
+ kwargs["terms"] = terms
+ if minimum_should_match is not DEFAULT:
+ kwargs["minimum_should_match"] = minimum_should_match
+ if minimum_should_match_field is not DEFAULT:
+ kwargs["minimum_should_match_field"] = str(minimum_should_match_field)
+ if minimum_should_match_script is not DEFAULT:
+ kwargs["minimum_should_match_script"] = minimum_should_match_script
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
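# Illustrative usage sketch: a terms_set query whose required match count
# comes from a Script; the terms and script source are hypothetical.
from elasticsearch_dsl.types import Script, TermsSetQuery

skills_query = TermsSetQuery(
    terms=["python", "rust", "go"],
    minimum_should_match_script=Script(source="params.num_terms"),
)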
-class IntervalsWildcard(AttrDict[Any]):
+class TestPopulation(AttrDict[Any]):
"""
- :arg pattern: (required) Wildcard pattern used to find matching terms.
- :arg analyzer: Analyzer used to analyze the `pattern`. Defaults to the
- top-level field's analyzer.
- :arg use_field: If specified, match intervals from this field rather
- than the top-level field. The `pattern` is normalized using the
- search analyzer from this field, unless `analyzer` is specified
- separately.
+ :arg field: (required) The field to aggregate.
+ :arg script:
+ :arg filter: A filter used to define a set of records to run unpaired
+ t-test on.
"""
- pattern: Union[str, DefaultType]
- analyzer: Union[str, DefaultType]
- use_field: Union[str, InstrumentedField, DefaultType]
+ field: Union[str, InstrumentedField, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
+ filter: Union[Query, DefaultType]
def __init__(
self,
*,
- pattern: Union[str, DefaultType] = DEFAULT,
- analyzer: Union[str, DefaultType] = DEFAULT,
- use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+ filter: Union[Query, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if pattern is not DEFAULT:
- kwargs["pattern"] = pattern
- if analyzer is not DEFAULT:
- kwargs["analyzer"] = analyzer
- if use_field is not DEFAULT:
- kwargs["use_field"] = str(use_field)
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ if filter is not DEFAULT:
+ kwargs["filter"] = filter
super().__init__(kwargs)
@@ -3535,366 +3996,236 @@ def __init__(
super().__init__(kwargs)
-class FieldSort(AttrDict[Any]):
- """
- :arg missing:
- :arg mode:
- :arg nested:
- :arg order:
- :arg unmapped_type:
- :arg numeric_type:
- :arg format:
+class TextExpansionQuery(QueryBase):
"""
-
- missing: Union[str, int, float, bool, DefaultType]
- mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
- order: Union[Literal["asc", "desc"], DefaultType]
- unmapped_type: Union[
- Literal[
- "none",
- "geo_point",
- "geo_shape",
- "ip",
- "binary",
- "keyword",
- "text",
- "search_as_you_type",
- "date",
- "date_nanos",
- "boolean",
- "completion",
- "nested",
- "object",
- "version",
- "murmur3",
- "token_count",
- "percolator",
- "integer",
- "long",
- "short",
- "byte",
- "float",
- "half_float",
- "scaled_float",
- "double",
- "integer_range",
- "float_range",
- "long_range",
- "double_range",
- "date_range",
- "ip_range",
- "alias",
- "join",
- "rank_feature",
- "rank_features",
- "flattened",
- "shape",
- "histogram",
- "constant_keyword",
- "aggregate_metric_double",
- "dense_vector",
- "semantic_text",
- "sparse_vector",
- "match_only_text",
- "icu_collation_keyword",
- ],
- DefaultType,
- ]
- numeric_type: Union[Literal["long", "double", "date", "date_nanos"], DefaultType]
- format: Union[str, DefaultType]
-
- def __init__(
- self,
- *,
- missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
- mode: Union[
- Literal["min", "max", "sum", "avg", "median"], DefaultType
- ] = DEFAULT,
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
- unmapped_type: Union[
- Literal[
- "none",
- "geo_point",
- "geo_shape",
- "ip",
- "binary",
- "keyword",
- "text",
- "search_as_you_type",
- "date",
- "date_nanos",
- "boolean",
- "completion",
- "nested",
- "object",
- "version",
- "murmur3",
- "token_count",
- "percolator",
- "integer",
- "long",
- "short",
- "byte",
- "float",
- "half_float",
- "scaled_float",
- "double",
- "integer_range",
- "float_range",
- "long_range",
- "double_range",
- "date_range",
- "ip_range",
- "alias",
- "join",
- "rank_feature",
- "rank_features",
- "flattened",
- "shape",
- "histogram",
- "constant_keyword",
- "aggregate_metric_double",
- "dense_vector",
- "semantic_text",
- "sparse_vector",
- "match_only_text",
- "icu_collation_keyword",
- ],
- DefaultType,
- ] = DEFAULT,
- numeric_type: Union[
- Literal["long", "double", "date", "date_nanos"], DefaultType
+ :arg model_id: (required) The text expansion NLP model to use
+ :arg model_text: (required) The query text
+ :arg pruning_config: Token pruning configurations
+ :arg boost: Floating point number used to decrease or increase the
+ relevance scores of the query. Boost values are relative to the
+ default value of 1.0. A boost value between 0 and 1.0 decreases
+ the relevance score. A value greater than 1.0 increases the
+ relevance score. Defaults to `1` if omitted.
+ :arg _name:
+ """
+
+ model_id: Union[str, DefaultType]
+ model_text: Union[str, DefaultType]
+ pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
+ boost: Union[float, DefaultType]
+ _name: Union[str, DefaultType]
+
+ def __init__(
+ self,
+ *,
+ model_id: Union[str, DefaultType] = DEFAULT,
+ model_text: Union[str, DefaultType] = DEFAULT,
+ pruning_config: Union[
+ "TokenPruningConfig", Dict[str, Any], DefaultType
] = DEFAULT,
- format: Union[str, DefaultType] = DEFAULT,
+ boost: Union[float, DefaultType] = DEFAULT,
+ _name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if missing is not DEFAULT:
- kwargs["missing"] = missing
- if mode is not DEFAULT:
- kwargs["mode"] = mode
- if nested is not DEFAULT:
- kwargs["nested"] = nested
- if order is not DEFAULT:
- kwargs["order"] = order
- if unmapped_type is not DEFAULT:
- kwargs["unmapped_type"] = unmapped_type
- if numeric_type is not DEFAULT:
- kwargs["numeric_type"] = numeric_type
- if format is not DEFAULT:
- kwargs["format"] = format
- super().__init__(kwargs)
+ if model_id is not DEFAULT:
+ kwargs["model_id"] = model_id
+ if model_text is not DEFAULT:
+ kwargs["model_text"] = model_text
+ if pruning_config is not DEFAULT:
+ kwargs["pruning_config"] = pruning_config
+ if boost is not DEFAULT:
+ kwargs["boost"] = boost
+ if _name is not DEFAULT:
+ kwargs["_name"] = _name
+ super().__init__(**kwargs)
-class ScoreSort(AttrDict[Any]):
+class TokenPruningConfig(AttrDict[Any]):
"""
- :arg order:
+ :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than
+ this threshold times the average frequency of all tokens in the
+ specified field are considered outliers and pruned. Defaults to
+ `5` if omitted.
+ :arg tokens_weight_threshold: Tokens whose weight is less than this
+ threshold are considered nonsignificant and pruned. Defaults to
+ `0.4` if omitted.
+ :arg only_score_pruned_tokens: Whether to only score pruned tokens, vs
+ only scoring kept tokens.
"""
- order: Union[Literal["asc", "desc"], DefaultType]
+ tokens_freq_ratio_threshold: Union[int, DefaultType]
+ tokens_weight_threshold: Union[float, DefaultType]
+ only_score_pruned_tokens: Union[bool, DefaultType]
def __init__(
self,
*,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+ tokens_freq_ratio_threshold: Union[int, DefaultType] = DEFAULT,
+ tokens_weight_threshold: Union[float, DefaultType] = DEFAULT,
+ only_score_pruned_tokens: Union[bool, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if order is not DEFAULT:
- kwargs["order"] = order
+ if tokens_freq_ratio_threshold is not DEFAULT:
+ kwargs["tokens_freq_ratio_threshold"] = tokens_freq_ratio_threshold
+ if tokens_weight_threshold is not DEFAULT:
+ kwargs["tokens_weight_threshold"] = tokens_weight_threshold
+ if only_score_pruned_tokens is not DEFAULT:
+ kwargs["only_score_pruned_tokens"] = only_score_pruned_tokens
super().__init__(kwargs)
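
A minimal usage sketch for the two classes added above (not part of the patch); the model id and query text are placeholder values.

    from elasticsearch_dsl import types

    # token pruning settings mirroring the documented defaults
    pruning = types.TokenPruningConfig(
        tokens_freq_ratio_threshold=5,
        tokens_weight_threshold=0.4,
    )
    # body of a text expansion query using the pruning configuration
    text_expansion = types.TextExpansionQuery(
        model_id="my-elser-model",             # placeholder model id
        model_text="how to renew a passport",  # placeholder query text
        pruning_config=pruning,
    )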
-class GeoDistanceSort(AttrDict[Any]):
+class TopLeftBottomRightGeoBounds(AttrDict[Any]):
"""
- :arg _field: The field to use in this query.
- :arg _value: The query value for the field.
- :arg mode:
- :arg distance_type:
- :arg ignore_unmapped:
- :arg order:
- :arg unit:
- :arg nested:
+ :arg top_left: (required)
+ :arg bottom_right: (required)
"""
- _field: Union[str, "InstrumentedField", "DefaultType"]
- _value: Union[
- Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
- Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]],
+ top_left: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
Dict[str, Any],
- "DefaultType",
+ DefaultType,
]
- mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
- distance_type: Union[Literal["arc", "plane"], DefaultType]
- ignore_unmapped: Union[bool, DefaultType]
- order: Union[Literal["asc", "desc"], DefaultType]
- unit: Union[
- Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+ bottom_right: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
]
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
def __init__(
self,
- _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
- _value: Union[
- Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
- Sequence[
- Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
- ],
- Dict[str, Any],
- "DefaultType",
- ] = DEFAULT,
*,
- mode: Union[
- Literal["min", "max", "sum", "avg", "median"], DefaultType
+ top_left: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
] = DEFAULT,
- distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT,
- ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
- unit: Union[
- Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+ bottom_right: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
] = DEFAULT,
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
- if _field is not DEFAULT:
- kwargs[str(_field)] = _value
- if mode is not DEFAULT:
- kwargs["mode"] = mode
- if distance_type is not DEFAULT:
- kwargs["distance_type"] = distance_type
- if ignore_unmapped is not DEFAULT:
- kwargs["ignore_unmapped"] = ignore_unmapped
- if order is not DEFAULT:
- kwargs["order"] = order
- if unit is not DEFAULT:
- kwargs["unit"] = unit
- if nested is not DEFAULT:
- kwargs["nested"] = nested
+ if top_left is not DEFAULT:
+ kwargs["top_left"] = top_left
+ if bottom_right is not DEFAULT:
+ kwargs["bottom_right"] = bottom_right
super().__init__(kwargs)
-class ScriptSort(AttrDict[Any]):
+class TopMetricsValue(AttrDict[Any]):
"""
- :arg script: (required)
- :arg order:
- :arg type:
- :arg mode:
- :arg nested:
+ :arg field: (required) A field to return as a metric.
"""
- script: Union["Script", Dict[str, Any], DefaultType]
- order: Union[Literal["asc", "desc"], DefaultType]
- type: Union[Literal["string", "number", "version"], DefaultType]
- mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+ field: Union[str, InstrumentedField, DefaultType]
def __init__(
self,
*,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
- order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
- type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT,
- mode: Union[
- Literal["min", "max", "sum", "avg", "median"], DefaultType
- ] = DEFAULT,
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+ field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if script is not DEFAULT:
- kwargs["script"] = script
- if order is not DEFAULT:
- kwargs["order"] = order
- if type is not DEFAULT:
- kwargs["type"] = type
- if mode is not DEFAULT:
- kwargs["mode"] = mode
- if nested is not DEFAULT:
- kwargs["nested"] = nested
+ if field is not DEFAULT:
+ kwargs["field"] = str(field)
super().__init__(kwargs)
-class SpanContainingQuery(QueryBase):
+class TopRightBottomLeftGeoBounds(AttrDict[Any]):
"""
- :arg big: (required) Can be any span query. Matching spans from `big`
- that contain matches from `little` are returned.
- :arg little: (required) Can be any span query. Matching spans from
- `big` that contain matches from `little` are returned.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg top_right: (required)
+ :arg bottom_left: (required)
"""
- big: Union["SpanQuery", Dict[str, Any], DefaultType]
- little: Union["SpanQuery", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ top_right: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
+ ]
+ bottom_left: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
+ ]
def __init__(
self,
*,
- big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ top_right: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
+ ] = DEFAULT,
+ bottom_left: Union[
+ "LatLonGeoLocation",
+ "GeoHashLocation",
+ Sequence[float],
+ str,
+ Dict[str, Any],
+ DefaultType,
+ ] = DEFAULT,
**kwargs: Any,
):
- if big is not DEFAULT:
- kwargs["big"] = big
- if little is not DEFAULT:
- kwargs["little"] = little
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if top_right is not DEFAULT:
+ kwargs["top_right"] = top_right
+ if bottom_left is not DEFAULT:
+ kwargs["bottom_left"] = bottom_left
+ super().__init__(kwargs)
-class SpanFieldMaskingQuery(QueryBase):
+class WeightedAverageValue(AttrDict[Any]):
"""
- :arg field: (required)
- :arg query: (required)
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg field: The field from which to extract the values or weights.
+ :arg missing: A value or weight to use if the field is missing.
+ :arg script:
"""
field: Union[str, InstrumentedField, DefaultType]
- query: Union["SpanQuery", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ missing: Union[float, DefaultType]
+ script: Union["Script", Dict[str, Any], DefaultType]
def __init__(
self,
*,
field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- query: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
+ missing: Union[float, DefaultType] = DEFAULT,
+ script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
**kwargs: Any,
):
if field is not DEFAULT:
kwargs["field"] = str(field)
- if query is not DEFAULT:
- kwargs["query"] = query
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ if missing is not DEFAULT:
+ kwargs["missing"] = missing
+ if script is not DEFAULT:
+ kwargs["script"] = script
+ super().__init__(kwargs)
-class SpanFirstQuery(QueryBase):
+class WeightedTokensQuery(QueryBase):
"""
- :arg end: (required) Controls the maximum end position permitted in a
- match.
- :arg match: (required) Can be any other span type query.
+ :arg tokens: (required) The tokens representing this query
+ :arg pruning_config: Token pruning configurations
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -3903,24 +4234,26 @@ class SpanFirstQuery(QueryBase):
:arg _name:
"""
- end: Union[int, DefaultType]
- match: Union["SpanQuery", Dict[str, Any], DefaultType]
+ tokens: Union[Mapping[str, float], DefaultType]
+ pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- end: Union[int, DefaultType] = DEFAULT,
- match: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+ tokens: Union[Mapping[str, float], DefaultType] = DEFAULT,
+ pruning_config: Union[
+ "TokenPruningConfig", Dict[str, Any], DefaultType
+ ] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if end is not DEFAULT:
- kwargs["end"] = end
- if match is not DEFAULT:
- kwargs["match"] = match
+ if tokens is not DEFAULT:
+ kwargs["tokens"] = tokens
+ if pruning_config is not DEFAULT:
+ kwargs["pruning_config"] = pruning_config
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -3928,10 +4261,17 @@ def __init__(
super().__init__(**kwargs)
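
The same construction pattern applies to the WeightedTokensQuery added above; a short sketch with made-up token weights (not part of the patch).

    from elasticsearch_dsl import types

    weighted = types.WeightedTokensQuery(
        tokens={"renew": 1.2, "passport": 0.9},  # made-up token weights
        pruning_config=types.TokenPruningConfig(only_score_pruned_tokens=False),
    )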
-class SpanMultiTermQuery(QueryBase):
+class WildcardQuery(QueryBase):
"""
- :arg match: (required) Should be a multi term query (one of
- `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query).
+ :arg case_insensitive: Allows case insensitive matching of the pattern
+ with the indexed field values when set to true. Default is false,
+ which means the case sensitivity of matching depends on the
+ underlying field’s mapping.
+ :arg rewrite: Method used to rewrite the query.
+ :arg value: Wildcard pattern for terms you wish to find in the
+ provided field. Required when `wildcard` is not set.
+ :arg wildcard: Wildcard pattern for terms you wish to find in the
+ provided field. Required when `value` is not set.
:arg boost: Floating point number used to decrease or increase the
relevance scores of the query. Boost values are relative to the
default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -3940,20 +4280,32 @@ class SpanMultiTermQuery(QueryBase):
:arg _name:
"""
- match: Union[Query, DefaultType]
+ case_insensitive: Union[bool, DefaultType]
+ rewrite: Union[str, DefaultType]
+ value: Union[str, DefaultType]
+ wildcard: Union[str, DefaultType]
boost: Union[float, DefaultType]
_name: Union[str, DefaultType]
def __init__(
self,
*,
- match: Union[Query, DefaultType] = DEFAULT,
+ case_insensitive: Union[bool, DefaultType] = DEFAULT,
+ rewrite: Union[str, DefaultType] = DEFAULT,
+ value: Union[str, DefaultType] = DEFAULT,
+ wildcard: Union[str, DefaultType] = DEFAULT,
boost: Union[float, DefaultType] = DEFAULT,
_name: Union[str, DefaultType] = DEFAULT,
**kwargs: Any,
):
- if match is not DEFAULT:
- kwargs["match"] = match
+ if case_insensitive is not DEFAULT:
+ kwargs["case_insensitive"] = case_insensitive
+ if rewrite is not DEFAULT:
+ kwargs["rewrite"] = rewrite
+ if value is not DEFAULT:
+ kwargs["value"] = value
+ if wildcard is not DEFAULT:
+ kwargs["wildcard"] = wildcard
if boost is not DEFAULT:
kwargs["boost"] = boost
if _name is not DEFAULT:
@@ -3961,366 +4313,698 @@ def __init__(
super().__init__(**kwargs)
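
A sketch of the reworked WildcardQuery body (not part of the patch): either `value` or `wildcard` carries the pattern, with the other left unset.

    from elasticsearch_dsl import types

    wildcard_body = types.WildcardQuery(
        value="pass*",          # placeholder pattern
        case_insensitive=True,
        boost=0.5,
    )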
-class SpanNearQuery(QueryBase):
+class WktGeoBounds(AttrDict[Any]):
"""
- :arg clauses: (required) Array of one or more other span type queries.
- :arg in_order: Controls whether matches are required to be in-order.
- :arg slop: Controls the maximum number of intervening unmatched
- positions permitted.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg wkt: (required)
"""
- clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
- in_order: Union[bool, DefaultType]
- slop: Union[int, DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ wkt: Union[str, DefaultType]
- def __init__(
- self,
- *,
- clauses: Union[
- Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
- ] = DEFAULT,
- in_order: Union[bool, DefaultType] = DEFAULT,
- slop: Union[int, DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if clauses is not DEFAULT:
- kwargs["clauses"] = clauses
- if in_order is not DEFAULT:
- kwargs["in_order"] = in_order
- if slop is not DEFAULT:
- kwargs["slop"] = slop
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+ def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
+ if wkt is not DEFAULT:
+ kwargs["wkt"] = wkt
+ super().__init__(kwargs)
-class SpanNotQuery(QueryBase):
+class AggregationBreakdown(AttrDict[Any]):
+ """
+ :arg build_aggregation: (required)
+ :arg build_aggregation_count: (required)
+ :arg build_leaf_collector: (required)
+ :arg build_leaf_collector_count: (required)
+ :arg collect: (required)
+ :arg collect_count: (required)
+ :arg initialize: (required)
+ :arg initialize_count: (required)
+ :arg reduce: (required)
+ :arg reduce_count: (required)
+ :arg post_collection:
+ :arg post_collection_count:
+ """
+
+ build_aggregation: int
+ build_aggregation_count: int
+ build_leaf_collector: int
+ build_leaf_collector_count: int
+ collect: int
+ collect_count: int
+ initialize: int
+ initialize_count: int
+ reduce: int
+ reduce_count: int
+ post_collection: int
+ post_collection_count: int
+
+
+class AggregationProfile(AttrDict[Any]):
+ """
+ :arg breakdown: (required)
+ :arg description: (required)
+ :arg time_in_nanos: (required)
+ :arg type: (required)
+ :arg debug:
+ :arg children:
+ """
+
+ breakdown: "AggregationBreakdown"
+ description: str
+ time_in_nanos: Any
+ type: str
+ debug: "AggregationProfileDebug"
+ children: Sequence["AggregationProfile"]
+
+
+class AggregationProfileDebug(AttrDict[Any]):
+ """
+ :arg segments_with_multi_valued_ords:
+ :arg collection_strategy:
+ :arg segments_with_single_valued_ords:
+ :arg total_buckets:
+ :arg built_buckets:
+ :arg result_strategy:
+ :arg has_filter:
+ :arg delegate:
+ :arg delegate_debug:
+ :arg chars_fetched:
+ :arg extract_count:
+ :arg extract_ns:
+ :arg values_fetched:
+ :arg collect_analyzed_ns:
+ :arg collect_analyzed_count:
+ :arg surviving_buckets:
+ :arg ordinals_collectors_used:
+ :arg ordinals_collectors_overhead_too_high:
+ :arg string_hashing_collectors_used:
+ :arg numeric_collectors_used:
+ :arg empty_collectors_used:
+ :arg deferred_aggregators:
+ :arg segments_with_doc_count_field:
+ :arg segments_with_deleted_docs:
+ :arg filters:
+ :arg segments_counted:
+ :arg segments_collected:
+ :arg map_reducer:
+ :arg brute_force_used:
+ :arg dynamic_pruning_attempted:
+ :arg dynamic_pruning_used:
+ :arg skipped_due_to_no_data:
+ """
+
+ segments_with_multi_valued_ords: int
+ collection_strategy: str
+ segments_with_single_valued_ords: int
+ total_buckets: int
+ built_buckets: int
+ result_strategy: str
+ has_filter: bool
+ delegate: str
+ delegate_debug: "AggregationProfileDebug"
+ chars_fetched: int
+ extract_count: int
+ extract_ns: int
+ values_fetched: int
+ collect_analyzed_ns: int
+ collect_analyzed_count: int
+ surviving_buckets: int
+ ordinals_collectors_used: int
+ ordinals_collectors_overhead_too_high: int
+ string_hashing_collectors_used: int
+ numeric_collectors_used: int
+ empty_collectors_used: int
+ deferred_aggregators: Sequence[str]
+ segments_with_doc_count_field: int
+ segments_with_deleted_docs: int
+ filters: Sequence["AggregationProfileDelegateDebugFilter"]
+ segments_counted: int
+ segments_collected: int
+ map_reducer: str
+ brute_force_used: int
+ dynamic_pruning_attempted: int
+ dynamic_pruning_used: int
+ skipped_due_to_no_data: int
+
+
+class AggregationProfileDelegateDebugFilter(AttrDict[Any]):
+ """
+ :arg results_from_metadata:
+ :arg query:
+ :arg specialized_for:
+ :arg segments_counted_in_constant_time:
+ """
+
+ results_from_metadata: int
+ query: str
+ specialized_for: str
+ segments_counted_in_constant_time: int
+
+
+class BulkIndexByScrollFailure(AttrDict[Any]):
+ """
+ :arg cause: (required)
+ :arg id: (required)
+ :arg index: (required)
+ :arg status: (required)
+ :arg type: (required)
"""
- :arg exclude: (required) Span query whose matches must not overlap
- those returned.
- :arg include: (required) Span query whose matches are filtered.
- :arg dist: The number of tokens from within the include span that
- can’t have overlap with the exclude span. Equivalent to setting
- both `pre` and `post`.
- :arg post: The number of tokens after the include span that can’t have
- overlap with the exclude span.
- :arg pre: The number of tokens before the include span that can’t have
- overlap with the exclude span.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+
+ cause: "ErrorCause"
+ id: str
+ index: str
+ status: int
+ type: str
+
+
+class ClusterDetails(AttrDict[Any]):
+ """
+ :arg status: (required)
+ :arg indices: (required)
+ :arg timed_out: (required)
+ :arg took:
+ :arg _shards:
+ :arg failures:
"""
- exclude: Union["SpanQuery", Dict[str, Any], DefaultType]
- include: Union["SpanQuery", Dict[str, Any], DefaultType]
- dist: Union[int, DefaultType]
- post: Union[int, DefaultType]
- pre: Union[int, DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ status: Literal["running", "successful", "partial", "skipped", "failed"]
+ indices: str
+ timed_out: bool
+ took: Any
+ _shards: "ShardStatistics"
+ failures: Sequence["ShardFailure"]
- def __init__(
- self,
- *,
- exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- dist: Union[int, DefaultType] = DEFAULT,
- post: Union[int, DefaultType] = DEFAULT,
- pre: Union[int, DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if exclude is not DEFAULT:
- kwargs["exclude"] = exclude
- if include is not DEFAULT:
- kwargs["include"] = include
- if dist is not DEFAULT:
- kwargs["dist"] = dist
- if post is not DEFAULT:
- kwargs["post"] = post
- if pre is not DEFAULT:
- kwargs["pre"] = pre
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+
+class ClusterStatistics(AttrDict[Any]):
+ """
+ :arg skipped: (required)
+ :arg successful: (required)
+ :arg total: (required)
+ :arg running: (required)
+ :arg partial: (required)
+ :arg failed: (required)
+ :arg details:
+ """
+
+ skipped: int
+ successful: int
+ total: int
+ running: int
+ partial: int
+ failed: int
+ details: Mapping[str, "ClusterDetails"]
+
+
+class Collector(AttrDict[Any]):
+ """
+ :arg name: (required)
+ :arg reason: (required)
+ :arg time_in_nanos: (required)
+ :arg children:
+ """
+
+ name: str
+ reason: str
+ time_in_nanos: Any
+ children: Sequence["Collector"]
+
+
+class SuggestBase(AttrDict[Any]):
+ """
+ :arg length: (required)
+ :arg offset: (required)
+ :arg text: (required)
+ """
+
+ length: int
+ offset: int
+ text: str
+
+
+class CompletionSuggest(SuggestBase):
+ """
+ :arg options: (required)
+ :arg length: (required)
+ :arg offset: (required)
+ :arg text: (required)
+ """
+
+ options: Sequence["CompletionSuggestOption"]
+ length: int
+ offset: int
+ text: str
+
+
+class CompletionSuggestOption(AttrDict[Any]):
+ """
+ :arg text: (required)
+ :arg collate_match:
+ :arg contexts:
+ :arg fields:
+ :arg _id:
+ :arg _index:
+ :arg _routing:
+ :arg _score:
+ :arg _source:
+ :arg score:
+ """
+
+ text: str
+ collate_match: bool
+ contexts: Mapping[
+ str,
+ Sequence[
+ Union[
+ str, Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+ ]
+ ],
+ ]
+ fields: Mapping[str, Any]
+ _id: str
+ _index: str
+ _routing: str
+ _score: float
+ _source: Any
+ score: float
+
+
+class DfsKnnProfile(AttrDict[Any]):
+ """
+ :arg query: (required)
+ :arg rewrite_time: (required)
+ :arg collector: (required)
+ :arg vector_operations_count:
+ """
+
+ query: Sequence["KnnQueryProfileResult"]
+ rewrite_time: int
+ collector: Sequence["KnnCollectorResult"]
+ vector_operations_count: int
+
+
+class DfsProfile(AttrDict[Any]):
+ """
+ :arg statistics:
+ :arg knn:
+ """
+
+ statistics: "DfsStatisticsProfile"
+ knn: Sequence["DfsKnnProfile"]
+
+
+class DfsStatisticsBreakdown(AttrDict[Any]):
+ """
+ :arg collection_statistics: (required)
+ :arg collection_statistics_count: (required)
+ :arg create_weight: (required)
+ :arg create_weight_count: (required)
+ :arg rewrite: (required)
+ :arg rewrite_count: (required)
+ :arg term_statistics: (required)
+ :arg term_statistics_count: (required)
+ """
+
+ collection_statistics: int
+ collection_statistics_count: int
+ create_weight: int
+ create_weight_count: int
+ rewrite: int
+ rewrite_count: int
+ term_statistics: int
+ term_statistics_count: int
+
+
+class DfsStatisticsProfile(AttrDict[Any]):
+ """
+ :arg type: (required)
+ :arg description: (required)
+ :arg time_in_nanos: (required)
+ :arg breakdown: (required)
+ :arg time:
+ :arg debug:
+ :arg children:
+ """
+
+ type: str
+ description: str
+ time_in_nanos: Any
+ breakdown: "DfsStatisticsBreakdown"
+ time: Any
+ debug: Mapping[str, Any]
+ children: Sequence["DfsStatisticsProfile"]
+
+
+class ErrorCause(AttrDict[Any]):
+ """
+ :arg type: (required) The type of error
+ :arg reason: A human-readable explanation of the error, in English
+ :arg stack_trace: The server stack trace. Present only if the
+ `error_trace=true` parameter was sent with the request.
+ :arg caused_by:
+ :arg root_cause:
+ :arg suppressed:
+ """
+
+ type: str
+ reason: str
+ stack_trace: str
+ caused_by: "ErrorCause"
+ root_cause: Sequence["ErrorCause"]
+ suppressed: Sequence["ErrorCause"]
-class SpanOrQuery(QueryBase):
+class FetchProfile(AttrDict[Any]):
"""
- :arg clauses: (required) Array of one or more other span type queries.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg type: (required)
+ :arg description: (required)
+ :arg time_in_nanos: (required)
+ :arg breakdown: (required)
+ :arg debug:
+ :arg children:
"""
- clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ type: str
+ description: str
+ time_in_nanos: Any
+ breakdown: "FetchProfileBreakdown"
+ debug: "FetchProfileDebug"
+ children: Sequence["FetchProfile"]
- def __init__(
- self,
- *,
- clauses: Union[
- Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
- ] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if clauses is not DEFAULT:
- kwargs["clauses"] = clauses
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+class FetchProfileBreakdown(AttrDict[Any]):
+ """
+ :arg load_source:
+ :arg load_source_count:
+ :arg load_stored_fields:
+ :arg load_stored_fields_count:
+ :arg next_reader:
+ :arg next_reader_count:
+ :arg process_count:
+ :arg process:
+ """
-class SpanWithinQuery(QueryBase):
+ load_source: int
+ load_source_count: int
+ load_stored_fields: int
+ load_stored_fields_count: int
+ next_reader: int
+ next_reader_count: int
+ process_count: int
+ process: int
+
+
+class FetchProfileDebug(AttrDict[Any]):
"""
- :arg big: (required) Can be any span query. Matching spans from
- `little` that are enclosed within `big` are returned.
- :arg little: (required) Can be any span query. Matching spans from
- `little` that are enclosed within `big` are returned.
- :arg boost: Floating point number used to decrease or increase the
- relevance scores of the query. Boost values are relative to the
- default value of 1.0. A boost value between 0 and 1.0 decreases
- the relevance score. A value greater than 1.0 increases the
- relevance score. Defaults to `1` if omitted.
- :arg _name:
+ :arg stored_fields:
+ :arg fast_path:
+ """
+
+ stored_fields: Sequence[str]
+ fast_path: int
+
+
+class KnnCollectorResult(AttrDict[Any]):
+ """
+ :arg name: (required)
+ :arg reason: (required)
+ :arg time_in_nanos: (required)
+ :arg time:
+ :arg children:
+ """
+
+ name: str
+ reason: str
+ time_in_nanos: Any
+ time: Any
+ children: Sequence["KnnCollectorResult"]
+
+
+class KnnQueryProfileBreakdown(AttrDict[Any]):
+ """
+ :arg advance: (required)
+ :arg advance_count: (required)
+ :arg build_scorer: (required)
+ :arg build_scorer_count: (required)
+ :arg compute_max_score: (required)
+ :arg compute_max_score_count: (required)
+ :arg count_weight: (required)
+ :arg count_weight_count: (required)
+ :arg create_weight: (required)
+ :arg create_weight_count: (required)
+ :arg match: (required)
+ :arg match_count: (required)
+ :arg next_doc: (required)
+ :arg next_doc_count: (required)
+ :arg score: (required)
+ :arg score_count: (required)
+ :arg set_min_competitive_score: (required)
+ :arg set_min_competitive_score_count: (required)
+ :arg shallow_advance: (required)
+ :arg shallow_advance_count: (required)
+ """
+
+ advance: int
+ advance_count: int
+ build_scorer: int
+ build_scorer_count: int
+ compute_max_score: int
+ compute_max_score_count: int
+ count_weight: int
+ count_weight_count: int
+ create_weight: int
+ create_weight_count: int
+ match: int
+ match_count: int
+ next_doc: int
+ next_doc_count: int
+ score: int
+ score_count: int
+ set_min_competitive_score: int
+ set_min_competitive_score_count: int
+ shallow_advance: int
+ shallow_advance_count: int
+
+
+class KnnQueryProfileResult(AttrDict[Any]):
+ """
+ :arg type: (required)
+ :arg description: (required)
+ :arg time_in_nanos: (required)
+ :arg breakdown: (required)
+ :arg time:
+ :arg debug:
+ :arg children:
+ """
+
+ type: str
+ description: str
+ time_in_nanos: Any
+ breakdown: "KnnQueryProfileBreakdown"
+ time: Any
+ debug: Mapping[str, Any]
+ children: Sequence["KnnQueryProfileResult"]
+
+
+class PhraseSuggest(SuggestBase):
+ """
+ :arg options: (required)
+ :arg length: (required)
+ :arg offset: (required)
+ :arg text: (required)
"""
- big: Union["SpanQuery", Dict[str, Any], DefaultType]
- little: Union["SpanQuery", Dict[str, Any], DefaultType]
- boost: Union[float, DefaultType]
- _name: Union[str, DefaultType]
+ options: Sequence["PhraseSuggestOption"]
+ length: int
+ offset: int
+ text: str
- def __init__(
- self,
- *,
- big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
- boost: Union[float, DefaultType] = DEFAULT,
- _name: Union[str, DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if big is not DEFAULT:
- kwargs["big"] = big
- if little is not DEFAULT:
- kwargs["little"] = little
- if boost is not DEFAULT:
- kwargs["boost"] = boost
- if _name is not DEFAULT:
- kwargs["_name"] = _name
- super().__init__(**kwargs)
+class PhraseSuggestOption(AttrDict[Any]):
+ """
+ :arg text: (required)
+ :arg score: (required)
+ :arg highlighted:
+ :arg collate_match:
+ """
+
+ text: str
+ score: float
+ highlighted: str
+ collate_match: bool
+
+
+class Profile(AttrDict[Any]):
+ """
+ :arg shards: (required)
+ """
+
+ shards: Sequence["ShardProfile"]
+
+
+class QueryBreakdown(AttrDict[Any]):
+ """
+ :arg advance: (required)
+ :arg advance_count: (required)
+ :arg build_scorer: (required)
+ :arg build_scorer_count: (required)
+ :arg create_weight: (required)
+ :arg create_weight_count: (required)
+ :arg match: (required)
+ :arg match_count: (required)
+ :arg shallow_advance: (required)
+ :arg shallow_advance_count: (required)
+ :arg next_doc: (required)
+ :arg next_doc_count: (required)
+ :arg score: (required)
+ :arg score_count: (required)
+ :arg compute_max_score: (required)
+ :arg compute_max_score_count: (required)
+ :arg count_weight: (required)
+ :arg count_weight_count: (required)
+ :arg set_min_competitive_score: (required)
+ :arg set_min_competitive_score_count: (required)
+ """
-class BucketCorrelationFunctionCountCorrelationIndicator(AttrDict[Any]):
+ advance: int
+ advance_count: int
+ build_scorer: int
+ build_scorer_count: int
+ create_weight: int
+ create_weight_count: int
+ match: int
+ match_count: int
+ shallow_advance: int
+ shallow_advance_count: int
+ next_doc: int
+ next_doc_count: int
+ score: int
+ score_count: int
+ compute_max_score: int
+ compute_max_score_count: int
+ count_weight: int
+ count_weight_count: int
+ set_min_competitive_score: int
+ set_min_competitive_score_count: int
+
+
+class QueryProfile(AttrDict[Any]):
"""
- :arg doc_count: (required) The total number of documents that
- initially created the expectations. It’s required to be greater
- than or equal to the sum of all values in the buckets_path as this
- is the originating superset of data to which the term values are
- correlated.
- :arg expectations: (required) An array of numbers with which to
- correlate the configured `bucket_path` values. The length of this
- value must always equal the number of buckets returned by the
- `bucket_path`.
- :arg fractions: An array of fractions to use when averaging and
- calculating variance. This should be used if the pre-calculated
- data and the buckets_path have known gaps. The length of
- fractions, if provided, must equal expectations.
+ :arg breakdown: (required)
+ :arg description: (required)
+ :arg time_in_nanos: (required)
+ :arg type: (required)
+ :arg children:
"""
- doc_count: Union[int, DefaultType]
- expectations: Union[Sequence[float], DefaultType]
- fractions: Union[Sequence[float], DefaultType]
+ breakdown: "QueryBreakdown"
+ description: str
+ time_in_nanos: Any
+ type: str
+ children: Sequence["QueryProfile"]
- def __init__(
- self,
- *,
- doc_count: Union[int, DefaultType] = DEFAULT,
- expectations: Union[Sequence[float], DefaultType] = DEFAULT,
- fractions: Union[Sequence[float], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if doc_count is not DEFAULT:
- kwargs["doc_count"] = doc_count
- if expectations is not DEFAULT:
- kwargs["expectations"] = expectations
- if fractions is not DEFAULT:
- kwargs["fractions"] = fractions
- super().__init__(kwargs)
+class Retries(AttrDict[Any]):
+ """
+ :arg bulk: (required)
+ :arg search: (required)
+ """
+
+ bulk: int
+ search: int
-class IntervalsContainer(AttrDict[Any]):
+
+class SearchProfile(AttrDict[Any]):
"""
- :arg all_of: Returns matches that span a combination of other rules.
- :arg any_of: Returns intervals produced by any of its sub-rules.
- :arg fuzzy: Matches analyzed text.
- :arg match: Matches analyzed text.
- :arg prefix: Matches terms that start with a specified set of
- characters.
- :arg wildcard: Matches terms using a wildcard pattern.
+ :arg collector: (required)
+ :arg query: (required)
+ :arg rewrite_time: (required)
"""
- all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType]
- any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType]
- fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType]
- match: Union["IntervalsMatch", Dict[str, Any], DefaultType]
- prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType]
- wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType]
+ collector: Sequence["Collector"]
+ query: Sequence["QueryProfile"]
+ rewrite_time: int
- def __init__(
- self,
- *,
- all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT,
- any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT,
- fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT,
- match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT,
- prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT,
- wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if all_of is not DEFAULT:
- kwargs["all_of"] = all_of
- if any_of is not DEFAULT:
- kwargs["any_of"] = any_of
- if fuzzy is not DEFAULT:
- kwargs["fuzzy"] = fuzzy
- if match is not DEFAULT:
- kwargs["match"] = match
- if prefix is not DEFAULT:
- kwargs["prefix"] = prefix
- if wildcard is not DEFAULT:
- kwargs["wildcard"] = wildcard
- super().__init__(kwargs)
+class ShardFailure(AttrDict[Any]):
+ """
+ :arg reason: (required)
+ :arg shard: (required)
+ :arg index:
+ :arg node:
+ :arg status:
+ """
+
+ reason: "ErrorCause"
+ shard: int
+ index: str
+ node: str
+ status: str
-class IntervalsFilter(AttrDict[Any]):
+
+class ShardProfile(AttrDict[Any]):
"""
- :arg after: Query used to return intervals that follow an interval
- from the `filter` rule.
- :arg before: Query used to return intervals that occur before an
- interval from the `filter` rule.
- :arg contained_by: Query used to return intervals contained by an
- interval from the `filter` rule.
- :arg containing: Query used to return intervals that contain an
- interval from the `filter` rule.
- :arg not_contained_by: Query used to return intervals that are **not**
- contained by an interval from the `filter` rule.
- :arg not_containing: Query used to return intervals that do **not**
- contain an interval from the `filter` rule.
- :arg not_overlapping: Query used to return intervals that do **not**
- overlap with an interval from the `filter` rule.
- :arg overlapping: Query used to return intervals that overlap with an
- interval from the `filter` rule.
- :arg script: Script used to return matching documents. This script
- must return a boolean value: `true` or `false`.
+ :arg aggregations: (required)
+ :arg cluster: (required)
+ :arg id: (required)
+ :arg index: (required)
+ :arg node_id: (required)
+ :arg searches: (required)
+ :arg shard_id: (required)
+ :arg dfs:
+ :arg fetch:
"""
- after: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- before: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- containing: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- not_contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- not_containing: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- not_overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType]
- script: Union["Script", Dict[str, Any], DefaultType]
+ aggregations: Sequence["AggregationProfile"]
+ cluster: str
+ id: str
+ index: str
+ node_id: str
+ searches: Sequence["SearchProfile"]
+ shard_id: int
+ dfs: "DfsProfile"
+ fetch: "FetchProfile"
- def __init__(
- self,
- *,
- after: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
- before: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
- contained_by: Union[
- "IntervalsContainer", Dict[str, Any], DefaultType
- ] = DEFAULT,
- containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
- not_contained_by: Union[
- "IntervalsContainer", Dict[str, Any], DefaultType
- ] = DEFAULT,
- not_containing: Union[
- "IntervalsContainer", Dict[str, Any], DefaultType
- ] = DEFAULT,
- not_overlapping: Union[
- "IntervalsContainer", Dict[str, Any], DefaultType
- ] = DEFAULT,
- overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
- script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if after is not DEFAULT:
- kwargs["after"] = after
- if before is not DEFAULT:
- kwargs["before"] = before
- if contained_by is not DEFAULT:
- kwargs["contained_by"] = contained_by
- if containing is not DEFAULT:
- kwargs["containing"] = containing
- if not_contained_by is not DEFAULT:
- kwargs["not_contained_by"] = not_contained_by
- if not_containing is not DEFAULT:
- kwargs["not_containing"] = not_containing
- if not_overlapping is not DEFAULT:
- kwargs["not_overlapping"] = not_overlapping
- if overlapping is not DEFAULT:
- kwargs["overlapping"] = overlapping
- if script is not DEFAULT:
- kwargs["script"] = script
- super().__init__(kwargs)
+class ShardStatistics(AttrDict[Any]):
+ """
+ :arg failed: (required)
+ :arg successful: (required) Indicates how many shards have
+ successfully run the search.
+ :arg total: (required) Indicates how many shards the search will run
+ on overall.
+ :arg failures:
+ :arg skipped:
+ """
-class NestedSortValue(AttrDict[Any]):
+ failed: int
+ successful: int
+ total: int
+ failures: Sequence["ShardFailure"]
+ skipped: int
+
+
+class TermSuggest(SuggestBase):
"""
- :arg path: (required)
- :arg filter:
- :arg max_children:
- :arg nested:
+ :arg options: (required)
+ :arg length: (required)
+ :arg offset: (required)
+ :arg text: (required)
"""
- path: Union[str, InstrumentedField, DefaultType]
- filter: Union[Query, DefaultType]
- max_children: Union[int, DefaultType]
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+ options: Sequence["TermSuggestOption"]
+ length: int
+ offset: int
+ text: str
- def __init__(
- self,
- *,
- path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
- filter: Union[Query, DefaultType] = DEFAULT,
- max_children: Union[int, DefaultType] = DEFAULT,
- nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
- **kwargs: Any,
- ):
- if path is not DEFAULT:
- kwargs["path"] = str(path)
- if filter is not DEFAULT:
- kwargs["filter"] = filter
- if max_children is not DEFAULT:
- kwargs["max_children"] = max_children
- if nested is not DEFAULT:
- kwargs["nested"] = nested
- super().__init__(kwargs)
+
+class TermSuggestOption(AttrDict[Any]):
+ """
+ :arg text: (required)
+ :arg score: (required)
+ :arg freq: (required)
+ :arg highlighted:
+ :arg collate_match:
+ """
+
+ text: str
+ score: float
+ freq: int
+ highlighted: str
+ collate_match: bool
diff --git a/examples/async/completion.py b/examples/async/completion.py
index a7a7a79e..dd8fe721 100644
--- a/examples/async/completion.py
+++ b/examples/async/completion.py
@@ -103,7 +103,7 @@ async def main() -> None:
response = await s.execute()
# print out all the options we got
- for option in response.suggest.auto_complete[0].options:
+ for option in response.suggest["auto_complete"][0].options:
print("%10s: %25s (%d)" % (text, option._source.name, option._score))
# close the connection
diff --git a/examples/completion.py b/examples/completion.py
index 81d1b0e4..ccbd4502 100644
--- a/examples/completion.py
+++ b/examples/completion.py
@@ -102,7 +102,7 @@ def main() -> None:
response = s.execute()
# print out all the options we got
- for option in response.suggest.auto_complete[0].options:
+ for option in response.suggest["auto_complete"][0].options:
print("%10s: %25s (%d)" % (text, option._source.name, option._score))
# close the connection
diff --git a/tests/test_integration/test_examples/_async/test_completion.py b/tests/test_integration/test_examples/_async/test_completion.py
index 5b890b3d..e9716c1d 100644
--- a/tests/test_integration/test_examples/_async/test_completion.py
+++ b/tests/test_integration/test_examples/_async/test_completion.py
@@ -32,7 +32,7 @@ async def test_person_suggests_on_all_variants_of_name(
s = Person.search().suggest("t", "kra", completion={"field": "suggest"})
response = await s.execute()
- opts = response.suggest.t[0].options
+ opts = response.suggest["t"][0].options
assert 1 == len(opts)
assert opts[0]._score == 42
diff --git a/tests/test_integration/test_examples/_sync/test_completion.py b/tests/test_integration/test_examples/_sync/test_completion.py
index 2e922710..6dec13e2 100644
--- a/tests/test_integration/test_examples/_sync/test_completion.py
+++ b/tests/test_integration/test_examples/_sync/test_completion.py
@@ -32,7 +32,7 @@ def test_person_suggests_on_all_variants_of_name(
s = Person.search().suggest("t", "kra", completion={"field": "suggest"})
response = s.execute()
- opts = response.suggest.t[0].options
+ opts = response.suggest["t"][0].options
assert 1 == len(opts)
assert opts[0]._score == 42
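
Because `Response.suggest` is now typed as a Mapping, item access is the form type checkers understand (attribute access still works at runtime through AttrDict). A hedged sketch mirroring the examples above; the index name and a configured connection are assumptions.

    from elasticsearch_dsl import Search

    s = Search(index="test-suggest").suggest(
        "t", "kra", completion={"field": "suggest"}
    )
    response = s.execute()
    for option in response.suggest["t"][0].options:  # dict-style access is typed
        print(option._score)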
diff --git a/utils/generator.py b/utils/generator.py
index aeeaa9d8..39a3eaaf 100644
--- a/utils/generator.py
+++ b/utils/generator.py
@@ -33,6 +33,7 @@
)
query_py = jinja_env.get_template("query.py.tpl")
aggs_py = jinja_env.get_template("aggs.py.tpl")
+response_init_py = jinja_env.get_template("response.__init__.py.tpl")
types_py = jinja_env.get_template("types.py.tpl")
# map with name replacements for Elasticsearch attributes
@@ -119,6 +120,7 @@ def __init__(self):
# Any interfaces collected here are then rendered as Python in the
# types.py module.
self.interfaces = []
+ self.response_interfaces = []
def find_type(self, name, namespace=None):
for t in self.schema["types"]:
@@ -139,13 +141,20 @@ def inherits_from(self, type_, name, namespace=None):
return True
return False
- def get_python_type(self, schema_type):
+ def get_python_type(self, schema_type, for_response=False):
"""Obtain Python typing details for a given schema type
This method returns a tuple. The first element is a string with the
Python type hint. The second element is a dictionary with Python DSL
specific typing details to be stored in the DslBase._param_defs
attribute (or None if the type does not need to be in _param_defs).
+
+ When `for_response` is `False`, any new interfaces that are discovered
+ are registered to be generated in "request" style, with alternative
+ Dict type hints and default values. If `for_response` is `True`,
+ interfaces are generated just with their declared type, without the
+ Dict alternative and without defaults, to help type checkers be more
+ effective at parsing response expressions.
"""
if schema_type["kind"] == "instance_of":
type_name = schema_type["type"]
@@ -167,7 +176,8 @@ def get_python_type(self, schema_type):
else:
# not an instance of a native type, so we get the type and try again
return self.get_python_type(
- self.find_type(type_name["name"], type_name["namespace"])
+ self.find_type(type_name["name"], type_name["namespace"]),
+ for_response=for_response,
)
elif (
type_name["namespace"] == "_types.query_dsl"
@@ -190,21 +200,27 @@ def get_python_type(self, schema_type):
# for any other instances we get the type and recurse
type_ = self.find_type(type_name["name"], type_name["namespace"])
if type_:
- return self.get_python_type(type_)
+ return self.get_python_type(type_, for_response=for_response)
elif schema_type["kind"] == "type_alias":
# for an alias, we use the aliased type
- return self.get_python_type(schema_type["type"])
+ return self.get_python_type(schema_type["type"], for_response=for_response)
elif schema_type["kind"] == "array_of":
# for arrays we use Sequence[element_type]
- type_, param = self.get_python_type(schema_type["value"])
+ type_, param = self.get_python_type(
+ schema_type["value"], for_response=for_response
+ )
return f"Sequence[{type_}]", {**param, "multi": True} if param else None
elif schema_type["kind"] == "dictionary_of":
# for dicts we use Mapping[key_type, value_type]
- key_type, key_param = self.get_python_type(schema_type["key"])
- value_type, value_param = self.get_python_type(schema_type["value"])
+ key_type, key_param = self.get_python_type(
+ schema_type["key"], for_response=for_response
+ )
+ value_type, value_param = self.get_python_type(
+ schema_type["value"], for_response=for_response
+ )
return f"Mapping[{key_type}, {value_type}]", (
{**value_param, "hash": True} if value_param else None
)
@@ -217,11 +233,25 @@ def get_python_type(self, schema_type):
and schema_type["items"][0] == schema_type["items"][1]["value"]
):
# special kind of unions in the form Union[type, Sequence[type]]
- type_, param = self.get_python_type(schema_type["items"][0])
- return (
- f"Union[{type_}, Sequence[{type_}]]",
- ({"type": param["type"], "multi": True} if param else None),
+ type_, param = self.get_python_type(
+ schema_type["items"][0], for_response=for_response
)
+ if schema_type["items"][0]["type"]["name"] in [
+ "CompletionSuggestOption",
+ "PhraseSuggestOption",
+ "TermSuggestOption",
+ ]:
+ # for suggest types we simplify this type and return just the array form
+ return (
+ f"Sequence[{type_}]",
+ ({"type": param["type"], "multi": True} if param else None),
+ )
+ else:
+ # for all other types we produce a union with the two alternatives
+ return (
+ f"Union[{type_}, Sequence[{type_}]]",
+ ({"type": param["type"], "multi": True} if param else None),
+ )
elif (
len(schema_type["items"]) == 2
and schema_type["items"][0]["kind"] == "instance_of"
@@ -239,7 +269,10 @@ def get_python_type(self, schema_type):
# generic union type
types = list(
dict.fromkeys( # eliminate duplicates
- [self.get_python_type(t) for t in schema_type["items"]]
+ [
+ self.get_python_type(t, for_response=for_response)
+ for t in schema_type["items"]
+ ]
)
)
return "Union[" + ", ".join([type_ for type_, _ in types]) + "]", None
@@ -279,6 +312,8 @@ def get_python_type(self, schema_type):
# and add the interface to the interfaces.py module
if schema_type["name"]["name"] not in self.interfaces:
self.interfaces.append(schema_type["name"]["name"])
+ if for_response:
+ self.response_interfaces.append(schema_type["name"]["name"])
return f"\"types.{schema_type['name']['name']}\"", None
elif schema_type["kind"] == "user_defined_value":
# user_defined_value maps to Python's Any type
@@ -286,7 +321,7 @@ def get_python_type(self, schema_type):
raise RuntimeError(f"Cannot find Python type for {schema_type}")
- def add_attribute(self, k, arg, for_types_py=False):
+ def add_attribute(self, k, arg, for_types_py=False, for_response=False):
"""Add an attribute to the internal representation of a class.
This method adds the argument `arg` to the data structure for a class
@@ -302,18 +337,25 @@ def add_attribute(self, k, arg, for_types_py=False):
are kept to prevent forward references, but the "types." namespace is
removed. When `for_types_py` is `False`, all non-native types use
quotes and are namespaced.
+
+ When `for_response` is `True`, type hints are not given the optional
+ dictionary representation, nor the `DefaultType` used for omitted
+ attributes.
"""
try:
- type_, param = schema.get_python_type(arg["type"])
+ type_, param = schema.get_python_type(
+ arg["type"], for_response=for_response
+ )
except RuntimeError:
type_ = "Any"
param = None
- if type_ != "Any":
- if 'Sequence["types.' in type_:
- type_ = add_seq_dict_type(type_) # interfaces can be given as dicts
- elif "types." in type_:
- type_ = add_dict_type(type_) # interfaces can be given as dicts
- type_ = add_not_set(type_)
+ if not for_response:
+ if type_ != "Any":
+ if 'Sequence["types.' in type_:
+ type_ = add_seq_dict_type(type_) # interfaces can be given as dicts
+ elif "types." in type_:
+ type_ = add_dict_type(type_) # interfaces can be given as dicts
+ type_ = add_not_set(type_)
if for_types_py:
type_ = type_.replace('"DefaultType"', "DefaultType")
type_ = type_.replace('"InstrumentedField"', "InstrumentedField")
@@ -355,7 +397,7 @@ def add_attribute(self, k, arg, for_types_py=False):
if param and "params" in k:
k["params"].append(param)
- def add_behaviors(self, type_, k, for_types_py=False):
+ def add_behaviors(self, type_, k, for_types_py=False, for_response=False):
"""Add behaviors reported in the specification of the given type to the
class representation.
"""
@@ -367,9 +409,13 @@ class representation.
):
# we do not support this behavior, so we ignore it
continue
- key_type, _ = schema.get_python_type(behavior["generics"][0])
+ key_type, _ = schema.get_python_type(
+ behavior["generics"][0], for_response=for_response
+ )
if "InstrumentedField" in key_type:
- value_type, _ = schema.get_python_type(behavior["generics"][1])
+ value_type, _ = schema.get_python_type(
+ behavior["generics"][1], for_response=for_response
+ )
if for_types_py:
value_type = value_type.replace('"DefaultType"', "DefaultType")
value_type = value_type.replace(
@@ -491,7 +537,8 @@ def property_to_python_class(self, p):
# for unions we create sub-classes
for other in type_["type"]["items"]:
other_class = self.interface_to_python_class(
- other["type"]["name"], self.interfaces, for_types_py=False
+ other["type"]["name"],
+ for_types_py=False,
)
other_class["parent"] = k["name"]
other_classes.append(other_class)
@@ -549,7 +596,14 @@ def property_to_python_class(self, p):
raise RuntimeError(f"Cannot generate code for type {p['type']}")
return [k] + other_classes
- def interface_to_python_class(self, interface, interfaces, for_types_py=True):
+ def interface_to_python_class(
+ self,
+ interface,
+ namespace=None,
+ *,
+ for_types_py=True,
+ for_response=False,
+ ):
"""Return a dictionary with template data necessary to render an
interface as a Python class.
@@ -574,23 +628,52 @@ def interface_to_python_class(self, interface, interfaces, for_types_py=True):
}
```
"""
- type_ = schema.find_type(interface)
- if type_["kind"] != "interface":
+ type_ = self.find_type(interface, namespace)
+ if type_["kind"] not in ["interface", "response"]:
raise RuntimeError(f"Type {interface} is not an interface")
- k = {"name": interface, "args": []}
- self.add_behaviors(type_, k, for_types_py=for_types_py)
+ if type_["kind"] == "response":
+ # we consider responses as interfaces because they also have properties
+ # but the location of the properties is different
+ type_ = type_["body"]
+ k = {"name": interface, "for_response": for_response, "args": []}
+ self.add_behaviors(
+ type_, k, for_types_py=for_types_py, for_response=for_response
+ )
while True:
for arg in type_["properties"]:
- schema.add_attribute(k, arg, for_types_py=for_types_py)
+ if interface == "ResponseBody" and arg["name"] == "hits":
+ k["args"].append(
+ {
+ "name": "hits",
+ "type": "List[_R]",
+ "doc": [":arg hits: search results"],
+ "required": arg["required"],
+ }
+ )
+ elif interface == "ResponseBody" and arg["name"] == "aggregations":
+ k["args"].append(
+ {
+ "name": "aggregations",
+ "type": '"AggResponse[_R]"',
+ "doc": [":arg aggregations: aggregation results"],
+ "required": arg["required"],
+ }
+ )
+ else:
+ schema.add_attribute(
+ k, arg, for_types_py=for_types_py, for_response=for_response
+ )
if "inherits" not in type_ or "type" not in type_["inherits"]:
break
if "parent" not in k:
k["parent"] = type_["inherits"]["type"]["name"]
- if type_["inherits"]["type"]["name"] not in interfaces:
- interfaces.append(type_["inherits"]["type"]["name"])
- type_ = schema.find_type(
+ if type_["inherits"]["type"]["name"] not in self.interfaces:
+ self.interfaces.append(type_["inherits"]["type"]["name"])
+ if for_response:
+ self.response_interfaces.append(type_["inherits"]["type"]["name"])
+ type_ = self.find_type(
type_["inherits"]["type"]["name"],
type_["inherits"]["type"]["namespace"],
)
@@ -626,18 +709,48 @@ def generate_aggs_py(schema, filename):
print(f"Generated {filename}.")
+def generate_response_init_py(schema, filename):
+ """Generate response/__init__.py with all the response properties
+ documented and typed.
+ """
+ search_response = schema.interface_to_python_class(
+ "ResponseBody",
+ "_global.search",
+ for_types_py=False,
+ for_response=True,
+ )
+ ubq_response = schema.interface_to_python_class(
+ "Response",
+ "_global.update_by_query",
+ for_types_py=False,
+ for_response=True,
+ )
+ with open(filename, "wt") as f:
+ f.write(
+ response_init_py.render(response=search_response, ubq_response=ubq_response)
+ )
+ print(f"Generated {filename}.")
+
+
def generate_types_py(schema, filename):
"""Generate types.py"""
classes = {}
- schema.interfaces = sorted(schema.interfaces)
for interface in schema.interfaces:
if interface == "PipeSeparatedFlags":
continue # handled as a special case
- k = schema.interface_to_python_class(interface, schema.interfaces)
+ for_response = interface in schema.response_interfaces
+ k = schema.interface_to_python_class(
+ interface, for_types_py=True, for_response=for_response
+ )
classes[k["name"]] = k
+ # sort classes by being request/response and then by name
+ sorted_classes = sorted(
+ list(classes.keys()),
+ key=lambda i: str(int(i in schema.response_interfaces)) + i,
+ )
classes_list = []
- for n in classes:
+ for n in sorted_classes:
k = classes[n]
if k in classes_list:
continue
@@ -662,4 +775,7 @@ def generate_types_py(schema, filename):
schema = ElasticsearchSchema()
generate_query_py(schema, "elasticsearch_dsl/query.py")
generate_aggs_py(schema, "elasticsearch_dsl/aggs.py")
+ generate_response_init_py(schema, "elasticsearch_dsl/response/__init__.py")
+ # generate_response_hit_py(schema, "elasticsearch_dsl/response/hit.py")
+ # generate_response_aggs_py(schema, "elasticsearch_dsl/response/aggs.py")
generate_types_py(schema, "elasticsearch_dsl/types.py")
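
To illustrate what the `for_response` flag changes, here is a hand-written, hypothetical sketch of the wrapping that only request-style attributes receive (the real helpers, `add_dict_type` and `add_not_set`, live elsewhere in generator.py):

    def as_request_hint(type_: str) -> str:
        # request style: interfaces may also be passed as plain dicts,
        # and the attribute may be omitted entirely
        return f"Union[{type_}, Dict[str, Any], DefaultType]"

    def as_response_hint(type_: str) -> str:
        # response style: the attribute always carries its declared type
        return type_

    print(as_request_hint('"TokenPruningConfig"'))     # request-style hint
    print(as_response_hint('"AggregationBreakdown"'))  # response-style hint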
diff --git a/utils/templates/response.__init__.py.tpl b/utils/templates/response.__init__.py.tpl
new file mode 100644
index 00000000..5d5d7bac
--- /dev/null
+++ b/utils/templates/response.__init__.py.tpl
@@ -0,0 +1,221 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Generic,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
+
+from ..utils import _R, AttrDict, AttrList, _wrap
+from .hit import Hit, HitMeta
+
+if TYPE_CHECKING:
+ from ..aggs import Agg
+ from ..faceted_search_base import FacetedSearchBase
+ from ..search_base import Request, SearchBase
+ from ..update_by_query_base import UpdateByQueryBase
+ from .. import types
+
+__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta"]
+
+
+class Response(AttrDict[Any], Generic[_R]):
+ """An Elasticsearch _search response.
+
+ {% for arg in response.args %}
+ {% for line in arg.doc %}
+ {{ line }}
+ {% endfor %}
+ {% endfor %}
+ """
+ _search: "SearchBase[_R]"
+ _faceted_search: "FacetedSearchBase[_R]"
+ _doc_class: Optional[_R]
+ _hits: List[_R]
+
+ {% for arg in response.args %}
+ {% if arg.name not in ["hits", "aggregations"] %}
+ {{ arg.name }}: {{ arg.type }}
+ {% endif %}
+ {% endfor %}
+
+ def __init__(
+ self,
+ search: "Request[_R]",
+ response: Dict[str, Any],
+ doc_class: Optional[_R] = None,
+ ):
+ super(AttrDict, self).__setattr__("_search", search)
+ super(AttrDict, self).__setattr__("_doc_class", doc_class)
+ super().__init__(response)
+
+ def __iter__(self) -> Iterator[_R]: # type: ignore[override]
+ return iter(self.hits)
+
+ def __getitem__(self, key: Union[slice, int, str]) -> Any:
+ if isinstance(key, (slice, int)):
+ # for slicing etc
+ return self.hits[key]
+ return super().__getitem__(key)
+
+ def __nonzero__(self) -> bool:
+ return bool(self.hits)
+
+ __bool__ = __nonzero__
+
+ def __repr__(self) -> str:
+ return "" % (self.hits or self.aggregations)
+
+ def __len__(self) -> int:
+ return len(self.hits)
+
+ def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]: # type: ignore[override]
+ return self._d_, self._search, self._doc_class
+
+ def __setstate__(
+ self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]] # type: ignore[override]
+ ) -> None:
+ super(AttrDict, self).__setattr__("_d_", state[0])
+ super(AttrDict, self).__setattr__("_search", state[1])
+ super(AttrDict, self).__setattr__("_doc_class", state[2])
+
+ def success(self) -> bool:
+ return self._shards.total == self._shards.successful and not self.timed_out
+
+ @property
+ def hits(self) -> List[_R]:
+ if not hasattr(self, "_hits"):
+ h = cast(AttrDict[Any], self._d_["hits"])
+
+ try:
+ hits = AttrList(list(map(self._search._get_result, h["hits"])))
+ except AttributeError as e:
+ # avoid raising AttributeError since it will be hidden by the property
+ raise TypeError("Could not parse hits.", e)
+
+ # avoid assigning _hits into self._d_
+ super(AttrDict, self).__setattr__("_hits", hits)
+ for k in h:
+ setattr(self._hits, k, _wrap(h[k]))
+ return self._hits
+
+ @property
+ def aggregations(self) -> "AggResponse[_R]":
+ return self.aggs
+
+ @property
+ def aggs(self) -> "AggResponse[_R]":
+ if not hasattr(self, "_aggs"):
+ aggs = AggResponse[_R](
+ cast("Agg[_R]", self._search.aggs),
+ self._search,
+ cast(Dict[str, Any], self._d_.get("aggregations", {})),
+ )
+
+ # avoid assigning _aggs into self._d_
+ super(AttrDict, self).__setattr__("_aggs", aggs)
+ return cast("AggResponse[_R]", self._aggs)
+
+ def search_after(self) -> "SearchBase[_R]":
+ """
+ Return a ``Search`` instance that retrieves the next page of results.
+
+ This method provides an easy way to paginate a long list of results using
+ the ``search_after`` option. For example::
+
+ page_size = 20
+ s = Search()[:page_size].sort("date")
+
+ while True:
+ # get a page of results
+ r = await s.execute()
+
+ # do something with this page of results
+
+ # exit the loop if we reached the end
+ if len(r.hits) < page_size:
+ break
+
+ # get a search object with the next page of results
+ s = r.search_after()
+
+ Note that the ``search_after`` option requires the search to have an
+ explicit ``sort`` order.
+ """
+ if len(self.hits) == 0:
+ raise ValueError("Cannot use search_after when there are no search results")
+ if not hasattr(self.hits[-1].meta, "sort"): # type: ignore
+ raise ValueError("Cannot use search_after when results are not sorted")
+ return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore
+
+
+class AggResponse(AttrDict[Any], Generic[_R]):
+ _meta: Dict[str, Any]
+
+ def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]):
+ super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs})
+ super().__init__(data)
+
+ def __getitem__(self, attr_name: str) -> Any:
+ if attr_name in self._meta["aggs"]:
+ # don't do self._meta['aggs'][attr_name] to avoid copying
+ agg = self._meta["aggs"].aggs[attr_name]
+ return agg.result(self._meta["search"], self._d_[attr_name])
+ return super().__getitem__(attr_name)
+
+ def __iter__(self) -> Iterator["Agg"]: # type: ignore[override]
+ for name in self._meta["aggs"]:
+ yield self[name]
+
+
+class UpdateByQueryResponse(AttrDict[Any], Generic[_R]):
+ """An Elasticsearch update by query response.
+
+ {% for arg in ubq_response.args %}
+ {% for line in arg.doc %}
+ {{ line }}
+ {% endfor %}
+ {% endfor %}
+ """
+ _search: "UpdateByQueryBase[_R]"
+
+ {% for arg in ubq_response.args %}
+ {{ arg.name }}: {{ arg.type }}
+ {% endfor %}
+
+ def __init__(
+ self,
+ search: "Request[_R]",
+ response: Dict[str, Any],
+ doc_class: Optional[_R] = None,
+ ):
+ super(AttrDict, self).__setattr__("_search", search)
+ super(AttrDict, self).__setattr__("_doc_class", doc_class)
+ super().__init__(response)
+
+ def success(self) -> bool:
+ return not self.timed_out and not self.failures
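
For reference, a minimal sketch of the pagination pattern that ``search_after()`` above documents, using the synchronous client (the docstring example shows the ``await`` form; the index and sort field here are illustrative):

    from elasticsearch_dsl import Search

    page_size = 20
    s = Search(index="logs")[:page_size].sort("@timestamp")

    while True:
        r = s.execute()                  # synchronous execute()
        # ... process r.hits ...
        if len(r.hits) < page_size:      # a short page means we reached the end
            break
        s = r.search_after()             # next page, seeded from the last hit's sort values
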
diff --git a/utils/templates/types.py.tpl b/utils/templates/types.py.tpl
index 8f854b0c..49617f5f 100644
--- a/utils/templates/types.py.tpl
+++ b/utils/templates/types.py.tpl
@@ -37,52 +37,57 @@ class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}):
{% endfor %}
"""
{% for arg in k.args %}
+ {% if arg.name not in ["keys", "items"] %}
{{ arg.name }}: {{ arg.type }}
+ {% else %}
+ {{ arg.name }}: {{ arg.type }} # type: ignore[assignment]
+ {% endif %}
{% endfor %}
+ {% if not k.for_response %}
def __init__(
self,
- {% for arg in k.args %}
- {% if arg.positional %}
+ {% for arg in k.args %}
+ {% if arg.positional %}
{{ arg.name }}: {{ arg.type }} = DEFAULT,
- {% endif %}
- {% endfor %}
- {% if k.args and not k.args[-1].positional %}
+ {% endif %}
+ {% endfor %}
+ {% if k.args and not k.args[-1].positional %}
*,
- {% endif %}
- {% for arg in k.args %}
- {% if not arg.positional %}
- {{ arg.name }}: {{ arg.type }} = DEFAULT,
{% endif %}
- {% endfor %}
+ {% for arg in k.args %}
+ {% if not arg.positional %}
+ {{ arg.name }}: {{ arg.type }} = DEFAULT,
+ {% endif %}
+ {% endfor %}
**kwargs: Any
):
- {% if k.is_single_field %}
+ {% if k.is_single_field %}
if _field is not DEFAULT:
kwargs[str(_field)] = _value
- {% elif k.is_multi_field %}
+ {% elif k.is_multi_field %}
if _fields is not DEFAULT:
for field, value in _fields.items():
kwargs[str(field)] = value
- {% endif %}
- {% for arg in k.args %}
- {% if not arg.positional %}
+ {% endif %}
+ {% for arg in k.args %}
+ {% if not arg.positional %}
if {{ arg.name }} is not DEFAULT:
- {% if "InstrumentedField" in arg.type %}
+ {% if "InstrumentedField" in arg.type %}
kwargs["{{ arg.name }}"] = str({{ arg.name }})
- {% else %}
+ {% else %}
kwargs["{{ arg.name }}"] = {{ arg.name }}
+ {% endif %}
{% endif %}
- {% endif %}
- {% endfor %}
- {% if k.parent %}
+ {% endfor %}
+ {% if k.parent %}
super().__init__(**kwargs)
- {% else %}
+ {% else %}
super().__init__(kwargs)
+ {% endif %}
{% endif %}
{% else %}
pass
{% endif %}
-
{% endfor %}
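
To illustrate the effect of the new ``for_response`` branch in this template (the class and field names below are hypothetical), an interface that only appears in responses now renders as bare annotations with no generated ``__init__``, roughly:

    class ExampleShardSummary(AttrDict[Any]):
        """
        :arg total: (required)
        :arg failed: (required)
        """

        total: int
        failed: int
        # no __init__ is emitted when for_response is true; instances are
        # wrapped from the raw response body by AttrDict rather than being
        # constructed by callers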