From 6c5b39adadf3062e000fc36ef3e86ca8f5fbbff3 Mon Sep 17 00:00:00 2001
From: Jamo Luhrsen
Date: Thu, 27 Jun 2024 10:35:54 -0700
Subject: [PATCH] fix build issues caused by k8s rebase to 1.30.2
- implement GetOperatorStateWithQuorum() for v1helpers.OperatorClient
interface which was added in openshift/library-go here [0]
- applyconfigv1.NetworkStatus().WithConditions() now requires
metav1.ConditionApplyConfiguration instead of metav1.Condition,
which came in with the client-go 1.30 rebase here [1]
- using new lib location for feature gates "openshift/api/features"
- refactor controller-runtime Watch() calls to handle new generics
support [2]
- moved RotatedSigningCASecret.JiraComponent to
RotatedSigningCASecret.AdditionalAnnotations.JiraComponent from here [3]
- added dummy path to apimachinery validation.IsValidIP() as it was
added as an argument here [4]. Not relevant to the call in this
case.
- bumped k8s.io/component-base to v0.31.0-alpha.2 to get past a
breaking issue with prometheus using a type from 0.30.2
- the version of googleapis/api/expr/v1alpha1 that is brought in with
github.com/google/cel-go with anything newer than v0.17.8 no longer
includes GetConstExpr() so pinning that lib to v0.17.8
[0] https://github.com/openshift/library-go/commit/bd5e34cc067445cdf4a109c85546b7b2ee63819a
[1] https://github.com/openshift/client-go/pull/277/commits/792100eb1d45854d9eba938d920be3464f631ac5#diff-233949a4a2a7ca43d091c935b04748464200784e5377366d574945d3fd06ed89R76
[2] https://github.com/kubernetes-sigs/controller-runtime/pull/2783
[3] https://github.com/openshift/library-go/commit/df7ff423f9326f978220be95b3c663a76481f0a7
[4] https://github.com/openshift/kubernetes-apimachinery/commit/89b941452299f7f7920eee87862a93ca7ad7b166
Signed-off-by: Jamo Luhrsen
---
go.mod | 21 +-
go.sum | 36 +-
pkg/client/operatorclient.go | 9 +
.../allowlist/allowlist_controller.go | 22 +-
.../clusterconfig/clusterconfig_controller.go | 13 +-
.../configmap_ca_injector/controller.go | 22 +-
.../connectivity_check_controller.go | 9 +-
.../dashboards/dashboard_controller.go | 31 +-
.../egress_router/egress_router_controller.go | 2 +-
.../infrastructureconfig_controller.go | 13 +-
.../ingressconfig/ingressconfig_controller.go | 2 +-
pkg/controller/operconfig/cluster.go | 5 +-
.../operconfig/operconfig_controller.go | 108 +-
pkg/controller/pki/pki_controller.go | 31 +-
pkg/controller/proxyconfig/controller.go | 8 +-
pkg/controller/signer/signer-controller.go | 2 +-
pkg/controller/statusmanager/pod_watcher.go | 7 +-
pkg/network/ovn_kubernetes.go | 5 +-
pkg/network/ovn_kubernetes_test.go | 35 +-
pkg/network/render.go | 3 +-
pkg/util/validation/network.go | 3 +-
.../antlr/antlr4/runtime/Go/antlr/v4/LICENSE | 26 +
.../antlr4/runtime/Go/antlr/v4/antlrdoc.go | 68 +
.../antlr4/runtime/Go}/antlr/v4/atn.go | 17 +-
.../antlr4/runtime/Go/antlr/v4/atn_config.go | 303 ++
.../runtime/Go/antlr/v4/atn_config_set.go | 441 +++
.../antlr/v4/atn_deserialization_options.go | 7 +-
.../runtime/Go}/antlr/v4/atn_deserializer.go | 9 +-
.../runtime/Go}/antlr/v4/atn_simulator.go | 17 +-
.../antlr4/runtime/Go}/antlr/v4/atn_state.go | 224 +-
.../antlr4/runtime/Go}/antlr/v4/atn_type.go | 0
.../runtime/Go}/antlr/v4/char_stream.go | 2 +-
.../Go}/antlr/v4/common_token_factory.go | 0
.../Go}/antlr/v4/common_token_stream.go | 39 +-
.../runtime/Go}/antlr/v4/comparators.go | 33 +-
.../antlr4/runtime/Go}/antlr/v4/dfa.go | 47 +-
.../runtime/Go}/antlr/v4/dfa_serializer.go | 2 +-
.../antlr4/runtime/Go}/antlr/v4/dfa_state.go | 29 +-
.../Go}/antlr/v4/diagnostic_error_listener.go | 11 +-
.../runtime/Go}/antlr/v4/error_listener.go | 40 +-
.../runtime/Go}/antlr/v4/error_strategy.go | 450 +--
.../antlr4/runtime/Go}/antlr/v4/errors.go | 73 +-
.../runtime/Go}/antlr/v4/file_stream.go | 46 +-
.../runtime/Go/antlr/v4/input_stream.go | 113 +
.../antlr4/runtime/Go}/antlr/v4/int_stream.go | 0
.../runtime/Go}/antlr/v4/interval_set.go | 60 +-
.../antlr4/runtime/Go/antlr/v4/jcollect.go | 198 ++
.../antlr4/runtime/Go}/antlr/v4/lexer.go | 68 +-
.../runtime/Go}/antlr/v4/lexer_action.go | 100 +-
.../Go}/antlr/v4/lexer_action_executor.go | 61 +-
.../Go}/antlr/v4/lexer_atn_simulator.go | 185 +-
.../runtime/Go}/antlr/v4/ll1_analyzer.go | 63 +-
.../antlr4/runtime/Go}/antlr/v4/parser.go | 160 +-
.../Go}/antlr/v4/parser_atn_simulator.go | 785 ++---
.../Go}/antlr/v4/parser_rule_context.go | 85 +-
.../runtime/Go/antlr/v4/prediction_context.go | 806 +++++
.../runtime/Go/antlr/v4/prediction_mode.go | 529 +++
.../antlr4/runtime/Go}/antlr/v4/recognizer.go | 67 +-
.../runtime/Go/antlr/v4/rule_context.go | 114 +
.../runtime/Go}/antlr/v4/semantic_context.go | 33 +-
.../antlr4/runtime/Go}/antlr/v4/token.go | 118 +-
.../runtime/Go}/antlr/v4/token_source.go | 0
.../runtime/Go}/antlr/v4/token_stream.go | 3 +-
.../Go}/antlr/v4/tokenstream_rewriter.go | 221 +-
.../runtime/Go}/antlr/v4/trace_listener.go | 0
.../antlr4/runtime/Go}/antlr/v4/transition.go | 229 +-
.../antlr4/runtime/Go}/antlr/v4/tree.go | 109 +-
.../antlr4/runtime/Go}/antlr/v4/trees.go | 22 +-
.../antlr4/runtime/Go}/antlr/v4/utils.go | 137 +-
.../antlr4/runtime/Go/antlr/v4/utils_set.go | 235 ++
.../github.com/antlr4-go/antlr/v4/.gitignore | 18 -
vendor/github.com/antlr4-go/antlr/v4/LICENSE | 28 -
.../github.com/antlr4-go/antlr/v4/README.md | 54 -
.../github.com/antlr4-go/antlr/v4/antlrdoc.go | 102 -
.../antlr4-go/antlr/v4/atn_config.go | 332 --
.../antlr4-go/antlr/v4/atn_config_set.go | 301 --
.../antlr4-go/antlr/v4/configuration.go | 214 --
.../antlr4-go/antlr/v4/input_stream.go | 157 -
.../github.com/antlr4-go/antlr/v4/jcollect.go | 684 ----
vendor/github.com/antlr4-go/antlr/v4/mutex.go | 41 -
.../antlr4-go/antlr/v4/mutex_nomutex.go | 32 -
.../antlr4-go/antlr/v4/nostatistics.go | 47 -
.../antlr4-go/antlr/v4/prediction_context.go | 727 -----
.../antlr/v4/prediction_context_cache.go | 48 -
.../antlr4-go/antlr/v4/prediction_mode.go | 536 ---
.../antlr4-go/antlr/v4/rule_context.go | 40 -
.../antlr4-go/antlr/v4/statistics.go | 280 --
.../antlr4-go/antlr/v4/stats_data.go | 23 -
.../json-patch/v5/internal/json/decode.go | 1385 ++++++++
.../json-patch/v5/internal/json/encode.go | 1486 +++++++++
.../json-patch/v5/internal/json/fold.go | 141 +
.../json-patch/v5/internal/json/fuzz.go | 42 +
.../json-patch/v5/internal/json/indent.go | 143 +
.../json-patch/v5/internal/json/scanner.go | 610 ++++
.../json-patch/v5/internal/json/stream.go | 495 +++
.../json-patch/v5/internal/json/tables.go | 218 ++
.../json-patch/v5/internal/json/tags.go | 38 +
.../github.com/evanphx/json-patch/v5/merge.go | 110 +-
.../github.com/evanphx/json-patch/v5/patch.go | 436 ++-
.../github.com/google/cel-go/cel/BUILD.bazel | 8 -
vendor/github.com/google/cel-go/cel/decls.go | 40 +
vendor/github.com/google/cel-go/cel/env.go | 128 +-
.../github.com/google/cel-go/cel/folding.go | 559 ----
.../github.com/google/cel-go/cel/inlining.go | 228 --
vendor/github.com/google/cel-go/cel/io.go | 36 +-
.../github.com/google/cel-go/cel/library.go | 61 +-
vendor/github.com/google/cel-go/cel/macro.go | 456 +--
.../github.com/google/cel-go/cel/optimizer.go | 509 ---
.../github.com/google/cel-go/cel/options.go | 2 -
.../github.com/google/cel-go/cel/program.go | 60 +-
.../github.com/google/cel-go/cel/validator.go | 51 +-
.../google/cel-go/checker/BUILD.bazel | 1 +
.../google/cel-go/checker/checker.go | 356 +-
.../github.com/google/cel-go/checker/cost.go | 194 +-
.../google/cel-go/checker/decls/decls.go | 2 +-
.../github.com/google/cel-go/checker/env.go | 8 -
.../google/cel-go/checker/errors.go | 18 +-
.../google/cel-go/checker/printer.go | 34 +-
.../github.com/google/cel-go/checker/types.go | 13 +-
.../google/cel-go/common/ast/BUILD.bazel | 16 +-
.../google/cel-go/common/ast/ast.go | 480 +--
.../google/cel-go/common/ast/conversion.go | 632 ----
.../google/cel-go/common/ast/expr.go | 967 +++---
.../google/cel-go/common/ast/factory.go | 303 --
.../google/cel-go/common/ast/navigable.go | 652 ----
.../cel-go/common/containers/BUILD.bazel | 4 +-
.../cel-go/common/containers/container.go | 22 +-
.../google/cel-go/common/debug/BUILD.bazel | 4 +-
.../google/cel-go/common/debug/debug.go | 156 +-
.../github.com/google/cel-go/common/errors.go | 2 +-
.../google/cel-go/common/types/err.go | 25 +-
.../google/cel-go/common/types/int.go | 12 -
.../google/cel-go/common/types/list.go | 8 +-
.../google/cel-go/common/types/optional.go | 2 +-
.../google/cel-go/common/types/overflow.go | 40 -
.../google/cel-go/common/types/provider.go | 51 +-
.../google/cel-go/common/types/string.go | 7 +-
.../google/cel-go/common/types/types.go | 19 +-
.../google/cel-go/common/types/uint.go | 12 -
.../github.com/google/cel-go/ext/BUILD.bazel | 7 +-
vendor/github.com/google/cel-go/ext/README.md | 14 -
.../github.com/google/cel-go/ext/bindings.go | 24 +-
.../google/cel-go/ext/formatting.go | 904 ------
vendor/github.com/google/cel-go/ext/guards.go | 11 +-
vendor/github.com/google/cel-go/ext/math.go | 67 +-
vendor/github.com/google/cel-go/ext/native.go | 66 +-
vendor/github.com/google/cel-go/ext/protos.go | 45 +-
vendor/github.com/google/cel-go/ext/sets.go | 64 -
.../github.com/google/cel-go/ext/strings.go | 456 ++-
.../google/cel-go/interpreter/BUILD.bazel | 1 +
.../google/cel-go/interpreter/attributes.go | 3 -
.../google/cel-go/interpreter/formatting.go | 383 +++
.../cel-go/interpreter/interpretable.go | 50 +-
.../google/cel-go/interpreter/interpreter.go | 26 +-
.../google/cel-go/interpreter/planner.go | 279 +-
.../google/cel-go/interpreter/prune.go | 496 +--
.../google/cel-go/parser/BUILD.bazel | 9 +-
.../google/cel-go/parser/gen/BUILD.bazel | 2 +-
.../cel-go/parser/gen/cel_base_listener.go | 4 +-
.../cel-go/parser/gen/cel_base_visitor.go | 5 +-
.../google/cel-go/parser/gen/cel_lexer.go | 603 ++--
.../google/cel-go/parser/gen/cel_listener.go | 5 +-
.../google/cel-go/parser/gen/cel_parser.go | 2478 +++++---------
.../google/cel-go/parser/gen/cel_visitor.go | 8 +-
.../google/cel-go/parser/gen/generate.sh | 2 +-
.../github.com/google/cel-go/parser/helper.go | 580 ++--
.../github.com/google/cel-go/parser/input.go | 4 +-
.../github.com/google/cel-go/parser/macro.go | 192 +-
.../github.com/google/cel-go/parser/parser.go | 189 +-
.../google/cel-go/parser/unparser.go | 222 +-
.../openshift/api/features/features.go | 537 +++
.../github.com/openshift/api/features/util.go | 193 ++
.../gopkg.in/evanphx/json-patch.v4/.gitignore | 6 +
vendor/gopkg.in/evanphx/json-patch.v4/LICENSE | 25 +
.../gopkg.in/evanphx/json-patch.v4/README.md | 317 ++
.../gopkg.in/evanphx/json-patch.v4/errors.go | 38 +
.../gopkg.in/evanphx/json-patch.v4/merge.go | 389 +++
.../gopkg.in/evanphx/json-patch.v4/patch.go | 851 +++++
vendor/k8s.io/api/admission/v1/doc.go | 2 +-
.../k8s.io/api/admission/v1/generated.proto | 18 +-
vendor/k8s.io/api/admission/v1/types.go | 1 +
.../v1/zz_generated.prerelease-lifecycle.go | 28 +
.../api/admission/v1beta1/generated.proto | 18 +-
.../api/admissionregistration/v1/doc.go | 1 +
.../admissionregistration/v1/generated.proto | 39 +-
.../api/admissionregistration/v1/types.go | 6 +
.../v1/zz_generated.prerelease-lifecycle.go | 70 +
.../v1alpha1/generated.proto | 18 +-
.../v1beta1/generated.proto | 38 +-
vendor/k8s.io/api/apidiscovery/v2/doc.go | 2 +-
.../api/apidiscovery/v2/generated.proto | 10 +-
vendor/k8s.io/api/apidiscovery/v2/types.go | 2 +
.../v2/zz_generated.prerelease-lifecycle.go | 34 +
.../api/apidiscovery/v2beta1/generated.proto | 10 +-
.../v1alpha1/generated.proto | 6 +-
vendor/k8s.io/api/apps/v1/doc.go | 1 +
vendor/k8s.io/api/apps/v1/generated.proto | 63 +-
vendor/k8s.io/api/apps/v1/types.go | 13 +-
.../apps/v1/types_swagger_doc_generated.go | 2 +-
.../v1/zz_generated.prerelease-lifecycle.go | 82 +
.../k8s.io/api/apps/v1beta1/generated.proto | 38 +-
.../k8s.io/api/apps/v1beta2/generated.proto | 62 +-
vendor/k8s.io/api/authentication/v1/doc.go | 1 +
.../api/authentication/v1/generated.proto | 8 +-
vendor/k8s.io/api/authentication/v1/types.go | 3 +
.../v1/zz_generated.prerelease-lifecycle.go | 40 +
.../authentication/v1alpha1/generated.proto | 4 +-
.../authentication/v1beta1/generated.proto | 6 +-
vendor/k8s.io/api/authorization/v1/doc.go | 2 +-
.../api/authorization/v1/generated.proto | 8 +-
vendor/k8s.io/api/authorization/v1/types.go | 4 +
.../v1/zz_generated.prerelease-lifecycle.go | 46 +
.../api/authorization/v1beta1/generated.proto | 8 +-
vendor/k8s.io/api/autoscaling/v1/doc.go | 1 +
.../k8s.io/api/autoscaling/v1/generated.proto | 50 +-
vendor/k8s.io/api/autoscaling/v1/types.go | 3 +
.../v1/zz_generated.prerelease-lifecycle.go | 40 +
vendor/k8s.io/api/autoscaling/v2/doc.go | 1 +
.../k8s.io/api/autoscaling/v2/generated.proto | 18 +-
vendor/k8s.io/api/autoscaling/v2/types.go | 2 +
.../v2/zz_generated.prerelease-lifecycle.go | 34 +
.../api/autoscaling/v2beta1/generated.proto | 48 +-
.../api/autoscaling/v2beta2/generated.proto | 18 +-
vendor/k8s.io/api/batch/v1/doc.go | 2 +-
vendor/k8s.io/api/batch/v1/generated.proto | 35 +-
vendor/k8s.io/api/batch/v1/types.go | 11 +-
.../batch/v1/types_swagger_doc_generated.go | 4 +-
.../v1/zz_generated.prerelease-lifecycle.go | 46 +
.../k8s.io/api/batch/v1beta1/generated.proto | 14 +-
vendor/k8s.io/api/certificates/v1/doc.go | 2 +-
.../api/certificates/v1/generated.proto | 8 +-
vendor/k8s.io/api/certificates/v1/types.go | 2 +
.../v1/zz_generated.prerelease-lifecycle.go | 34 +
.../api/certificates/v1alpha1/generated.proto | 4 +-
.../api/certificates/v1beta1/generated.proto | 8 +-
vendor/k8s.io/api/coordination/v1/doc.go | 1 +
.../api/coordination/v1/generated.proto | 8 +-
vendor/k8s.io/api/coordination/v1/types.go | 2 +
.../v1/zz_generated.prerelease-lifecycle.go | 34 +
.../api/coordination/v1beta1/generated.proto | 8 +-
vendor/k8s.io/api/core/v1/doc.go | 1 +
vendor/k8s.io/api/core/v1/generated.pb.go | 2876 ++++++++++-------
vendor/k8s.io/api/core/v1/generated.proto | 250 +-
vendor/k8s.io/api/core/v1/types.go | 131 +-
.../core/v1/types_swagger_doc_generated.go | 58 +-
.../api/core/v1/zz_generated.deepcopy.go | 52 +
.../v1/zz_generated.prerelease-lifecycle.go | 274 ++
vendor/k8s.io/api/discovery/v1/doc.go | 1 +
.../k8s.io/api/discovery/v1/generated.proto | 6 +-
vendor/k8s.io/api/discovery/v1/types.go | 2 +
.../v1/zz_generated.prerelease-lifecycle.go | 34 +
.../api/discovery/v1beta1/generated.proto | 6 +-
vendor/k8s.io/api/events/v1/doc.go | 2 +-
vendor/k8s.io/api/events/v1/generated.proto | 18 +-
vendor/k8s.io/api/events/v1/types.go | 2 +
.../v1/zz_generated.prerelease-lifecycle.go | 34 +
.../k8s.io/api/events/v1beta1/generated.proto | 18 +-
.../api/extensions/v1beta1/generated.proto | 62 +-
vendor/k8s.io/api/flowcontrol/v1/doc.go | 1 +
.../k8s.io/api/flowcontrol/v1/generated.proto | 12 +-
vendor/k8s.io/api/flowcontrol/v1/types.go | 4 +
.../v1/zz_generated.prerelease-lifecycle.go | 46 +
.../api/flowcontrol/v1beta1/generated.proto | 12 +-
.../api/flowcontrol/v1beta2/generated.proto | 12 +-
.../api/flowcontrol/v1beta3/generated.proto | 12 +-
.../api/imagepolicy/v1alpha1/generated.proto | 2 +-
vendor/k8s.io/api/networking/v1/doc.go | 1 +
.../k8s.io/api/networking/v1/generated.proto | 22 +-
vendor/k8s.io/api/networking/v1/types.go | 6 +
.../v1/zz_generated.prerelease-lifecycle.go | 58 +
.../api/networking/v1alpha1/generated.proto | 10 +-
.../api/networking/v1beta1/generated.proto | 12 +-
vendor/k8s.io/api/node/v1/doc.go | 2 +-
vendor/k8s.io/api/node/v1/generated.proto | 8 +-
vendor/k8s.io/api/node/v1/types.go | 2 +
.../v1/zz_generated.prerelease-lifecycle.go | 34 +
.../k8s.io/api/node/v1alpha1/generated.proto | 8 +-
.../k8s.io/api/node/v1beta1/generated.proto | 8 +-
vendor/k8s.io/api/policy/v1/doc.go | 1 +
vendor/k8s.io/api/policy/v1/generated.proto | 18 +-
vendor/k8s.io/api/policy/v1/types.go | 3 +
.../v1/zz_generated.prerelease-lifecycle.go | 40 +
.../k8s.io/api/policy/v1beta1/generated.proto | 18 +-
vendor/k8s.io/api/rbac/v1/doc.go | 2 +-
vendor/k8s.io/api/rbac/v1/generated.proto | 18 +-
vendor/k8s.io/api/rbac/v1/types.go | 10 +-
.../v1/zz_generated.prerelease-lifecycle.go | 70 +
.../k8s.io/api/rbac/v1alpha1/generated.proto | 18 +-
vendor/k8s.io/api/rbac/v1alpha1/types.go | 2 +-
.../k8s.io/api/rbac/v1beta1/generated.proto | 18 +-
vendor/k8s.io/api/rbac/v1beta1/types.go | 2 +-
.../api/resource/v1alpha2/generated.proto | 50 +-
.../api/resource/v1alpha2/namedresources.go | 2 +-
vendor/k8s.io/api/resource/v1alpha2/types.go | 4 +-
vendor/k8s.io/api/scheduling/v1/doc.go | 2 +-
.../k8s.io/api/scheduling/v1/generated.proto | 4 +-
vendor/k8s.io/api/scheduling/v1/types.go | 2 +
.../v1/zz_generated.prerelease-lifecycle.go | 34 +
.../api/scheduling/v1alpha1/generated.proto | 4 +-
.../api/scheduling/v1beta1/generated.proto | 4 +-
vendor/k8s.io/api/storage/v1/doc.go | 1 +
vendor/k8s.io/api/storage/v1/generated.proto | 32 +-
vendor/k8s.io/api/storage/v1/types.go | 10 +
.../v1/zz_generated.prerelease-lifecycle.go | 82 +
.../api/storage/v1alpha1/generated.proto | 22 +-
.../api/storage/v1beta1/generated.proto | 32 +-
.../storagemigration/v1alpha1/generated.proto | 6 +-
.../core/v1/containerstatus.go | 9 +
.../core/v1/containeruser.go | 39 +
.../core/v1/linuxcontaineruser.go | 59 +
.../core/v1/podsecuritycontext.go | 31 +-
.../applyconfigurations/internal/internal.go | 30 +
vendor/k8s.io/client-go/dynamic/simple.go | 12 +-
vendor/k8s.io/client-go/features/envvar.go | 62 +-
.../fake/fake_mutatingwebhookconfiguration.go | 30 +-
.../v1/fake/fake_validatingadmissionpolicy.go | 42 +-
.../fake_validatingadmissionpolicybinding.go | 30 +-
.../fake_validatingwebhookconfiguration.go | 30 +-
.../v1/mutatingwebhookconfiguration.go | 40 +-
.../v1/validatingadmissionpolicy.go | 40 +-
.../v1/validatingadmissionpolicybinding.go | 40 +-
.../v1/validatingwebhookconfiguration.go | 40 +-
.../fake/fake_validatingadmissionpolicy.go | 42 +-
.../fake_validatingadmissionpolicybinding.go | 30 +-
.../v1alpha1/validatingadmissionpolicy.go | 40 +-
.../validatingadmissionpolicybinding.go | 40 +-
.../fake/fake_mutatingwebhookconfiguration.go | 30 +-
.../fake/fake_validatingadmissionpolicy.go | 42 +-
.../fake_validatingadmissionpolicybinding.go | 30 +-
.../fake_validatingwebhookconfiguration.go | 30 +-
.../v1beta1/mutatingwebhookconfiguration.go | 40 +-
.../v1beta1/validatingadmissionpolicy.go | 40 +-
.../validatingadmissionpolicybinding.go | 40 +-
.../v1beta1/validatingwebhookconfiguration.go | 40 +-
.../v1alpha1/fake/fake_storageversion.go | 42 +-
.../v1alpha1/storageversion.go | 40 +-
.../typed/apps/v1/controllerrevision.go | 41 +-
.../kubernetes/typed/apps/v1/daemonset.go | 41 +-
.../kubernetes/typed/apps/v1/deployment.go | 41 +-
.../apps/v1/fake/fake_controllerrevision.go | 30 +-
.../typed/apps/v1/fake/fake_daemonset.go | 42 +-
.../typed/apps/v1/fake/fake_deployment.go | 55 +-
.../typed/apps/v1/fake/fake_replicaset.go | 55 +-
.../typed/apps/v1/fake/fake_statefulset.go | 55 +-
.../kubernetes/typed/apps/v1/replicaset.go | 41 +-
.../kubernetes/typed/apps/v1/statefulset.go | 41 +-
.../typed/apps/v1beta1/controllerrevision.go | 41 +-
.../typed/apps/v1beta1/deployment.go | 41 +-
.../v1beta1/fake/fake_controllerrevision.go | 30 +-
.../apps/v1beta1/fake/fake_deployment.go | 42 +-
.../apps/v1beta1/fake/fake_statefulset.go | 42 +-
.../typed/apps/v1beta1/statefulset.go | 41 +-
.../typed/apps/v1beta2/controllerrevision.go | 41 +-
.../typed/apps/v1beta2/daemonset.go | 41 +-
.../typed/apps/v1beta2/deployment.go | 41 +-
.../v1beta2/fake/fake_controllerrevision.go | 30 +-
.../typed/apps/v1beta2/fake/fake_daemonset.go | 42 +-
.../apps/v1beta2/fake/fake_deployment.go | 42 +-
.../apps/v1beta2/fake/fake_replicaset.go | 42 +-
.../apps/v1beta2/fake/fake_statefulset.go | 55 +-
.../typed/apps/v1beta2/replicaset.go | 41 +-
.../typed/apps/v1beta2/statefulset.go | 41 +-
.../v1/fake/fake_selfsubjectreview.go | 5 +-
.../v1/fake/fake_tokenreview.go | 5 +-
.../v1alpha1/fake/fake_selfsubjectreview.go | 5 +-
.../v1beta1/fake/fake_selfsubjectreview.go | 5 +-
.../v1beta1/fake/fake_tokenreview.go | 5 +-
.../v1/fake/fake_localsubjectaccessreview.go | 5 +-
.../v1/fake/fake_selfsubjectaccessreview.go | 5 +-
.../v1/fake/fake_selfsubjectrulesreview.go | 5 +-
.../v1/fake/fake_subjectaccessreview.go | 5 +-
.../fake/fake_localsubjectaccessreview.go | 5 +-
.../fake/fake_selfsubjectaccessreview.go | 5 +-
.../fake/fake_selfsubjectrulesreview.go | 5 +-
.../v1beta1/fake/fake_subjectaccessreview.go | 5 +-
.../v1/fake/fake_horizontalpodautoscaler.go | 42 +-
.../autoscaling/v1/horizontalpodautoscaler.go | 41 +-
.../v2/fake/fake_horizontalpodautoscaler.go | 42 +-
.../autoscaling/v2/horizontalpodautoscaler.go | 41 +-
.../fake/fake_horizontalpodautoscaler.go | 42 +-
.../v2beta1/horizontalpodautoscaler.go | 41 +-
.../fake/fake_horizontalpodautoscaler.go | 42 +-
.../v2beta2/horizontalpodautoscaler.go | 41 +-
.../kubernetes/typed/batch/v1/cronjob.go | 41 +-
.../typed/batch/v1/fake/fake_cronjob.go | 42 +-
.../typed/batch/v1/fake/fake_job.go | 42 +-
.../kubernetes/typed/batch/v1/job.go | 41 +-
.../kubernetes/typed/batch/v1beta1/cronjob.go | 41 +-
.../typed/batch/v1beta1/fake/fake_cronjob.go | 42 +-
.../v1/certificatesigningrequest.go | 40 +-
.../v1/fake/fake_certificatesigningrequest.go | 47 +-
.../v1alpha1/clustertrustbundle.go | 40 +-
.../v1alpha1/fake/fake_clustertrustbundle.go | 30 +-
.../v1beta1/certificatesigningrequest.go | 40 +-
.../fake/fake_certificatesigningrequest.go | 42 +-
.../typed/coordination/v1/fake/fake_lease.go | 30 +-
.../kubernetes/typed/coordination/v1/lease.go | 41 +-
.../coordination/v1beta1/fake/fake_lease.go | 30 +-
.../typed/coordination/v1beta1/lease.go | 41 +-
.../typed/core/v1/componentstatus.go | 40 +-
.../kubernetes/typed/core/v1/configmap.go | 41 +-
.../kubernetes/typed/core/v1/endpoints.go | 41 +-
.../kubernetes/typed/core/v1/event.go | 41 +-
.../core/v1/fake/fake_componentstatus.go | 30 +-
.../typed/core/v1/fake/fake_configmap.go | 30 +-
.../typed/core/v1/fake/fake_endpoints.go | 30 +-
.../typed/core/v1/fake/fake_event.go | 30 +-
.../typed/core/v1/fake/fake_limitrange.go | 30 +-
.../typed/core/v1/fake/fake_namespace.go | 42 +-
.../typed/core/v1/fake/fake_node.go | 42 +-
.../core/v1/fake/fake_persistentvolume.go | 42 +-
.../v1/fake/fake_persistentvolumeclaim.go | 42 +-
.../kubernetes/typed/core/v1/fake/fake_pod.go | 45 +-
.../typed/core/v1/fake/fake_podtemplate.go | 30 +-
.../v1/fake/fake_replicationcontroller.go | 50 +-
.../typed/core/v1/fake/fake_resourcequota.go | 42 +-
.../typed/core/v1/fake/fake_secret.go | 30 +-
.../typed/core/v1/fake/fake_service.go | 42 +-
.../typed/core/v1/fake/fake_serviceaccount.go | 35 +-
.../kubernetes/typed/core/v1/limitrange.go | 41 +-
.../kubernetes/typed/core/v1/namespace.go | 40 +-
.../kubernetes/typed/core/v1/node.go | 40 +-
.../typed/core/v1/persistentvolume.go | 40 +-
.../typed/core/v1/persistentvolumeclaim.go | 41 +-
.../client-go/kubernetes/typed/core/v1/pod.go | 41 +-
.../kubernetes/typed/core/v1/podtemplate.go | 41 +-
.../typed/core/v1/replicationcontroller.go | 41 +-
.../kubernetes/typed/core/v1/resourcequota.go | 41 +-
.../kubernetes/typed/core/v1/secret.go | 41 +-
.../kubernetes/typed/core/v1/service.go | 41 +-
.../typed/core/v1/serviceaccount.go | 41 +-
.../typed/discovery/v1/endpointslice.go | 41 +-
.../discovery/v1/fake/fake_endpointslice.go | 30 +-
.../typed/discovery/v1beta1/endpointslice.go | 41 +-
.../v1beta1/fake/fake_endpointslice.go | 30 +-
.../kubernetes/typed/events/v1/event.go | 41 +-
.../typed/events/v1/fake/fake_event.go | 30 +-
.../kubernetes/typed/events/v1beta1/event.go | 41 +-
.../typed/events/v1beta1/fake/fake_event.go | 30 +-
.../typed/extensions/v1beta1/daemonset.go | 41 +-
.../typed/extensions/v1beta1/deployment.go | 41 +-
.../extensions/v1beta1/fake/fake_daemonset.go | 42 +-
.../v1beta1/fake/fake_deployment.go | 55 +-
.../extensions/v1beta1/fake/fake_ingress.go | 42 +-
.../v1beta1/fake/fake_networkpolicy.go | 30 +-
.../v1beta1/fake/fake_replicaset.go | 55 +-
.../typed/extensions/v1beta1/ingress.go | 41 +-
.../typed/extensions/v1beta1/networkpolicy.go | 41 +-
.../typed/extensions/v1beta1/replicaset.go | 41 +-
.../flowcontrol/v1/fake/fake_flowschema.go | 42 +-
.../fake/fake_prioritylevelconfiguration.go | 42 +-
.../typed/flowcontrol/v1/flowschema.go | 40 +-
.../v1/prioritylevelconfiguration.go | 40 +-
.../v1beta1/fake/fake_flowschema.go | 42 +-
.../fake/fake_prioritylevelconfiguration.go | 42 +-
.../typed/flowcontrol/v1beta1/flowschema.go | 40 +-
.../v1beta1/prioritylevelconfiguration.go | 40 +-
.../v1beta2/fake/fake_flowschema.go | 42 +-
.../fake/fake_prioritylevelconfiguration.go | 42 +-
.../typed/flowcontrol/v1beta2/flowschema.go | 40 +-
.../v1beta2/prioritylevelconfiguration.go | 40 +-
.../v1beta3/fake/fake_flowschema.go | 42 +-
.../fake/fake_prioritylevelconfiguration.go | 42 +-
.../typed/flowcontrol/v1beta3/flowschema.go | 40 +-
.../v1beta3/prioritylevelconfiguration.go | 40 +-
.../typed/networking/v1/fake/fake_ingress.go | 42 +-
.../networking/v1/fake/fake_ingressclass.go | 30 +-
.../networking/v1/fake/fake_networkpolicy.go | 30 +-
.../kubernetes/typed/networking/v1/ingress.go | 41 +-
.../typed/networking/v1/ingressclass.go | 40 +-
.../typed/networking/v1/networkpolicy.go | 41 +-
.../v1alpha1/fake/fake_ipaddress.go | 30 +-
.../v1alpha1/fake/fake_servicecidr.go | 42 +-
.../typed/networking/v1alpha1/ipaddress.go | 40 +-
.../typed/networking/v1alpha1/servicecidr.go | 40 +-
.../networking/v1beta1/fake/fake_ingress.go | 42 +-
.../v1beta1/fake/fake_ingressclass.go | 30 +-
.../typed/networking/v1beta1/ingress.go | 41 +-
.../typed/networking/v1beta1/ingressclass.go | 40 +-
.../typed/node/v1/fake/fake_runtimeclass.go | 30 +-
.../kubernetes/typed/node/v1/runtimeclass.go | 40 +-
.../node/v1alpha1/fake/fake_runtimeclass.go | 30 +-
.../typed/node/v1alpha1/runtimeclass.go | 40 +-
.../node/v1beta1/fake/fake_runtimeclass.go | 30 +-
.../typed/node/v1beta1/runtimeclass.go | 40 +-
.../v1/fake/fake_poddisruptionbudget.go | 42 +-
.../typed/policy/v1/poddisruptionbudget.go | 41 +-
.../v1beta1/fake/fake_poddisruptionbudget.go | 42 +-
.../policy/v1beta1/poddisruptionbudget.go | 41 +-
.../kubernetes/typed/rbac/v1/clusterrole.go | 40 +-
.../typed/rbac/v1/clusterrolebinding.go | 40 +-
.../typed/rbac/v1/fake/fake_clusterrole.go | 30 +-
.../rbac/v1/fake/fake_clusterrolebinding.go | 30 +-
.../typed/rbac/v1/fake/fake_role.go | 30 +-
.../typed/rbac/v1/fake/fake_rolebinding.go | 30 +-
.../kubernetes/typed/rbac/v1/role.go | 41 +-
.../kubernetes/typed/rbac/v1/rolebinding.go | 41 +-
.../typed/rbac/v1alpha1/clusterrole.go | 40 +-
.../typed/rbac/v1alpha1/clusterrolebinding.go | 40 +-
.../rbac/v1alpha1/fake/fake_clusterrole.go | 30 +-
.../v1alpha1/fake/fake_clusterrolebinding.go | 30 +-
.../typed/rbac/v1alpha1/fake/fake_role.go | 30 +-
.../rbac/v1alpha1/fake/fake_rolebinding.go | 30 +-
.../kubernetes/typed/rbac/v1alpha1/role.go | 41 +-
.../typed/rbac/v1alpha1/rolebinding.go | 41 +-
.../typed/rbac/v1beta1/clusterrole.go | 40 +-
.../typed/rbac/v1beta1/clusterrolebinding.go | 40 +-
.../rbac/v1beta1/fake/fake_clusterrole.go | 30 +-
.../v1beta1/fake/fake_clusterrolebinding.go | 30 +-
.../typed/rbac/v1beta1/fake/fake_role.go | 30 +-
.../rbac/v1beta1/fake/fake_rolebinding.go | 30 +-
.../kubernetes/typed/rbac/v1beta1/role.go | 41 +-
.../typed/rbac/v1beta1/rolebinding.go | 41 +-
.../fake/fake_podschedulingcontext.go | 42 +-
.../v1alpha2/fake/fake_resourceclaim.go | 42 +-
.../fake/fake_resourceclaimparameters.go | 30 +-
.../fake/fake_resourceclaimtemplate.go | 30 +-
.../v1alpha2/fake/fake_resourceclass.go | 30 +-
.../fake/fake_resourceclassparameters.go | 30 +-
.../v1alpha2/fake/fake_resourceslice.go | 30 +-
.../resource/v1alpha2/podschedulingcontext.go | 41 +-
.../typed/resource/v1alpha2/resourceclaim.go | 41 +-
.../v1alpha2/resourceclaimparameters.go | 41 +-
.../v1alpha2/resourceclaimtemplate.go | 41 +-
.../typed/resource/v1alpha2/resourceclass.go | 40 +-
.../v1alpha2/resourceclassparameters.go | 41 +-
.../typed/resource/v1alpha2/resourceslice.go | 40 +-
.../scheduling/v1/fake/fake_priorityclass.go | 30 +-
.../typed/scheduling/v1/priorityclass.go | 40 +-
.../v1alpha1/fake/fake_priorityclass.go | 30 +-
.../scheduling/v1alpha1/priorityclass.go | 40 +-
.../v1beta1/fake/fake_priorityclass.go | 30 +-
.../typed/scheduling/v1beta1/priorityclass.go | 40 +-
.../kubernetes/typed/storage/v1/csidriver.go | 40 +-
.../kubernetes/typed/storage/v1/csinode.go | 40 +-
.../typed/storage/v1/csistoragecapacity.go | 41 +-
.../typed/storage/v1/fake/fake_csidriver.go | 30 +-
.../typed/storage/v1/fake/fake_csinode.go | 30 +-
.../v1/fake/fake_csistoragecapacity.go | 30 +-
.../storage/v1/fake/fake_storageclass.go | 30 +-
.../storage/v1/fake/fake_volumeattachment.go | 42 +-
.../typed/storage/v1/storageclass.go | 40 +-
.../typed/storage/v1/volumeattachment.go | 40 +-
.../storage/v1alpha1/csistoragecapacity.go | 41 +-
.../v1alpha1/fake/fake_csistoragecapacity.go | 30 +-
.../v1alpha1/fake/fake_volumeattachment.go | 42 +-
.../fake/fake_volumeattributesclass.go | 30 +-
.../storage/v1alpha1/volumeattachment.go | 40 +-
.../storage/v1alpha1/volumeattributesclass.go | 40 +-
.../typed/storage/v1beta1/csidriver.go | 40 +-
.../typed/storage/v1beta1/csinode.go | 40 +-
.../storage/v1beta1/csistoragecapacity.go | 41 +-
.../storage/v1beta1/fake/fake_csidriver.go | 30 +-
.../storage/v1beta1/fake/fake_csinode.go | 30 +-
.../v1beta1/fake/fake_csistoragecapacity.go | 30 +-
.../storage/v1beta1/fake/fake_storageclass.go | 30 +-
.../v1beta1/fake/fake_volumeattachment.go | 42 +-
.../typed/storage/v1beta1/storageclass.go | 40 +-
.../typed/storage/v1beta1/volumeattachment.go | 40 +-
.../fake/fake_storageversionmigration.go | 42 +-
.../v1alpha1/storageversionmigration.go | 40 +-
.../v1/mutatingwebhookconfiguration.go | 26 +-
.../v1/validatingadmissionpolicy.go | 26 +-
.../v1/validatingadmissionpolicybinding.go | 26 +-
.../v1/validatingwebhookconfiguration.go | 26 +-
.../v1alpha1/validatingadmissionpolicy.go | 26 +-
.../validatingadmissionpolicybinding.go | 26 +-
.../v1beta1/mutatingwebhookconfiguration.go | 26 +-
.../v1beta1/validatingadmissionpolicy.go | 26 +-
.../validatingadmissionpolicybinding.go | 26 +-
.../v1beta1/validatingwebhookconfiguration.go | 26 +-
.../v1alpha1/storageversion.go | 26 +-
.../listers/apps/v1/controllerrevision.go | 39 +-
.../client-go/listers/apps/v1/daemonset.go | 39 +-
.../client-go/listers/apps/v1/deployment.go | 39 +-
.../client-go/listers/apps/v1/replicaset.go | 39 +-
.../client-go/listers/apps/v1/statefulset.go | 39 +-
.../apps/v1beta1/controllerrevision.go | 39 +-
.../listers/apps/v1beta1/deployment.go | 39 +-
.../listers/apps/v1beta1/statefulset.go | 39 +-
.../apps/v1beta2/controllerrevision.go | 39 +-
.../listers/apps/v1beta2/daemonset.go | 39 +-
.../listers/apps/v1beta2/deployment.go | 39 +-
.../listers/apps/v1beta2/replicaset.go | 39 +-
.../listers/apps/v1beta2/statefulset.go | 39 +-
.../autoscaling/v1/horizontalpodautoscaler.go | 39 +-
.../autoscaling/v2/horizontalpodautoscaler.go | 39 +-
.../v2beta1/horizontalpodautoscaler.go | 39 +-
.../v2beta2/horizontalpodautoscaler.go | 39 +-
.../client-go/listers/batch/v1/cronjob.go | 39 +-
.../k8s.io/client-go/listers/batch/v1/job.go | 39 +-
.../listers/batch/v1beta1/cronjob.go | 39 +-
.../v1/certificatesigningrequest.go | 26 +-
.../v1alpha1/clustertrustbundle.go | 26 +-
.../v1beta1/certificatesigningrequest.go | 26 +-
.../listers/coordination/v1/lease.go | 39 +-
.../listers/coordination/v1beta1/lease.go | 39 +-
.../listers/core/v1/componentstatus.go | 26 +-
.../client-go/listers/core/v1/configmap.go | 39 +-
.../client-go/listers/core/v1/endpoints.go | 39 +-
.../k8s.io/client-go/listers/core/v1/event.go | 39 +-
.../client-go/listers/core/v1/limitrange.go | 39 +-
.../client-go/listers/core/v1/namespace.go | 26 +-
.../k8s.io/client-go/listers/core/v1/node.go | 26 +-
.../listers/core/v1/persistentvolume.go | 26 +-
.../listers/core/v1/persistentvolumeclaim.go | 39 +-
.../k8s.io/client-go/listers/core/v1/pod.go | 39 +-
.../client-go/listers/core/v1/podtemplate.go | 39 +-
.../listers/core/v1/replicationcontroller.go | 39 +-
.../listers/core/v1/resourcequota.go | 39 +-
.../client-go/listers/core/v1/secret.go | 39 +-
.../client-go/listers/core/v1/service.go | 39 +-
.../listers/core/v1/serviceaccount.go | 39 +-
.../listers/discovery/v1/endpointslice.go | 39 +-
.../discovery/v1beta1/endpointslice.go | 39 +-
.../client-go/listers}/doc.go | 7 +-
.../client-go/listers/events/v1/event.go | 39 +-
.../client-go/listers/events/v1beta1/event.go | 39 +-
.../listers/extensions/v1beta1/daemonset.go | 39 +-
.../listers/extensions/v1beta1/deployment.go | 39 +-
.../listers/extensions/v1beta1/ingress.go | 39 +-
.../extensions/v1beta1/networkpolicy.go | 39 +-
.../listers/extensions/v1beta1/replicaset.go | 39 +-
.../listers/flowcontrol/v1/flowschema.go | 26 +-
.../v1/prioritylevelconfiguration.go | 26 +-
.../listers/flowcontrol/v1beta1/flowschema.go | 26 +-
.../v1beta1/prioritylevelconfiguration.go | 26 +-
.../listers/flowcontrol/v1beta2/flowschema.go | 26 +-
.../v1beta2/prioritylevelconfiguration.go | 26 +-
.../listers/flowcontrol/v1beta3/flowschema.go | 26 +-
.../v1beta3/prioritylevelconfiguration.go | 26 +-
.../client-go/listers/generic_helpers.go | 72 +
.../listers/networking/v1/ingress.go | 39 +-
.../listers/networking/v1/ingressclass.go | 26 +-
.../listers/networking/v1/networkpolicy.go | 39 +-
.../listers/networking/v1alpha1/ipaddress.go | 26 +-
.../networking/v1alpha1/servicecidr.go | 26 +-
.../listers/networking/v1beta1/ingress.go | 39 +-
.../networking/v1beta1/ingressclass.go | 26 +-
.../client-go/listers/node/v1/runtimeclass.go | 26 +-
.../listers/node/v1alpha1/runtimeclass.go | 26 +-
.../listers/node/v1beta1/runtimeclass.go | 26 +-
.../client-go/listers/policy/v1/eviction.go | 39 +-
.../listers/policy/v1/poddisruptionbudget.go | 39 +-
.../listers/policy/v1beta1/eviction.go | 39 +-
.../policy/v1beta1/poddisruptionbudget.go | 39 +-
.../client-go/listers/rbac/v1/clusterrole.go | 26 +-
.../listers/rbac/v1/clusterrolebinding.go | 26 +-
.../k8s.io/client-go/listers/rbac/v1/role.go | 39 +-
.../client-go/listers/rbac/v1/rolebinding.go | 39 +-
.../listers/rbac/v1alpha1/clusterrole.go | 26 +-
.../rbac/v1alpha1/clusterrolebinding.go | 26 +-
.../client-go/listers/rbac/v1alpha1/role.go | 39 +-
.../listers/rbac/v1alpha1/rolebinding.go | 39 +-
.../listers/rbac/v1beta1/clusterrole.go | 26 +-
.../rbac/v1beta1/clusterrolebinding.go | 26 +-
.../client-go/listers/rbac/v1beta1/role.go | 39 +-
.../listers/rbac/v1beta1/rolebinding.go | 39 +-
.../resource/v1alpha2/podschedulingcontext.go | 39 +-
.../resource/v1alpha2/resourceclaim.go | 39 +-
.../v1alpha2/resourceclaimparameters.go | 39 +-
.../v1alpha2/resourceclaimtemplate.go | 39 +-
.../resource/v1alpha2/resourceclass.go | 26 +-
.../v1alpha2/resourceclassparameters.go | 39 +-
.../resource/v1alpha2/resourceslice.go | 26 +-
.../listers/scheduling/v1/priorityclass.go | 26 +-
.../scheduling/v1alpha1/priorityclass.go | 26 +-
.../scheduling/v1beta1/priorityclass.go | 26 +-
.../client-go/listers/storage/v1/csidriver.go | 26 +-
.../client-go/listers/storage/v1/csinode.go | 26 +-
.../listers/storage/v1/csistoragecapacity.go | 39 +-
.../listers/storage/v1/storageclass.go | 26 +-
.../listers/storage/v1/volumeattachment.go | 26 +-
.../storage/v1alpha1/csistoragecapacity.go | 39 +-
.../storage/v1alpha1/volumeattachment.go | 26 +-
.../storage/v1alpha1/volumeattributesclass.go | 26 +-
.../listers/storage/v1beta1/csidriver.go | 26 +-
.../listers/storage/v1beta1/csinode.go | 26 +-
.../storage/v1beta1/csistoragecapacity.go | 39 +-
.../listers/storage/v1beta1/storageclass.go | 26 +-
.../storage/v1beta1/volumeattachment.go | 26 +-
.../v1alpha1/storageversionmigration.go | 26 +-
vendor/k8s.io/client-go/rest/request.go | 146 +
vendor/k8s.io/client-go/testing/fixture.go | 2 +-
.../client-go/tools/cache/controller.go | 132 +-
.../client-go/tools/cache/delta_fifo.go | 53 +-
.../k8s.io/client-go/tools/cache/listers.go | 6 +-
.../k8s.io/client-go/tools/cache/listwatch.go | 4 +
.../k8s.io/client-go/tools/cache/reflector.go | 253 +-
.../reflector_data_consistency_detector.go | 94 +-
.../client-go/tools/clientcmd/api/helpers.go | 5 +-
.../client-go/tools/clientcmd/config.go | 3 +-
.../tools/leaderelection/leaderelection.go | 4 +-
vendor/k8s.io/client-go/tools/record/event.go | 8 +-
.../client-go/transport/cert_rotation.go | 7 +-
.../data_consistency_detector.go | 146 +
.../list_data_consistency_detector.go | 70 +
.../watch_list_data_consistency_detector.go | 54 +
.../client-go/util/watchlist/watch_list.go | 82 +
.../util/workqueue/default_rate_limiters.go | 139 +-
.../util/workqueue/delaying_queue.go | 66 +-
.../k8s.io/client-go/util/workqueue/queue.go | 153 +-
.../util/workqueue/rate_limiting_queue.go | 64 +-
.../component-base/logs/api/v1/options.go | 1 +
.../logs/api/v1/options_no_slog.go} | 16 +-
.../logs/api/v1/options_slog.go | 37 +
.../metrics/testutil/metrics.go | 2 +-
vendor/k8s.io/component-base/tracing/utils.go | 3 +-
vendor/modules.txt | 39 +-
.../controller-runtime/pkg/cache/cache.go | 128 +-
.../pkg/cache/delegating_by_gvk_cache.go | 8 +
.../pkg/cache/informer_cache.go | 11 +
.../pkg/cache/internal/cache_reader.go | 57 +-
.../pkg/cache/internal/informers.go | 70 +-
.../pkg/cache/multi_namespace_cache.go | 49 +-
.../pkg/client/apiutil/apimachinery.go | 21 -
.../pkg/client/apiutil/errors.go | 54 +
.../pkg/client/apiutil/restmapper.go | 90 +-
.../controller-runtime/pkg/client/client.go | 22 +-
.../pkg/client/fake/client.go | 175 +-
.../pkg/client/fieldowner.go | 106 +
.../pkg/client/interfaces.go | 1 +
.../controller-runtime/pkg/client/options.go | 5 +-
.../controller-runtime/pkg/config/config.go | 112 -
.../pkg/config/v1alpha1/register.go | 43 -
.../pkg/config/v1alpha1/types.go | 179 -
.../config/v1alpha1/zz_generated.deepcopy.go | 157 -
.../pkg/controller/controller.go | 40 +-
.../controllerutil/controllerutil.go | 114 +-
.../controller-runtime/pkg/event/event.go | 51 +-
.../controller-runtime/pkg/handler/enqueue.go | 43 +-
.../pkg/handler/enqueue_mapped.go | 42 +-
.../pkg/handler/enqueue_owner.go | 48 +-
.../pkg/handler/eventhandler.go | 62 +-
.../pkg/internal/controller/controller.go | 35 +-
.../pkg/internal/field/selector/utils.go | 16 +-
.../pkg/internal/source/event_handler.go | 32 +-
.../pkg/internal/source/kind.go | 58 +-
.../pkg/internal/syncs/syncs.go | 38 +
.../pkg/manager/internal.go | 55 +-
.../controller-runtime/pkg/manager/manager.go | 156 +-
.../pkg/manager/runnable_group.go | 20 +-
.../controller-runtime/pkg/manager/server.go | 74 +-
.../pkg/metrics/leaderelection.go | 23 +-
.../pkg/metrics/server/server.go | 32 +-
.../pkg/metrics/workqueue.go | 4 +-
.../pkg/predicate/predicate.go | 179 +-
.../pkg/reconcile/reconcile.go | 34 +-
.../controller-runtime/pkg/scheme/scheme.go | 93 -
.../controller-runtime/pkg/source/source.go | 124 +-
.../pkg/webhook/admission/decode.go | 25 +-
.../pkg/webhook/admission/defaulter.go | 4 +-
.../pkg/webhook/admission/defaulter_custom.go | 2 +-
.../pkg/webhook/admission/http.go | 52 +-
.../pkg/webhook/admission/validator.go | 4 +-
.../pkg/webhook/admission/validator_custom.go | 3 +-
.../controller-runtime/pkg/webhook/alias.go | 2 +
757 files changed, 31093 insertions(+), 23575 deletions(-)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn.go (94%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_deserialization_options.go (86%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_deserializer.go (97%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_simulator.go (66%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_state.go (65%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/atn_type.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/char_stream.go (89%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/common_token_factory.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/common_token_stream.go (88%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/comparators.go (82%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/dfa.go (76%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/dfa_serializer.go (97%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/dfa_state.go (81%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/diagnostic_error_listener.go (92%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/error_listener.go (62%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/error_strategy.go (58%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/errors.go (73%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/file_stream.go (52%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/int_stream.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/interval_set.go (82%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer.go (78%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer_action.go (78%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer_action_executor.go (70%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/lexer_atn_simulator.go (80%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/ll1_analyzer.go (72%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/parser.go (80%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/parser_atn_simulator.go (64%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/parser_rule_context.go (77%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/recognizer.go (70%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/semantic_context.go (92%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/token.go (74%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/token_source.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/token_stream.go (90%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/tokenstream_rewriter.go (73%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/trace_listener.go (100%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/transition.go (67%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/tree.go (62%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/trees.go (81%)
rename vendor/github.com/{antlr4-go => antlr/antlr4/runtime/Go}/antlr/v4/utils.go (74%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/.gitignore
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/LICENSE
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/README.md
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/configuration.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/input_stream.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/jcollect.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/mutex.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/rule_context.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/statistics.go
delete mode 100644 vendor/github.com/antlr4-go/antlr/v4/stats_data.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/fold.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/fuzz.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/indent.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/scanner.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/tables.go
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/tags.go
delete mode 100644 vendor/github.com/google/cel-go/cel/folding.go
delete mode 100644 vendor/github.com/google/cel-go/cel/inlining.go
delete mode 100644 vendor/github.com/google/cel-go/cel/optimizer.go
delete mode 100644 vendor/github.com/google/cel-go/common/ast/conversion.go
delete mode 100644 vendor/github.com/google/cel-go/common/ast/factory.go
delete mode 100644 vendor/github.com/google/cel-go/common/ast/navigable.go
delete mode 100644 vendor/github.com/google/cel-go/ext/formatting.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/formatting.go
create mode 100644 vendor/github.com/openshift/api/features/features.go
create mode 100644 vendor/github.com/openshift/api/features/util.go
create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/.gitignore
create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/LICENSE
create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/README.md
create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/errors.go
create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/merge.go
create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/patch.go
create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go
rename vendor/{sigs.k8s.io/controller-runtime/pkg/config => k8s.io/client-go/listers}/doc.go (76%)
create mode 100644 vendor/k8s.io/client-go/listers/generic_helpers.go
create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go
create mode 100644 vendor/k8s.io/client-go/util/watchlist/watch_list.go
rename vendor/{sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go => k8s.io/component-base/logs/api/v1/options_no_slog.go} (54%)
create mode 100644 vendor/k8s.io/component-base/logs/api/v1/options_slog.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go
delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go
delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go
delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go
delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go
delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
diff --git a/go.mod b/go.mod
index 18cc5c1d2c..779ed92957 100644
--- a/go.mod
+++ b/go.mod
@@ -21,14 +21,14 @@ require (
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
golang.org/x/net v0.26.0
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/api v0.30.2
- k8s.io/apimachinery v0.30.2
+ k8s.io/api v0.31.0-alpha.2
+ k8s.io/apimachinery v0.31.0-alpha.2 // replaced with v0.30.2 in replace() block below
k8s.io/code-generator v0.30.2
- k8s.io/component-base v0.30.2
+ k8s.io/component-base v0.31.0-alpha.2 // v0.30.2 uses outdated and build breaking type from prometheus. update in v0.31
k8s.io/klog/v2 v2.130.1
k8s.io/kube-proxy v0.30.2
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0
- sigs.k8s.io/controller-runtime v0.16.0
+ sigs.k8s.io/controller-runtime v0.18.4
)
require (
@@ -114,13 +114,13 @@ require (
github.com/openshift/library-go v0.0.0-20240621150525-4bb4238aef81
github.com/openshift/machine-config-operator v0.0.1-0.20231002195040-a2469941c0dc
k8s.io/apiextensions-apiserver v0.30.2
- k8s.io/client-go v0.30.2
+ k8s.io/client-go v0.31.0-alpha.2
)
require (
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.44.204 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -133,7 +133,7 @@ require (
github.com/coreos/ignition/v2 v2.15.0 // indirect
github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+ github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/cel-go v0.20.1 // indirect
@@ -141,7 +141,6 @@ require (
github.com/gorilla/websocket v1.5.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
- github.com/onsi/ginkgo/v2 v2.17.3 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
@@ -153,8 +152,14 @@ require (
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
k8s.io/apiserver v0.30.2 // indirect
k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313 // indirect
k8s.io/kms v0.30.2 // indirect
k8s.io/kube-aggregator v0.30.1 // indirect
)
+
+replace (
+ github.com/google/cel-go => github.com/google/cel-go v0.17.8
+ k8s.io/apimachinery => k8s.io/apimachinery v0.30.2
+)
diff --git a/go.sum b/go.sum
index e418e63482..e09774c461 100644
--- a/go.sum
+++ b/go.sum
@@ -31,8 +31,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+QlCWMWxRBepub4DresnOm4eI2ebFGc=
github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
-github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
-github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c h1:icme0QhxrgZOxTBnT6K8dfGLwbKWSOVwPB95XTbo8Ws=
@@ -94,8 +94,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
-github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
@@ -159,8 +159,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
-github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
+github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
+github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -208,7 +208,6 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
@@ -252,8 +251,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
-github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
+github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
@@ -269,7 +268,6 @@ github.com/openshift/library-go v0.0.0-20240621150525-4bb4238aef81/go.mod h1:PdA
github.com/openshift/machine-config-operator v0.0.1-0.20231002195040-a2469941c0dc h1:m8c26gPEv0p621926Kl26kYQvnkOrW7pOirvYzORn24=
github.com/openshift/machine-config-operator v0.0.1-0.20231002195040-a2469941c0dc/go.mod h1:ftCpVtU6Q31exB0DTBn9s2eu90RJESOSisNAruWkvcE=
github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.3.0 h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=
@@ -612,6 +610,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
@@ -635,20 +635,20 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/api v0.31.0-alpha.2 h1:azMbpAFERqtGmgDtg/f7efnxgPBW+8ieyHNKxT97EMI=
+k8s.io/api v0.31.0-alpha.2/go.mod h1:S1X5UjUV8NZmR1vmKIkUpruhr0AWAvocZVZ5zxKMvi4=
k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/apiserver v0.30.2 h1:ACouHiYl1yFI2VFI3YGM+lvxgy6ir4yK2oLOsLI1/tw=
k8s.io/apiserver v0.30.2/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/client-go v0.31.0-alpha.2 h1:13UCBphjOLcqQ1ROBA+y9sr9Bmc/Ss1ypHQEDb6uKas=
+k8s.io/client-go v0.31.0-alpha.2/go.mod h1:wF4N5QBYqOoXntvUsYd5eyfDLqskc/UNDyEF6WvaFIk=
k8s.io/code-generator v0.30.2 h1:ZY1+aGkqZVwKIyGsOzquaeZ5rSfE6wZHur8z3jQAaiw=
k8s.io/code-generator v0.30.2/go.mod h1:RQP5L67QxqgkVquk704CyvWFIq0e6RCMmLTXxjE8dVA=
-k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
-k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
+k8s.io/component-base v0.31.0-alpha.2 h1:bAYhaSt++Mf7x0042QkeKJpzOuMq3KP7WGiLIM2hBcA=
+k8s.io/component-base v0.31.0-alpha.2/go.mod h1:4RdlW5OL0oab6gWaGWjxIcgORwuiuO49gV2GSxJ/9io=
k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313 h1:bKcdZJOPICVmIIuaM9+MXmapE94dn5AYv5ODs1jA43o=
k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
@@ -668,8 +668,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/controller-runtime v0.16.0 h1:5koYaaRVBHDr0LZAJjO5dWzUjMsh6cwa7q1Mmusrdvk=
-sigs.k8s.io/controller-runtime v0.16.0/go.mod h1:77DnuwA8+J7AO0njzv3wbNlMOnGuLrwFr8JPNwx3J7g=
+sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
+sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek=
diff --git a/pkg/client/operatorclient.go b/pkg/client/operatorclient.go
index 9a6ef7ea4b..a4963563c8 100644
--- a/pkg/client/operatorclient.go
+++ b/pkg/client/operatorclient.go
@@ -78,3 +78,12 @@ func (c *OperatorHelperClient) UpdateOperatorStatus(ctx context.Context, resourc
return &ret.Status.OperatorStatus, nil
}
+
+func (c *OperatorHelperClient) GetOperatorStateWithQuorum(ctx context.Context) (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) {
+ instance, err := c.client.Get(ctx, names.OPERATOR_CONFIG, metav1.GetOptions{})
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ return &instance.Spec.OperatorSpec, &instance.Status.OperatorStatus, instance.ResourceVersion, nil
+}
diff --git a/pkg/controller/allowlist/allowlist_controller.go b/pkg/controller/allowlist/allowlist_controller.go
index aebbe7668e..beadf2400e 100644
--- a/pkg/controller/allowlist/allowlist_controller.go
+++ b/pkg/controller/allowlist/allowlist_controller.go
@@ -64,16 +64,18 @@ func add(mgr manager.Manager, r *ReconcileAllowlist) error {
r.client.Default().AddCustomInformer(cmInformer) // Tell the ClusterClient about this informer
- return c.Watch(&source.Informer{Informer: cmInformer},
- &handler.EnqueueRequestForObject{},
- predicate.ResourceVersionChangedPredicate{},
- predicate.NewPredicateFuncs(func(object crclient.Object) bool {
- // Only care about cni-sysctl-allowlist, but also watching for default-cni-sysctl-allowlist
- // as a trigger for creating cni-sysctl-allowlist if it doesn't exist
- return (strings.Contains(object.GetName(), names.ALLOWLIST_CONFIG_NAME))
-
- }),
- )
+ return c.Watch(&source.Informer{
+ Informer: cmInformer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ predicate.ResourceVersionChangedPredicate{},
+ predicate.NewPredicateFuncs(func(object crclient.Object) bool {
+ // Only care about cni-sysctl-allowlist, but also watching for default-cni-sysctl-allowlist
+ // as a trigger for creating cni-sysctl-allowlist if it doesn't exist
+ return (strings.Contains(object.GetName(), names.ALLOWLIST_CONFIG_NAME))
+ }),
+ },
+ })
}
var _ reconcile.Reconciler = &ReconcileAllowlist{}
diff --git a/pkg/controller/clusterconfig/clusterconfig_controller.go b/pkg/controller/clusterconfig/clusterconfig_controller.go
index d767ab579e..60f2b5ec05 100644
--- a/pkg/controller/clusterconfig/clusterconfig_controller.go
+++ b/pkg/controller/clusterconfig/clusterconfig_controller.go
@@ -42,8 +42,19 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}
+ informer, err := mgr.GetCache().GetInformer(context.Background(), &configv1.Network{})
+ if err != nil {
+ return err
+ }
+
// Watch for changes to primary resource config.openshift.io/v1/Network
- err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Network{}), &handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{})
+ err = c.Watch(&source.Informer{
+ Informer: informer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ predicate.GenerationChangedPredicate{},
+ },
+ })
if err != nil {
return err
}
diff --git a/pkg/controller/configmap_ca_injector/controller.go b/pkg/controller/configmap_ca_injector/controller.go
index 1e5afbf8c8..6a43f19344 100644
--- a/pkg/controller/configmap_ca_injector/controller.go
+++ b/pkg/controller/configmap_ca_injector/controller.go
@@ -81,16 +81,22 @@ func add(mgr manager.Manager, r *ReconcileConfigMapInjector) error {
}
// Wire up the informers to the queue
- if err := c.Watch(&source.Informer{Informer: r.labelInformer},
- &handler.EnqueueRequestForObject{},
- predicate.ResourceVersionChangedPredicate{},
- ); err != nil {
+ if err := c.Watch(&source.Informer{
+ Informer: r.labelInformer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ predicate.ResourceVersionChangedPredicate{},
+ },
+ }); err != nil {
return err
}
- if err := c.Watch(&source.Informer{Informer: r.nsInformer},
- &handler.EnqueueRequestForObject{},
- predicate.NewPredicateFuncs(isCABundle),
- ); err != nil {
+ if err := c.Watch(&source.Informer{
+ Informer: r.nsInformer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ predicate.NewPredicateFuncs(isCABundle),
+ },
+ }); err != nil {
return err
}
diff --git a/pkg/controller/connectivitycheck/connectivity_check_controller.go b/pkg/controller/connectivitycheck/connectivity_check_controller.go
index ba8d60e7e9..a1a14a9a63 100644
--- a/pkg/controller/connectivitycheck/connectivity_check_controller.go
+++ b/pkg/controller/connectivitycheck/connectivity_check_controller.go
@@ -206,7 +206,14 @@ func (c *connectivityCheckTemplateProvider) generate(ctx context.Context, syncCo
if c.connectivityChecksStatus != currentStatus {
condition := currentStatus
condition.LastTransitionTime = metav1.NewTime(time.Now())
- netConfig := applyconfigv1.Network(names.CLUSTER_CONFIG).WithStatus(applyconfigv1.NetworkStatus().WithConditions(condition))
+ applyCondition := &applyconfigmetav1.ConditionApplyConfiguration{
+ Type: &condition.Type,
+ Status: &condition.Status,
+ LastTransitionTime: &condition.LastTransitionTime,
+ Reason: &condition.Reason,
+ Message: &condition.Message,
+ }
+ netConfig := applyconfigv1.Network(names.CLUSTER_CONFIG).WithStatus(applyconfigv1.NetworkStatus().WithConditions(applyCondition))
_, err := c.configClient.ConfigV1().Networks().Apply(context.TODO(), netConfig, metav1.ApplyOptions{
Force: true,
FieldManager: "cluster-network-operator/connectivity-check-controller",
diff --git a/pkg/controller/dashboards/dashboard_controller.go b/pkg/controller/dashboards/dashboard_controller.go
index 1c5bb03c97..12670ea790 100644
--- a/pkg/controller/dashboards/dashboard_controller.go
+++ b/pkg/controller/dashboards/dashboard_controller.go
@@ -83,22 +83,25 @@ func add(mgr manager.Manager, r *ReconcileDashboard) error {
r.client.Default().AddCustomInformer(cmInformer) // Tell the ClusterClient about this informer
firstRun := true
- return c.Watch(&source.Informer{Informer: cmInformer},
- &handler.EnqueueRequestForObject{},
- predicate.ResourceVersionChangedPredicate{},
- predicate.NewPredicateFuncs(func(object crclient.Object) bool {
- if firstRun {
- firstRun = false
- return true
- }
- for _, ref := range dashboardRefs {
- if object.GetName() == ref.name {
+ return c.Watch(&source.Informer{
+ Informer: cmInformer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ predicate.ResourceVersionChangedPredicate{},
+ predicate.NewPredicateFuncs(func(object crclient.Object) bool {
+ if firstRun {
+ firstRun = false
return true
}
- }
- return false
- }),
- )
+ for _, ref := range dashboardRefs {
+ if object.GetName() == ref.name {
+ return true
+ }
+ }
+ return false
+ }),
+ },
+ })
}
var _ reconcile.Reconciler = &ReconcileDashboard{}
diff --git a/pkg/controller/egress_router/egress_router_controller.go b/pkg/controller/egress_router/egress_router_controller.go
index b86cca586b..727ac2e2bf 100644
--- a/pkg/controller/egress_router/egress_router_controller.go
+++ b/pkg/controller/egress_router/egress_router_controller.go
@@ -50,7 +50,7 @@ func Add(mgr manager.Manager, status *statusmanager.StatusManager, cli cnoclient
}
// Watch for changes to primary resource EgressRouter.network.operator.openshift.io/v1
- err = c.Watch(source.Kind(mgr.GetCache(), &netopv1.EgressRouter{}), &handler.EnqueueRequestForObject{})
+ err = c.Watch(source.Kind(mgr.GetCache(), &netopv1.EgressRouter{}, &handler.TypedEnqueueRequestForObject[*netopv1.EgressRouter]{}))
if err != nil {
return err
}
diff --git a/pkg/controller/infrastructureconfig/infrastructureconfig_controller.go b/pkg/controller/infrastructureconfig/infrastructureconfig_controller.go
index 7d1cb54b4d..39a4f53519 100644
--- a/pkg/controller/infrastructureconfig/infrastructureconfig_controller.go
+++ b/pkg/controller/infrastructureconfig/infrastructureconfig_controller.go
@@ -5,6 +5,7 @@ import (
"fmt"
"log"
"reflect"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/cluster-network-operator/pkg/apply"
@@ -48,8 +49,18 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}
+	informer, err := mgr.GetCache().GetInformer(context.Background(), &configv1.Infrastructure{})
+ if err != nil {
+ return err
+ }
// Watch for changes to primary resource config.openshift.io/v1/Infrastructure
- err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Infrastructure{}), &handler.EnqueueRequestForObject{}, onPremPlatformPredicate())
+ err = c.Watch(&source.Informer{
+ Informer: informer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ onPremPlatformPredicate(),
+ },
+ })
if err != nil {
return err
}
diff --git a/pkg/controller/ingressconfig/ingressconfig_controller.go b/pkg/controller/ingressconfig/ingressconfig_controller.go
index 6acb0afeae..6d45f19ed4 100644
--- a/pkg/controller/ingressconfig/ingressconfig_controller.go
+++ b/pkg/controller/ingressconfig/ingressconfig_controller.go
@@ -52,7 +52,7 @@ func add(mgr manager.Manager, r *ReconcileIngressConfigs) error {
if err != nil {
return err
}
- err = c.Watch(source.Kind(mgr.GetCache(), &operv1.IngressController{}), &handler.EnqueueRequestForObject{})
+ err = c.Watch(source.Kind(mgr.GetCache(), &operv1.IngressController{}, &handler.TypedEnqueueRequestForObject[*operv1.IngressController]{}))
if err != nil {
return err
}
diff --git a/pkg/controller/operconfig/cluster.go b/pkg/controller/operconfig/cluster.go
index 611b971f3f..9446abb2ba 100644
--- a/pkg/controller/operconfig/cluster.go
+++ b/pkg/controller/operconfig/cluster.go
@@ -3,6 +3,7 @@ package operconfig
import (
"context"
"fmt"
+ apifeatures "github.com/openshift/api/features"
"log"
"reflect"
@@ -24,7 +25,7 @@ import (
// MergeClusterConfig merges in the existing cluster config in to the
// operator config, overwriting any changes to the managed fields.
func (r *ReconcileOperConfig) MergeClusterConfig(ctx context.Context, operConfig *operv1.Network, clusterConfig *configv1.Network) error {
- if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(configv1.FeatureGateNetworkLiveMigration) {
+ if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(apifeatures.FeatureGateNetworkLiveMigration) {
// During network type live migration, all the update to network.operator shall only be handled by the clusterconfig controller
return nil
}
@@ -93,7 +94,7 @@ func (r *ReconcileOperConfig) ClusterNetworkStatus(ctx context.Context, operConf
// Sync status.conditions when live migration is processing
clusterConfigWithConditions := clusterConfig.DeepCopy()
nowTimestamp := metav1.Now()
- if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(configv1.FeatureGateNetworkLiveMigration) {
+ if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(apifeatures.FeatureGateNetworkLiveMigration) {
if meta.IsStatusConditionPresentAndEqual(clusterConfig.Status.Conditions, names.NetworkTypeMigrationInProgress, metav1.ConditionTrue) {
err = r.syncNetworkTypeMigrationConditions(ctx, operConfig, clusterConfigWithConditions)
if err != nil {
diff --git a/pkg/controller/operconfig/operconfig_controller.go b/pkg/controller/operconfig/operconfig_controller.go
index 8e00c5d49e..bfff2001f2 100644
--- a/pkg/controller/operconfig/operconfig_controller.go
+++ b/pkg/controller/operconfig/operconfig_controller.go
@@ -133,21 +133,32 @@ func add(mgr manager.Manager, r *ReconcileOperConfig) error {
return err
}
+ informer, err := mgr.GetCache().GetInformer(context.Background(), &configv1.Network{})
+ if err != nil {
+ return err
+ }
+
// Watch for changes to networkDiagnostics in network.config
- err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Network{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
- UpdateFunc: func(evt event.UpdateEvent) bool {
- old, ok := evt.ObjectOld.(*configv1.Network)
- if !ok {
- return true
- }
- new, ok := evt.ObjectNew.(*configv1.Network)
- if !ok {
- return true
- }
- if reflect.DeepEqual(old.Spec.NetworkDiagnostics, new.Spec.NetworkDiagnostics) {
- return false
- }
- return true
+ err = c.Watch(&source.Informer{
+ Informer: informer,
+ Handler: &handler.EnqueueRequestForObject{},
+ Predicates: []predicate.Predicate{
+ predicate.Funcs{
+ UpdateFunc: func(evt event.UpdateEvent) bool {
+ old, ok := evt.ObjectOld.(*configv1.Network)
+ if !ok {
+ return true
+ }
+ new, ok := evt.ObjectNew.(*configv1.Network)
+ if !ok {
+ return true
+ }
+ if reflect.DeepEqual(old.Spec.NetworkDiagnostics, new.Spec.NetworkDiagnostics) {
+ return false
+ }
+ return true
+ },
+ },
},
})
if err != nil {
@@ -155,21 +166,27 @@ func add(mgr manager.Manager, r *ReconcileOperConfig) error {
}
// Watch for changes to primary resource Network (as long as the spec changes)
- err = c.Watch(source.Kind(mgr.GetCache(), &operv1.Network{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
- UpdateFunc: func(evt event.UpdateEvent) bool {
- old, ok := evt.ObjectOld.(*operv1.Network)
- if !ok {
- return true
- }
- new, ok := evt.ObjectNew.(*operv1.Network)
- if !ok {
- return true
- }
- if reflect.DeepEqual(old.Spec, new.Spec) {
- log.Printf("Skipping reconcile of Network.operator.openshift.io: spec unchanged")
- return false
- }
- return true
+	// The informer fetched above caches configv1.Network; operv1.Network needs its own informer.
+	operNetInformer, err := mgr.GetCache().GetInformer(context.Background(), &operv1.Network{})
+	if err != nil {
+		return err
+	}
+
+	err = c.Watch(&source.Informer{
+		Informer: operNetInformer,
+		Handler:  &handler.EnqueueRequestForObject{},
+		Predicates: []predicate.Predicate{
+			predicate.Funcs{
+				UpdateFunc: func(evt event.UpdateEvent) bool {
+					old, okOld := evt.ObjectOld.(*operv1.Network)
+					new, okNew := evt.ObjectNew.(*operv1.Network)
+					if okOld && okNew && reflect.DeepEqual(old.Spec, new.Spec) {
+						log.Printf("Skipping reconcile of Network.operator.openshift.io: spec unchanged")
+						return false
+					}
+					return true
+				},
+			},
},
})
if err != nil {
@@ -187,15 +204,18 @@ func add(mgr manager.Manager, r *ReconcileOperConfig) error {
r.client.Default().AddCustomInformer(cmInformer) // Tell the ClusterClient about this informer
- if err := c.Watch(&source.Informer{Informer: cmInformer},
- handler.EnqueueRequestsFromMapFunc(reconcileOperConfig),
- predicate.ResourceVersionChangedPredicate{},
- predicate.NewPredicateFuncs(func(object crclient.Object) bool {
- // Ignore ConfigMaps we manage as part of this loop
- return !(object.GetName() == "network-operator-lock" ||
- object.GetName() == "applied-cluster")
- }),
- ); err != nil {
+ if err := c.Watch(&source.Informer{
+ Informer: cmInformer,
+ Handler: handler.EnqueueRequestsFromMapFunc(reconcileOperConfig),
+ Predicates: []predicate.Predicate{
+ predicate.ResourceVersionChangedPredicate{},
+ predicate.NewPredicateFuncs(func(object crclient.Object) bool {
+ // Ignore ConfigMaps we manage as part of this loop
+ return !(object.GetName() == "network-operator-lock" ||
+ object.GetName() == "applied-cluster")
+ }),
+ },
+ }); err != nil {
return err
}
@@ -218,11 +238,13 @@ func add(mgr manager.Manager, r *ReconcileOperConfig) error {
return true
},
}
- if err := c.Watch(
- source.Kind(mgr.GetCache(), &corev1.Node{}),
- handler.EnqueueRequestsFromMapFunc(reconcileOperConfig),
- nodePredicate,
- ); err != nil {
+	nodeInformer, err := mgr.GetCache().GetInformer(context.Background(), &corev1.Node{})
+	if err != nil {
+		return err
+	}
+	if err := c.Watch(&source.Informer{
+		Informer: nodeInformer, Handler: handler.EnqueueRequestsFromMapFunc(reconcileOperConfig), Predicates: []predicate.Predicate{nodePredicate},
+	}); err != nil {
return err
}
diff --git a/pkg/controller/pki/pki_controller.go b/pkg/controller/pki/pki_controller.go
index 1b21996883..e7fb5c3e5d 100644
--- a/pkg/controller/pki/pki_controller.go
+++ b/pkg/controller/pki/pki_controller.go
@@ -56,7 +56,7 @@ func Add(mgr manager.Manager, status *statusmanager.StatusManager, _ cnoclient.C
}
// Watch for changes to primary resource PKI.network.operator.openshift.io/v1
- err = c.Watch(source.Kind(mgr.GetCache(), &netopv1.OperatorPKI{}), &handler.EnqueueRequestForObject{})
+ err = c.Watch(source.Kind(mgr.GetCache(), &netopv1.OperatorPKI{}, &handler.TypedEnqueueRequestForObject[*netopv1.OperatorPKI]{}))
if err != nil {
return err
}
@@ -193,9 +193,11 @@ func newPKI(config *netopv1.OperatorPKI, clientset *kubernetes.Clientset, mgr ma
cont := certrotation.NewCertRotationController(
fmt.Sprintf("%s/%s", config.Namespace, config.Name), // name, not really used
certrotation.RotatedSigningCASecret{
- Namespace: config.Namespace,
- Name: config.Name + "-ca",
- JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ Namespace: config.Namespace,
+ Name: config.Name + "-ca",
+ AdditionalAnnotations: certrotation.AdditionalAnnotations{
+ JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ },
Validity: 10 * OneYear,
Refresh: 9 * OneYear,
Informer: inf.Core().V1().Secrets(),
@@ -204,23 +206,26 @@ func newPKI(config *netopv1.OperatorPKI, clientset *kubernetes.Clientset, mgr ma
EventRecorder: &eventrecorder.LoggingRecorder{},
},
certrotation.CABundleConfigMap{
- Namespace: config.Namespace,
- Name: config.Name + "-ca",
- JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ Namespace: config.Namespace,
+ Name: config.Name + "-ca",
+ AdditionalAnnotations: certrotation.AdditionalAnnotations{
+ JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ },
Lister: inf.Core().V1().ConfigMaps().Lister(),
Informer: inf.Core().V1().ConfigMaps(),
Client: clientset.CoreV1(),
EventRecorder: &eventrecorder.LoggingRecorder{},
},
certrotation.RotatedSelfSignedCertKeySecret{
- Namespace: config.Namespace,
- Name: config.Name + "-cert",
- JiraComponent: names.ClusterNetworkOperatorJiraComponent,
- Validity: OneYear / 2,
- Refresh: OneYear / 4,
+ Namespace: config.Namespace,
+ Name: config.Name + "-cert",
+ AdditionalAnnotations: certrotation.AdditionalAnnotations{
+ JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ },
+ Validity: OneYear / 2,
+ Refresh: OneYear / 4,
CertCreator: &certrotation.ServingRotation{
Hostnames: func() []string { return []string{spec.TargetCert.CommonName} },
-
// Force the certificate to also be client
CertificateExtensionFn: []crypto.CertificateExtensionFunc{
toClientCert,
diff --git a/pkg/controller/proxyconfig/controller.go b/pkg/controller/proxyconfig/controller.go
index cfded73057..e0172f0247 100644
--- a/pkg/controller/proxyconfig/controller.go
+++ b/pkg/controller/proxyconfig/controller.go
@@ -65,14 +65,16 @@ func add(mgr manager.Manager, r *ReconcileProxyConfig) error {
}
// watch for changes to configmaps in openshift-config
- if err := c.Watch(&source.Informer{Informer: r.cmInformer},
- &handler.EnqueueRequestForObject{},
+ if err := c.Watch(&source.Informer{
+ Informer: r.cmInformer,
+ Handler: &handler.EnqueueRequestForObject{},
+ },
); err != nil {
return err
}
// Watch for changes to the proxy resource.
- err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Proxy{}), &handler.EnqueueRequestForObject{})
+ err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Proxy{}, &handler.TypedEnqueueRequestForObject[*configv1.Proxy]{}))
if err != nil {
return err
}
diff --git a/pkg/controller/signer/signer-controller.go b/pkg/controller/signer/signer-controller.go
index 110a086c73..b224ccd171 100644
--- a/pkg/controller/signer/signer-controller.go
+++ b/pkg/controller/signer/signer-controller.go
@@ -57,7 +57,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
}
// Watch for changes to CetificateSigningRequest resource
- err = c.Watch(source.Kind(mgr.GetCache(), &csrv1.CertificateSigningRequest{}), &handler.EnqueueRequestForObject{})
+ err = c.Watch(source.Kind(mgr.GetCache(), &csrv1.CertificateSigningRequest{}, &handler.TypedEnqueueRequestForObject[*csrv1.CertificateSigningRequest]{}))
if err != nil {
return err
}
diff --git a/pkg/controller/statusmanager/pod_watcher.go b/pkg/controller/statusmanager/pod_watcher.go
index 659626edc5..8eeed8ff4d 100644
--- a/pkg/controller/statusmanager/pod_watcher.go
+++ b/pkg/controller/statusmanager/pod_watcher.go
@@ -105,9 +105,10 @@ func (s *StatusManager) AddPodWatcher(mgr manager.Manager) error {
}
for _, inf := range infs {
- if err := c.Watch(&source.Informer{Informer: inf},
- handler.EnqueueRequestsFromMapFunc(enqueueRP),
- ); err != nil {
+ if err := c.Watch(&source.Informer{
+ Informer: inf,
+ Handler: handler.EnqueueRequestsFromMapFunc(enqueueRP),
+ }); err != nil {
return err
}
}
diff --git a/pkg/network/ovn_kubernetes.go b/pkg/network/ovn_kubernetes.go
index 2b7679d23c..a4d592c8b3 100644
--- a/pkg/network/ovn_kubernetes.go
+++ b/pkg/network/ovn_kubernetes.go
@@ -19,6 +19,7 @@ import (
yaml "github.com/ghodss/yaml"
configv1 "github.com/openshift/api/config/v1"
+ apifeatures "github.com/openshift/api/features"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/cluster-network-operator/pkg/bootstrap"
cnoclient "github.com/openshift/cluster-network-operator/pkg/client"
@@ -304,8 +305,8 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
}
// leverage feature gates
- data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(configv1.FeatureGateAdminNetworkPolicy)
- data.Data["DNS_NAME_RESOLVER_ENABLE"] = featureGates.Enabled(configv1.FeatureGateDNSNameResolver)
+ data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(apifeatures.FeatureGateAdminNetworkPolicy)
+ data.Data["DNS_NAME_RESOLVER_ENABLE"] = featureGates.Enabled(apifeatures.FeatureGateDNSNameResolver)
data.Data["ReachabilityTotalTimeoutSeconds"] = c.EgressIPConfig.ReachabilityTotalTimeoutSeconds
diff --git a/pkg/network/ovn_kubernetes_test.go b/pkg/network/ovn_kubernetes_test.go
index a1d64ae28b..bfaa8def09 100644
--- a/pkg/network/ovn_kubernetes_test.go
+++ b/pkg/network/ovn_kubernetes_test.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ apifeatures "github.com/openshift/api/features"
"strconv"
"strings"
"testing"
@@ -93,7 +94,7 @@ func TestRenderOVNKubernetes(t *testing.T) {
},
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -155,7 +156,7 @@ func TestRenderOVNKubernetesIPv6(t *testing.T) {
},
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -742,7 +743,7 @@ logfile-maxage=0`,
controlPlaneReplicaCount: 2,
enableMultiNetPolicies: true,
- enabledFeatureGates: []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy},
+ enabledFeatureGates: []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy},
},
{
desc: "enable multi-network policies without multi-network support",
@@ -786,7 +787,7 @@ logfile-maxage=0`,
controlPlaneReplicaCount: 2,
disableMultiNet: true,
enableMultiNetPolicies: true,
- enabledFeatureGates: []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy},
+ enabledFeatureGates: []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy},
},
{
desc: "enable dns-name resolver feature",
@@ -829,7 +830,7 @@ logfile-maxsize=100
logfile-maxbackups=5
logfile-maxage=0`,
controlPlaneReplicaCount: 2,
- enabledFeatureGates: []configv1.FeatureGateName{configv1.FeatureGateDNSNameResolver},
+ enabledFeatureGates: []configv1.FeatureGateName{apifeatures.FeatureGateDNSNameResolver},
},
}
g := NewGomegaWithT(t)
@@ -882,8 +883,8 @@ logfile-maxage=0`,
}
knownFeatureGates := []configv1.FeatureGateName{
- configv1.FeatureGateAdminNetworkPolicy,
- configv1.FeatureGateDNSNameResolver,
+ apifeatures.FeatureGateAdminNetworkPolicy,
+ apifeatures.FeatureGateDNSNameResolver,
}
s := sets.New[configv1.FeatureGateName](tc.enabledFeatureGates...)
enabled := []configv1.FeatureGateName{}
@@ -2042,7 +2043,7 @@ metadata:
PrePullerUpdateStatus: prepullerStatus,
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -2353,7 +2354,7 @@ func TestRenderOVNKubernetesEnableIPsec(t *testing.T) {
// At the 1st pass, ensure IPsec MachineConfigs are not rolled out until MCO is ready.
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -2580,7 +2581,7 @@ func TestRenderOVNKubernetesEnableIPsecForHostedControlPlane(t *testing.T) {
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -2702,7 +2703,7 @@ func TestRenderOVNKubernetesIPsecUpgradeWithMachineConfig(t *testing.T) {
bootstrapResult.Infra.WorkerMCPStatuses = []mcfgv1.MachineConfigPoolStatus{{MachineCount: 1, ReadyMachineCount: 1, UpdatedMachineCount: 1,
Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{Source: []v1.ObjectReference{{Name: workerMachineConfigIPsecExtName}}}}}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -2814,7 +2815,7 @@ func TestRenderOVNKubernetesIPsecUpgradeWithNoMachineConfig(t *testing.T) {
// Upgrade starts and it's going to rollout IPsec Machine Configs without making any changes into existing IPsec configs.
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -2982,7 +2983,7 @@ func TestRenderOVNKubernetesIPsecUpgradeWithHypershiftHostedCluster(t *testing.T
bootstrapResult.Infra.HostedControlPlane = &hypershift.HostedControlPlane{}
// Upgrade starts and it's going to get only ovn-ipsec-containerized DS.
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
@@ -3089,7 +3090,7 @@ func TestRenderOVNKubernetesDisableIPsec(t *testing.T) {
},
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
@@ -3229,7 +3230,7 @@ func TestRenderOVNKubernetesDisableIPsecWithUserInstalledIPsecMachineConfigs(t *
},
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
@@ -3364,7 +3365,7 @@ func TestRenderOVNKubernetesDualStackPrecedenceOverUpgrade(t *testing.T) {
},
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
@@ -3466,7 +3467,7 @@ func TestRenderOVNKubernetesOVSFlowsConfigMap(t *testing.T) {
FlowsConfig: tc.FlowsConfig,
}
featureGatesCNO := featuregates.NewFeatureGate(
- []configv1.FeatureGateName{configv1.FeatureGateAdminNetworkPolicy, configv1.FeatureGateDNSNameResolver},
+ []configv1.FeatureGateName{apifeatures.FeatureGateAdminNetworkPolicy, apifeatures.FeatureGateDNSNameResolver},
[]configv1.FeatureGateName{},
)
fakeClient := cnofake.NewFakeClient()
diff --git a/pkg/network/render.go b/pkg/network/render.go
index b73a582e52..fbc4761b28 100644
--- a/pkg/network/render.go
+++ b/pkg/network/render.go
@@ -1,6 +1,7 @@
package network
import (
+ apifeatures "github.com/openshift/api/features"
"log"
"net"
"os"
@@ -623,7 +624,7 @@ func renderCRDForMigration(conf *operv1.NetworkSpec, manifestDir string, feature
// the CRD installation can happen according to whether the feature gate is enabled or not
// in the cluster
data := render.MakeRenderData()
- data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(configv1.FeatureGateAdminNetworkPolicy)
+ data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(apifeatures.FeatureGateAdminNetworkPolicy)
manifests, err := render.RenderTemplate(filepath.Join(manifestDir, "network/ovn-kubernetes/common/001-crd.yaml"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render OVNKubernetes CRDs")
diff --git a/pkg/util/validation/network.go b/pkg/util/validation/network.go
index 359d02879e..b721eedc22 100644
--- a/pkg/util/validation/network.go
+++ b/pkg/util/validation/network.go
@@ -3,6 +3,7 @@ package validation
import (
"errors"
"fmt"
+ "k8s.io/apimachinery/pkg/util/validation/field"
"net"
"net/url"
"strconv"
@@ -40,7 +41,7 @@ func Subdomain(v string) error {
// Host validates if host is a valid IP address or subdomain in DNS (RFC 1123).
func Host(host string) error {
errDomain := DomainName(host, false)
- errIP := validation.IsValidIP(host)
+ errIP := validation.IsValidIP(field.NewPath(""), host)
if errDomain != nil && errIP != nil {
return fmt.Errorf("invalid host: %s", host)
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
new file mode 100644
index 0000000000..52cf18e425
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
new file mode 100644
index 0000000000..ab51212676
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
@@ -0,0 +1,68 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system.
+
+Here is a general template for an ANTLR based recognizer in Go:
+
+ .
+ ├── myproject
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.0-complete.jar
+ │ ├── error_listeners.go
+ │ ├── generate.go
+ │ ├── generate.sh
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options.
+
+From the command line at the root of your package “myproject” you can then simply issue the command:
+
+ go generate ./...
+
+# Copyright Notice
+
+Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+*/
+package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
similarity index 94%
rename from vendor/github.com/antlr4-go/antlr/v4/atn.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
index e749ebd0cf..98010d2e6e 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
@@ -4,6 +4,8 @@
package antlr
+import "sync"
+
// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
var ATNInvalidAltNumber int
@@ -18,11 +20,10 @@ var ATNInvalidAltNumber int
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
-
- // DecisionToState is the decision points for all rules, sub-rules, optional
- // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
// can go back later and build DFA predictors for them. This includes
- // all the rules, sub-rules, optional blocks, ()+, ()* etc...
+ // all the rules, subrules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -50,13 +51,11 @@ type ATN struct {
// specified, and otherwise is nil.
ruleToTokenType []int
- // ATNStates is a list of all states in the ATN, ordered by state number.
- //
states []ATNState
- mu Mutex
- stateMu RWMutex
- edgeMu RWMutex
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
}
// NewATN returns a new ATN struct representing the given grammarType and is used
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
new file mode 100644
index 0000000000..7619fa172e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
@@ -0,0 +1,303 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ Equals(o Collectable[ATNConfig]) bool
+ Hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
+ if b == o {
+ return true
+ } else if o == nil {
+ return false
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.Equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.Equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for BaseATNConfig, when no specialist hash function
+// is required for a collection
+func (b *BaseATNConfig) Hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.GetStateNumber())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.Hash())
+ h = murmurUpdate(h, l.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
+ if l == other {
+ return true
+ }
+ var othert, ok = other.(*LexerATNConfig)
+
+ if l == other {
+ return true
+ } else if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.Equals(othert.BaseATNConfig)
+}
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
new file mode 100644
index 0000000000..43e9b33f3b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
@@ -0,0 +1,441 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type ATNConfigSet interface {
+ Hash() int
+ Equals(o Collectable[ATNConfig]) bool
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() *JStore[ATNState, Comparator[ATNState]]
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ Alts() *BitSet
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from a.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. It not, protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func (b *BaseATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare is a hack function just to verify that adding DFAstares to the known
+// set works, so long as comparison of ATNConfigSet s works. For that to work, we
+// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
+// know the order, so we do this inefficient hack. If this proves the point, then
+// we can change the config set to a better structure.
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+
+ for _, c := range b.configs {
+ found := false
+ for _, c2 := range bs.configs {
+ if c.Equals(c2) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return false
+ }
+
+ }
+ return true
+}
+
+func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *BaseATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func hashATNConfig(i interface{}) int {
+ o := i.(ATNConfig)
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
+ return false
+ }
+
+ if ai.GetAlt() != bi.GetAlt() {
+ return false
+ }
+
+ return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
similarity index 86%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
index bdb30b3622..3c975ec7bf 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
@@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.readOnly = readOnly
}
@@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.verifyATN = verifyATN
}
@@ -42,12 +42,11 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}
-//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
similarity index 97%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
index 2dcb9ae11b..3888856b4b 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
@@ -35,7 +35,6 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
return &ATNDeserializer{options: options}
}
-//goland:noinspection GoUnusedFunction
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
@@ -194,7 +193,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
}
}
-func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
m := a.readInt()
// Preallocate the needed capacity.
@@ -351,7 +350,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart.endState = bypassStop
- atn.defineDecisionState(&bypassStart.BaseDecisionState)
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
@@ -451,7 +450,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
continue
}
- // We analyze the [ATN] to determine if an ATN decision state is the
+ // We analyze the ATN to determine if a ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
@@ -554,7 +553,7 @@ func (a *ATNDeserializer) readInt() int {
return int(v) // data is 32 bits but int is at least that big
}
-func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
similarity index 66%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
index afe6c9f809..41529115fa 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
@@ -4,7 +4,7 @@
package antlr
-var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
type IATNSimulator interface {
SharedContextCache() *PredictionContextCache
@@ -18,13 +18,22 @@ type BaseATNSimulator struct {
decisionToDFA []*DFA
}
-func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
if b.sharedContextCache == nil {
return context
}
- //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
- visited := NewVisitRecord()
+ visited := make(map[PredictionContext]PredictionContext)
+
return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
similarity index 65%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_state.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
index 2ae5807cdb..1f2a56bc31 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
@@ -4,11 +4,7 @@
package antlr
-import (
- "fmt"
- "os"
- "strconv"
-)
+import "strconv"
// Constants for serialization.
const (
@@ -29,7 +25,6 @@ const (
ATNStateInvalidStateNumber = -1
)
-//goland:noinspection GoUnusedGlobalVariable
var ATNStateInitialNumTransitions = 4
type ATNState interface {
@@ -78,7 +73,7 @@ type BaseATNState struct {
transitions []Transition
}
-func NewATNState() *BaseATNState {
+func NewBaseATNState() *BaseATNState {
return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}
@@ -153,46 +148,27 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) {
if len(as.transitions) == 0 {
as.epsilonOnlyTransitions = trans.getIsEpsilon()
} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
- _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
as.epsilonOnlyTransitions = false
}
- // TODO: Check code for already present compared to the Java equivalent
- //alreadyPresent := false
- //for _, t := range as.transitions {
- // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
- // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
- // alreadyPresent = true
- // break
- // }
- // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
- // alreadyPresent = true
- // break
- // }
- //}
- //if !alreadyPresent {
if index == -1 {
as.transitions = append(as.transitions, trans)
} else {
as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
// TODO: as.transitions.splice(index, 1, trans)
}
- //} else {
- // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
- //}
}
type BasicState struct {
- BaseATNState
+ *BaseATNState
}
func NewBasicState() *BasicState {
- return &BasicState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
}
type DecisionState interface {
@@ -206,19 +182,13 @@ type DecisionState interface {
}
type BaseDecisionState struct {
- BaseATNState
+ *BaseATNState
decision int
nonGreedy bool
}
func NewBaseDecisionState() *BaseDecisionState {
- return &BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- decision: -1,
- }
+ return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}
func (s *BaseDecisionState) getDecision() int {
@@ -246,20 +216,12 @@ type BlockStartState interface {
// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
- BaseDecisionState
+ *BaseDecisionState
endState *BlockEndState
}
func NewBlockStartState() *BaseBlockStartState {
- return &BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- decision: -1,
- },
- }
+ return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}
func (s *BaseBlockStartState) getEndState() *BlockEndState {
@@ -271,38 +233,31 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
}
type BasicBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
}
func NewBasicBlockStartState() *BasicBlockStartState {
- return &BasicBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &BasicBlockStartState{}
// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
- BaseATNState
+ *BaseATNState
startState ATNState
}
func NewBlockEndState() *BlockEndState {
- return &BlockEndState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBlockEnd,
- },
- startState: nil,
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
}
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
@@ -310,48 +265,43 @@ func NewBlockEndState() *BlockEndState {
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
- BaseATNState
+ *BaseATNState
}
func NewRuleStopState() *RuleStopState {
- return &RuleStopState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateRuleStop,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
}
type RuleStartState struct {
- BaseATNState
+ *BaseATNState
stopState ATNState
isPrecedenceRule bool
}
func NewRuleStartState() *RuleStartState {
- return &RuleStartState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateRuleStart,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
}
// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
- BaseDecisionState
+ *BaseDecisionState
}
func NewPlusLoopbackState() *PlusLoopbackState {
- return &PlusLoopbackState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStatePlusLoopBack,
- },
- },
- }
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
}
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
@@ -359,103 +309,85 @@ func NewPlusLoopbackState() *PlusLoopbackState {
// it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
loopBackState ATNState
}
func NewPlusBlockStartState() *PlusBlockStartState {
- return &PlusBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStatePlusBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &PlusBlockStartState{}
// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
}
func NewStarBlockStartState() *StarBlockStartState {
- return &StarBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &StarBlockStartState{}
type StarLoopbackState struct {
- BaseATNState
+ *BaseATNState
}
func NewStarLoopbackState() *StarLoopbackState {
- return &StarLoopbackState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarLoopBack,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
}
type StarLoopEntryState struct {
- BaseDecisionState
+ *BaseDecisionState
loopBackState ATNState
precedenceRuleDecision bool
}
func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
// False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
- return &StarLoopEntryState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarLoopEntry,
- },
- },
- }
+ return &StarLoopEntryState{BaseDecisionState: b}
}
// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
- BaseATNState
+ *BaseATNState
loopBackState ATNState
}
func NewLoopEndState() *LoopEndState {
- return &LoopEndState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateLoopEnd,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
}
// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
- BaseDecisionState
+ *BaseDecisionState
}
func NewTokensStartState() *TokensStartState {
- return &TokensStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateTokenStart,
- },
- },
- }
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_type.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
similarity index 89%
rename from vendor/github.com/antlr4-go/antlr/v4/char_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
index bd8127b6b5..c33f0adb5e 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
@@ -8,5 +8,5 @@ type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
- GetTextFromInterval(Interval) string
+ GetTextFromInterval(*Interval) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
similarity index 88%
rename from vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
index b75da9df08..c6c9485a20 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
@@ -28,24 +28,22 @@ type CommonTokenStream struct {
// trivial with bt field.
fetchedEOF bool
- // index into [tokens] of the current token (next token to consume).
+ // index indexs into tokens of the current token (next token to consume).
// tokens[p] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
- // see the documentation of [IntStream] for a description of initializing methods.
+ // see the documentation of IntStream for a description of initializing methods.
index int
- // tokenSource is the [TokenSource] from which tokens for the bt stream are
+ // tokenSource is the TokenSource from which tokens for the bt stream are
// fetched.
tokenSource TokenSource
- // tokens contains all tokens fetched from the token source. The list is considered a
+ // tokens is all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
-// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
-// tokens and will pull tokens from the given lexer channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
@@ -55,7 +53,6 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
}
}
-// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
@@ -64,11 +61,9 @@ func (c *CommonTokenStream) Mark() int {
return 0
}
-func (c *CommonTokenStream) Release(_ int) {}
+func (c *CommonTokenStream) Release(marker int) {}
-func (c *CommonTokenStream) Reset() {
- c.fetchedEOF = false
- c.tokens = make([]Token, 0)
+func (c *CommonTokenStream) reset() {
c.Seek(0)
}
@@ -112,7 +107,7 @@ func (c *CommonTokenStream) Consume() {
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
- n := i - len(c.tokens) + 1 // How many more elements do we need?
+ n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
@@ -198,13 +193,12 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
- c.fetchedEOF = false
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
-// no tokens on channel between 'i' and [TokenEOF].
-func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
+// no tokens on channel between i and EOF.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
c.Sync(i)
if i >= len(c.tokens) {
@@ -250,7 +244,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
- // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
+ // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
var to int
if nextOnChannel == -1 {
@@ -320,8 +314,7 @@ func (c *CommonTokenStream) Index() int {
}
func (c *CommonTokenStream) GetAllText() string {
- c.Fill()
- return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
+ return c.GetTextFromInterval(nil)
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
@@ -336,9 +329,15 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
return c.GetTextFromInterval(interval.GetSourceInterval())
}
-func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
- c.Sync(interval.Stop)
+
+ if interval == nil {
+ c.Fill()
+ interval = NewInterval(0, len(c.tokens)-1)
+ } else {
+ c.Sync(interval.Stop)
+ }
start := interval.Start
stop := interval.Stop
diff --git a/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
similarity index 82%
rename from vendor/github.com/antlr4-go/antlr/v4/comparators.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
index 7467e9b43d..9ea3200536 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/comparators.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
@@ -18,20 +18,17 @@ package antlr
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
-// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}
var (
- aStateEqInst = &ObjEqComparator[ATNState]{}
- aConfEqInst = &ObjEqComparator[*ATNConfig]{}
-
- // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
- aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
- atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[ATNConfig]{}
+ aConfCompInst = &ATNConfigComparator[ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
- atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
- pContextEqInst = &ObjEqComparator[*PredictionContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
)
// Equals2 delegates to the Equals() method of type T
@@ -47,14 +44,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
type SemCComparator[T Collectable[T]] struct{}
-// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -75,8 +72,7 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
-
+func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
@@ -89,7 +85,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -109,21 +105,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
+func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}
-// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
-func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -145,6 +141,7 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
-func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
+
return o.Hash()
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
similarity index 76%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
index 6b63eb1589..bfd43e1f73 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
@@ -4,8 +4,6 @@
package antlr
-// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
-// reach and the transitions between them.
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -14,9 +12,10 @@ type DFA struct {
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
- // good, but the DFAState is an object and can't be used directly as the key as it can in say Java
+ // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
// amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
- // to see if they really are the same object. Hence, we have our own map storage.
+ // to see if they really are the same object.
+ //
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
@@ -33,11 +32,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
- states: nil, // Lazy initialize
+ states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
- dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
+ dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
@@ -96,11 +95,12 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
- d.states = nil // Lazy initialize
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
d.numstates = 0
if precedenceDfa {
- precedenceState := NewDFAState(-1, NewATNConfigSet(false))
+ precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
@@ -113,31 +113,6 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
}
}
-// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
-// instantiation of the states JMap.
-func (d *DFA) Len() int {
- if d.states == nil {
- return 0
- }
- return d.states.Len()
-}
-
-// Get returns a state that matches s if it is present in the DFA state set. We defer to this
-// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
-func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
- if d.states == nil {
- return nil, false
- }
- return d.states.Get(s)
-}
-
-func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
- if d.states == nil {
- d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
- }
- return d.states.Put(s)
-}
-
func (d *DFA) getS0() *DFAState {
return d.s0
}
@@ -146,11 +121,9 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
-// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
+// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
- if d.states == nil {
- return []*DFAState{}
- }
+
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
similarity index 97%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
index 0e11009899..84d0a31e53 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
@@ -10,7 +10,7 @@ import (
"strings"
)
-// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
dfa *DFA
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
similarity index 81%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
index 6541430745..c90dec55c8 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
@@ -22,31 +22,30 @@ func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
-// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
-// after reading input a1, a2,..an, the DFA is in a state that represents the
+// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
-// state along some path labeled a1a2..an."
-//
-// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
-// states the [ATN] could be in. We need to track the alt predicted by each state
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
-// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
-// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
-// A [DFAState] may have multiple references to a particular state, but with
-// different [ATN] contexts (with same or different alts) meaning that state was
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
- configs *ATNConfigSet
+ configs ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
@@ -54,7 +53,7 @@ type DFAState struct {
isAcceptState bool
- // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+ // prediction is the ttype we match or alt we predict if the state is accept.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
@@ -82,9 +81,9 @@ type DFAState struct {
predicates []*PredPrediction
}
-func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
if configs == nil {
- configs = NewATNConfigSet(false)
+ configs = NewBaseATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
@@ -95,7 +94,7 @@ func (d *DFAState) GetAltSet() []int {
var alts []int
if d.configs != nil {
- for _, c := range d.configs.configs {
+ for _, c := range d.configs.GetItems() {
alts = append(alts, c.GetAlt())
}
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
similarity index 92%
rename from vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
index bd2cd8bc3a..c55bcc19b2 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
@@ -33,7 +33,6 @@ type DiagnosticErrorListener struct {
exactOnly bool
}
-//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
@@ -43,7 +42,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
return n
}
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
if d.exactOnly && !exact {
return
}
@@ -56,7 +55,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -65,7 +64,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
@@ -97,12 +96,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
- for _, c := range set.configs {
+ for _, c := range set.GetItems() {
result.add(c.GetAlt())
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
similarity index 62%
rename from vendor/github.com/antlr4-go/antlr/v4/error_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
index 21a0216434..f679f0dcd5 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
@@ -16,29 +16,28 @@ import (
type ErrorListener interface {
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
- ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
- ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
- ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}
type DefaultErrorListener struct {
}
-//goland:noinspection GoUnusedExportedFunction
func NewDefaultErrorListener() *DefaultErrorListener {
return new(DefaultErrorListener)
}
-func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}
-func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}
-func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}
-func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}
type ConsoleErrorListener struct {
@@ -49,16 +48,21 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}
-// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
+// Provides a default instance of {@link ConsoleErrorListener}.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-// SyntaxError prints messages to System.err containing the
-// values of line, charPositionInLine, and msg using
-// the following format:
+// {@inheritDoc}
//
-// line :
-func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
- _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+//
+// This implementation prints messages to {@link System//err} containing the
+// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
+// the following format.
+//
+//
+// line line:charPositionInLine msg
+//
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
type ProxyErrorListener struct {
@@ -81,19 +85,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol
}
}
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
similarity index 58%
rename from vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
index 9db2be1c74..5c0a637ba4 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
@@ -21,8 +21,8 @@ type ErrorStrategy interface {
ReportMatch(Parser)
}
-// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
-// error reporting and recovery in ANTLR parsers.
+// This is the default implementation of {@link ANTLRErrorStrategy} used for
+// error Reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
@@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy {
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
- // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
@@ -62,37 +62,50 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
-func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+//
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
-func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
-func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+//
+// @param recognizer
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}
-// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+// {@inheritDoc}
+//
+// The default implementation simply calls {@link //endErrorCondition}.
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
d.endErrorCondition(recognizer)
}
-// ReportError is the default implementation of error reporting.
-// It returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls [beginErrorCondition]
-// and dispatches the Reporting task based on the runtime type of e
-// according to the following table.
-//
-// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
-// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
-// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
-// All other types : Calls [NotifyErrorListeners] to Report the exception
+// {@inheritDoc}
+//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table.
+//
+//
+// - {@link NoViableAltException}: Dispatches the call to
+// {@link //ReportNoViableAlternative}
+// - {@link InputMisMatchException}: Dispatches the call to
+// {@link //ReportInputMisMatch}
+// - {@link FailedPredicateException}: Dispatches the call to
+// {@link //ReportFailedPredicate}
+// - All other types: calls {@link Parser//NotifyErrorListeners} to Report
+// the exception
+//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
@@ -115,10 +128,12 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
}
}
-// Recover is the default recovery implementation.
-// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
-// loosely the set of tokens that can follow the current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+// {@inheritDoc}
+//
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
@@ -133,58 +148,54 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException
d.lastErrorStates = NewIntervalSet()
}
d.lastErrorStates.addOne(recognizer.GetState())
- followSet := d.GetErrorRecoverySet(recognizer)
+ followSet := d.getErrorRecoverySet(recognizer)
d.consumeUntil(recognizer, followSet)
}
-// Sync is the default implementation of error strategy synchronization.
-//
-// This Sync makes sure that the current lookahead symbol is consistent with what were expecting
-// at this point in the [ATN]. You can call this anytime but ANTLR only
-// generates code to check before sub-rules/loops and each iteration.
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what were expecting
+// at d point in the ATN. You can call d anytime but ANTLR only
+// generates code to check before subrules/loops and each iteration.
//
-// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
-// sub-rules. E.g.:
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
//
-// At the start of a sub-rule upon error, Sync performs single
+// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
-// If the sub-rule is optional
-//
-// ({@code (...)?}, {@code (...)*},
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
//
-// or a block with an empty alternative), then the expected set includes what follows
-// the sub-rule.
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
+// ORIGINS
//
-// # Origins
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
+// Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule:
+// out of the entire rules surrounding the loop. So, for rule
//
-// classfunc : 'class' ID '{' member* '}'
+//
+// classfunc : 'class' ID '{' member* '}'
+//
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
-// This functionality cost a bit of effort because the parser has to
-// compare the token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding this method as empty:
-//
-// { }
-//
-// [Jim Idle]: https://github.com/jimidle
+// This functionality cost a little bit of effort because the parser has to
+// compare token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off d
+// functionality by simply overriding d method as a blank { }.
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.InErrorRecoveryMode(recognizer) {
@@ -206,21 +217,25 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
if d.SingleTokenDeletion(recognizer) != nil {
return
}
- recognizer.SetError(NewInputMisMatchException(recognizer))
+ panic(NewInputMisMatchException(recognizer))
case ATNStatePlusLoopBack, ATNStateStarLoopBack:
d.ReportUnwantedToken(recognizer)
expecting := NewIntervalSet()
expecting.addSet(recognizer.GetExpectedTokens())
- whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
default:
// do nothing if we can't identify the exact kind of ATN state
}
}
-// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
//
-// See also [ReportError]
+// @param recognizer the parser instance
+// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
@@ -237,38 +252,48 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}.
//
-// See also: [ReportError]
-func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
- msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}.
+//
+// @see //ReportError
//
-// See also: [ReportError]
+// @param recognizer the parser instance
+// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-// ReportUnwantedToken is called to report a syntax error that requires the removal
+// This method is called to Report a syntax error which requires the removal
// of a token from the input stream. At the time d method is called, the
-// erroneous symbol is the current LT(1) symbol and has not yet been
-// removed from the input stream. When this method returns,
-// recognizer is in error recovery mode.
+// erroneous symbol is current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When d method returns,
+// {@code recognizer} is in error recovery mode.
//
-// This method is called when singleTokenDeletion identifies
+// This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
+// input error.
//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls beginErrorCondition to
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
-// [NotifyErrorListeners]
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -282,18 +307,21 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
recognizer.NotifyErrorListeners(msg, t, nil)
}
-// ReportMissingToken is called to report a syntax error which requires the
-// insertion of a missing token into the input stream. At the time this
-// method is called, the missing token has not yet been inserted. When this
-// method returns, recognizer is in error recovery mode.
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time d
+// method is called, the missing token has not yet been inserted. When d
+// method returns, {@code recognizer} is in error recovery mode.
//
-// This method is called when singleTokenInsertion identifies
+// This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
+// input error.
//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls beginErrorCondition to
-// enter error recovery mode, followed by calling [NotifyErrorListeners]
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -306,48 +334,54 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
recognizer.NotifyErrorListeners(msg, t, nil)
}
-// The RecoverInline default implementation attempts to recover from the mismatched input
+// The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method panics with [InputMisMatchException}.
-// TODO: Not sure that panic() is the right thing to do here - JI
+// recovery attempt fails, d method panics an
+// {@link InputMisMatchException}.
//
-// # EXTRA TOKEN (single token deletion)
+// EXTRA TOKEN (single token deletion)
//
-// LA(1) is not what we are looking for. If LA(2) has the
-// right token, however, then assume LA(1) is some extra spurious
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
-// the LA(2) token) as the successful result of the Match operation.
+// the {@code LA(2)} token) as the successful result of the Match operation.
//
-// # This recovery strategy is implemented by singleTokenDeletion
+// This recovery strategy is implemented by {@link
+// //singleTokenDeletion}.
//
-// # MISSING TOKEN (single token insertion)
+// MISSING TOKEN (single token insertion)
//
-// If current token -at LA(1) - is consistent with what could come
-// after the expected LA(1) token, then assume the token is missing
-// and use the parser's [TokenFactory] to create it on the fly. The
-// “insertion” is performed by returning the created token as the successful
-// result of the Match operation.
+// If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
//
-// This recovery strategy is implemented by [SingleTokenInsertion].
+// This recovery strategy is implemented by {@link
+// //singleTokenInsertion}.
//
-// # Example
+// EXAMPLE
//
-// For example, Input i=(3 is clearly missing the ')'. When
-// the parser returns from the nested call to expr, it will have
-// call the chain:
+// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain:
//
-// stat → expr → atom
+//
+// stat &rarr expr &rarr atom
+//
//
-// and it will be trying to Match the ')' at this point in the
+// and it will be trying to Match the {@code ')'} at d point in the
// derivation:
//
-// : ID '=' '(' INT ')' ('+' atom)* ';'
-// ^
+//
+// => ID '=' '(' INT ')' ('+' atom)* ”
+// ^
+//
//
-// The attempt to [Match] ')' will fail when it sees ';' and
-// call [RecoverInline]. To recover, it sees that LA(1)==';'
-// is in the set of tokens that can follow the ')' token reference
-// in rule atom. It can assume that you forgot the ')'.
+// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
@@ -362,24 +396,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
return d.GetMissingSymbol(recognizer)
}
// even that didn't work must panic the exception
- recognizer.SetError(NewInputMisMatchException(recognizer))
- return nil
+ panic(NewInputMisMatchException(recognizer))
}
-// SingleTokenInsertion implements the single-token insertion inline error recovery
-// strategy. It is called by [RecoverInline] if the single-token
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
-// This method determines whether single-token insertion is viable by
-// checking if the LA(1) input symbol could be successfully Matched
-// if it were instead the LA(2) symbol. If this method returns
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If d method returns
// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce this behavior.
+// token with the correct type to produce d behavior.
//
-// This func returns true if single-token insertion is a viable recovery
-// strategy for the current mismatched input.
+// @param recognizer the parser instance
+// @return {@code true} if single-token insertion is a viable recovery
+// strategy for the current mismatched input, otherwise {@code false}
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
@@ -397,21 +431,23 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
return false
}
-// SingleTokenDeletion implements the single-token deletion inline error recovery
-// strategy. It is called by [RecoverInline] to attempt to recover
+// This method implements the single-token deletion inline error recovery
+// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
-// recognizer will not be in error recovery mode since the
+// {@code recognizer} will not be in error recovery mode since the
// returned token was a successful Match.
//
-// If the single-token deletion is successful, this method calls
-// [ReportUnwantedToken] to Report the error, followed by
-// [Consume] to actually “delete” the extraneous token. Then,
-// before returning, [ReportMatch] is called to signal a successful
-// Match.
+// If the single-token deletion is successful, d method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning {@link //ReportMatch} is called to signal a successful
+// Match.
//
-// The func returns the successfully Matched [Token] instance if single-token
-// deletion successfully recovers from the mismatched input, otherwise nil.
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
@@ -431,28 +467,24 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
return nil
}
-// GetMissingSymbol conjures up a missing token during error recovery.
+// Conjure up a missing token during error recovery.
//
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
-// For example:
-//
-// x=ID {f($x)}.
-//
-// The action clearly assumes
+// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want we assume that
-// this token is missing, and we keep going. Because we
+// d token is missing and we keep going. Because we
// have to return some token to replace the missing token,
// we have to conjure one up. This method gives the user control
// over the tokens returned for missing tokens. Mostly,
// you will want to create something special for identifier
// tokens. For literals such as '{' and ',', the default
// action in the parser or tree parser works. It simply creates
-// a [CommonToken] of the appropriate type. The text will be the token name.
-// If you need to change which tokens must be created by the lexer,
-// override this method to create the appropriate tokens.
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override d method to create the appropriate tokens.
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
@@ -466,7 +498,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
if expectedTokenType > 0 && expectedTokenType < len(ln) {
tokenText = ""
} else {
- tokenText = "" // TODO: matches the JS impl
+ tokenText = "" // TODO matches the JS impl
}
}
current := currentSymbol
@@ -484,13 +516,13 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
return recognizer.GetExpectedTokens()
}
-// GetTokenErrorDisplay determines how a token should be displayed in an error message.
-// The default is to display just the text, but during development you might
-// want to have a lot of information spit out. Override this func in that case
-// to use t.String() (which, for [CommonToken], dumps everything about
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
-// so that it creates a new type.
+// so that it creates a NewJava type.
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
@@ -513,57 +545,52 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
return "'" + s + "'"
}
-// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// Compute the error recovery set for the current rule. During
// rule invocation, the parser pushes the set of tokens that can
-// follow that rule reference on the stack. This amounts to
+// follow that rule reference on the stack d amounts to
// computing FIRST of what follows the rule reference in the
// enclosing rule. See LinearApproximator.FIRST().
-//
// This local follow set only includes tokens
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
-// # Example
+// # EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
// thing to do is to consume tokens until you see something that
-// can legally follow a call to r or any rule that called r.
+// can legally follow a call to r//or* any rule that called r.
// You don't want the exact set of viable next tokens because the
// input might just be missing a token--you might consume the
// rest of the input looking for one of the missing tokens.
//
-// Consider the grammar:
-//
-// a : '[' b ']'
-// | '(' b ')'
-// ;
+// Consider grammar:
//
-// b : c '^' INT
-// ;
+// a : '[' b ']'
+// | '(' b ')'
//
-// c : ID
-// | INT
-// ;
+// b : c '^' INT
+// c : ID
+// | INT
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
//
-// FOLLOW(b1_in_a) = FIRST(']') = ']'
-// FOLLOW(b2_in_a) = FIRST(')') = ')'
-// FOLLOW(c_in_b) = FIRST('^') = '^'
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
//
-// Upon erroneous input “[]”, the call chain is
+// Upon erroneous input "[]", the call chain is
//
-// a → b → c
+// a -> b -> c
//
// and, hence, the follow context stack is:
//
-// Depth Follow set Start of rule execution
-// 0 a (from main())
-// 1 ']' b
-// 2 '^' c
+// depth follow set start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
//
// Notice that ')' is not included, because b would have to have
// been called from a different context in rule a for ')' to be
@@ -571,14 +598,11 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
-// all context-sensitive FOLLOW sets - the set of all tokens that
+// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
-// Sync to context-sensitive FOLLOWs for a, b, and c:
-//
-// {']','^'}
-//
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
@@ -596,19 +620,22 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
-// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
-// [A note on error recovery in recursive descent parsers].
+// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+//
+// and
+//
+// "A note on error recovery in recursive descent parsers":
+// http://portal.acm.org/citation.cfm?id=947902.947905
//
-// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
-// Parsers]
+// Later, Josef Grosch had some good ideas:
//
-// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
-// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
+// "Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers":
+// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
-// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
-// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
-// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined
+// at run-time upon error to avoid overhead during parsing.
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
recoverSet := NewIntervalSet()
@@ -633,36 +660,40 @@ func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet)
}
}
-// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+//
+// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// by immediately canceling the parse operation with a
-// [ParseCancellationException]. The implementation ensures that the
-// [ParserRuleContext//exception] field is set for all parse tree nodes
+// {@link ParseCancellationException}. The implementation ensures that the
+// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
-// This error strategy is useful in the following scenarios.
-//
-// - Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of [BailErrorStrategy.Sync] improves the performance of
-// the first stage.
+//
+// This error strategy is useful in the following scenarios.
//
-// - Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the [BailErrorStrategy] avoids wasting work on recovering from errors
-// when the result will be ignored either way.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of {@link BailErrorStrategy//Sync} improves the performance of
+// the first stage.
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
//
-// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
//
-// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
+// @see Parser//setErrorHandler(ANTLRErrorStrategy)
+
type BailErrorStrategy struct {
*DefaultErrorStrategy
}
var _ ErrorStrategy = &BailErrorStrategy{}
-//goland:noinspection GoUnusedExportedFunction
func NewBailErrorStrategy() *BailErrorStrategy {
b := new(BailErrorStrategy)
@@ -672,10 +703,10 @@ func NewBailErrorStrategy() *BailErrorStrategy {
return b
}
-// Recover Instead of recovering from exception e, re-panic it wrapped
-// in a [ParseCancellationException] so it is not caught by the
-// rule func catches. Use Exception.GetCause() to get the
-// original [RecognitionException].
+// Instead of recovering from exception {@code e}, re-panic it wrapped
+// in a {@link ParseCancellationException} so it is not caught by the
+// rule func catches. Use {@link Exception//getCause()} to get the
+// original {@link RecognitionException}.
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
@@ -686,10 +717,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context = nil
}
}
- recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+ panic(NewParseCancellationException()) // TODO we don't emit e properly
}
-// RecoverInline makes sure we don't attempt to recover inline if the parser
+// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
@@ -697,6 +728,7 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
return nil
}
-// Sync makes sure we don't attempt to recover from problems in sub-rules.
-func (b *BailErrorStrategy) Sync(_ Parser) {
+// Make sure we don't attempt to recover from problems in subrules.//
+func (b *BailErrorStrategy) Sync(recognizer Parser) {
+ // pass
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
similarity index 73%
rename from vendor/github.com/antlr4-go/antlr/v4/errors.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
index 8f0f2f601f..3954c13782 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/errors.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
@@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
// } else {
// stack := NewError().stack
// }
- // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+ // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
t := new(BaseRecognitionException)
@@ -43,17 +43,15 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
t.recognizer = recognizer
t.input = input
t.ctx = ctx
-
- // The current Token when an error occurred. Since not all streams
+ // The current {@link Token} when an error occurred. Since not all streams
// support accessing symbols by index, we have to track the {@link Token}
// instance itself.
- //
t.offendingToken = nil
-
// Get the ATN state number the parser was in at the time the error
- // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
- // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
- //
+ // occurred. For {@link NoViableAltException} and
+ // {@link LexerNoViableAltException} exceptions, this is the
+ // {@link DecisionState} number. For others, it is the state whose outgoing
+ // edge we couldn't Match.
t.offendingState = -1
if t.recognizer != nil {
t.offendingState = t.recognizer.GetState()
@@ -76,15 +74,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {
// If the state number is not known, b method returns -1.
-// getExpectedTokens gets the set of input symbols which could potentially follow the
-// previously Matched symbol at the time this exception was raised.
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time b exception was panicn.
//
-// If the set of expected tokens is not known and could not be computed,
-// this method returns nil.
+// If the set of expected tokens is not known and could not be computed,
+// b method returns {@code nil}.
//
-// The func returns the set of token types that could potentially follow the current
-// state in the {ATN}, or nil if the information is not available.
-
+// @return The set of token types that could potentially follow the current
+// state in the ATN, or {@code nil} if the information is not available.
+// /
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
if b.recognizer != nil {
return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
@@ -101,10 +99,10 @@ type LexerNoViableAltException struct {
*BaseRecognitionException
startIndex int
- deadEndConfigs *ATNConfigSet
+ deadEndConfigs ATNConfigSet
}
-func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
l := new(LexerNoViableAltException)
@@ -130,16 +128,14 @@ type NoViableAltException struct {
startToken Token
offendingToken Token
ctx ParserRuleContext
- deadEndConfigs *ATNConfigSet
+ deadEndConfigs ATNConfigSet
}
-// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// Indicates that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
-// in the various paths when the error.
-//
-// Reported by [ReportNoViableAlternative]
-func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+// in the various paths when the error. Reported by ReportNoViableAlternative()
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
if ctx == nil {
ctx = recognizer.GetParserRuleContext()
@@ -161,14 +157,12 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To
n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.Index() that couldn't Match
- // input.LT(1)
+ // input.LT(1)?//
n.deadEndConfigs = deadEndConfigs
-
// The token object at the start index the input stream might
- // not be buffering tokens so get a reference to it.
- //
- // At the time the error occurred, of course the stream needs to keep a
- // buffer of all the tokens, but later we might not have access to those.
+ // not be buffering tokens so get a reference to it. (At the
+ // time the error occurred, of course the stream needs to keep a
+ // buffer all of the tokens but later we might not have access to those.)
n.startToken = startToken
n.offendingToken = offendingToken
@@ -179,7 +173,7 @@ type InputMisMatchException struct {
*BaseRecognitionException
}
-// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
+// This signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
@@ -192,10 +186,11 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
}
-// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.
+
type FailedPredicateException struct {
*BaseRecognitionException
@@ -204,7 +199,6 @@ type FailedPredicateException struct {
predicate string
}
-//goland:noinspection GoUnusedExportedFunction
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
f := new(FailedPredicateException)
@@ -237,21 +231,6 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri
type ParseCancellationException struct {
}
-func (p ParseCancellationException) GetOffendingToken() Token {
- //TODO implement me
- panic("implement me")
-}
-
-func (p ParseCancellationException) GetMessage() string {
- //TODO implement me
- panic("implement me")
-}
-
-func (p ParseCancellationException) GetInputStream() IntStream {
- //TODO implement me
- panic("implement me")
-}
-
func NewParseCancellationException() *ParseCancellationException {
// Error.call(this)
// Error.captureStackTrace(this, ParseCancellationException)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
similarity index 52%
rename from vendor/github.com/antlr4-go/antlr/v4/file_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
index 5f65f809be..bd6ad5efe3 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
@@ -5,7 +5,8 @@
package antlr
import (
- "bufio"
+ "bytes"
+ "io"
"os"
)
@@ -13,53 +14,34 @@ import (
// when you construct the object.
type FileStream struct {
- InputStream
+ *InputStream
+
filename string
}
-//goland:noinspection GoUnusedExportedFunction
func NewFileStream(fileName string) (*FileStream, error) {
+ buf := bytes.NewBuffer(nil)
+
f, err := os.Open(fileName)
if err != nil {
return nil, err
}
-
- defer func(f *os.File) {
- errF := f.Close()
- if errF != nil {
- }
- }(f)
-
- reader := bufio.NewReader(f)
- fInfo, err := f.Stat()
+ defer f.Close()
+ _, err = io.Copy(buf, f)
if err != nil {
return nil, err
}
- fs := &FileStream{
- InputStream: InputStream{
- index: 0,
- name: fileName,
- },
- filename: fileName,
- }
+ fs := new(FileStream)
- // Pre-build the buffer and read runes efficiently
- //
- fs.data = make([]rune, 0, fInfo.Size())
- for {
- r, _, err := reader.ReadRune()
- if err != nil {
- break
- }
- fs.data = append(fs.data, r)
- }
- fs.size = len(fs.data) // Size in runes
+ fs.filename = fileName
+ s := string(buf.Bytes())
+
+ fs.InputStream = NewInputStream(s)
- // All done.
- //
return fs, nil
+
}
func (f *FileStream) GetSourceName() string {
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
new file mode 100644
index 0000000000..a8b889cedb
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+func NewInputStream(data string) *InputStream {
+
+ is := new(InputStream)
+
+ is.name = ""
+ is.index = 0
+ is.data = []rune(data)
+ is.size = len(is.data) // number of runes
+
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// mark/release do nothing we have entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+ return string(is.data)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/int_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
similarity index 82%
rename from vendor/github.com/antlr4-go/antlr/v4/interval_set.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
index cc5066067a..c1e155e818 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
@@ -14,21 +14,20 @@ type Interval struct {
Stop int
}
-// NewInterval creates a new interval with the given start and stop values.
-func NewInterval(start, stop int) Interval {
- return Interval{
- Start: start,
- Stop: stop,
- }
+/* stop is not included! */
+func NewInterval(start, stop int) *Interval {
+ i := new(Interval)
+
+ i.Start = start
+ i.Stop = stop
+ return i
}
-// Contains returns true if the given item is contained within the interval.
-func (i Interval) Contains(item int) bool {
+func (i *Interval) Contains(item int) bool {
return item >= i.Start && item < i.Stop
}
-// String generates a string representation of the interval.
-func (i Interval) String() string {
+func (i *Interval) String() string {
if i.Start == i.Stop-1 {
return strconv.Itoa(i.Start)
}
@@ -36,18 +35,15 @@ func (i Interval) String() string {
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
-// Length returns the length of the interval.
-func (i Interval) Length() int {
+func (i *Interval) length() int {
return i.Stop - i.Start
}
-// IntervalSet represents a collection of [Intervals], which may be read-only.
type IntervalSet struct {
- intervals []Interval
+ intervals []*Interval
readOnly bool
}
-// NewIntervalSet creates a new empty, writable, interval set.
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
@@ -58,20 +54,6 @@ func NewIntervalSet() *IntervalSet {
return i
}
-func (i *IntervalSet) Equals(other *IntervalSet) bool {
- if len(i.intervals) != len(other.intervals) {
- return false
- }
-
- for k, v := range i.intervals {
- if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
- return false
- }
- }
-
- return true
-}
-
func (i *IntervalSet) first() int {
if len(i.intervals) == 0 {
return TokenInvalidType
@@ -88,16 +70,16 @@ func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h+1))
}
-func (i *IntervalSet) addInterval(v Interval) {
+func (i *IntervalSet) addInterval(v *Interval) {
if i.intervals == nil {
- i.intervals = make([]Interval, 0)
+ i.intervals = make([]*Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
if v.Stop < interval.Start {
- i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
return
} else if v.Stop == interval.Start {
i.intervals[k].Start = v.Start
@@ -157,16 +139,16 @@ func (i *IntervalSet) contains(item int) bool {
}
func (i *IntervalSet) length() int {
- iLen := 0
+ len := 0
for _, v := range i.intervals {
- iLen += v.Length()
+ len += v.length()
}
- return iLen
+ return len
}
-func (i *IntervalSet) removeRange(v Interval) {
+func (i *IntervalSet) removeRange(v *Interval) {
if v.Start == v.Stop-1 {
i.removeOne(v.Start)
} else if i.intervals != nil {
@@ -180,7 +162,7 @@ func (i *IntervalSet) removeRange(v Interval) {
i.intervals[k] = NewInterval(ni.Start, v.Start)
x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
@@ -217,7 +199,7 @@ func (i *IntervalSet) removeOne(v int) {
x := NewInterval(ki.Start, v)
ki.Start = v + 1
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
}
}
@@ -241,7 +223,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}
-func (i *IntervalSet) GetIntervals() []Interval {
+func (i *IntervalSet) GetIntervals() []*Interval {
return i.intervals
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
new file mode 100644
index 0000000000..e5a74f0c6c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
@@ -0,0 +1,198 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "sort"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(value)
+
+ for _, v1 := range s.store[kh] {
+ if s.comparator.Equals2(value, v1) {
+ return v1, true
+ }
+ }
+ s.store[kh] = append(s.store[kh], value)
+ s.len++
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(key)
+
+ for _, v := range s.store[kh] {
+ if s.comparator.Equals2(key, v) {
+ return v, true
+ }
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
+
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ for _, v := range e {
+ vs = append(vs, v)
+ }
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
+ return &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) {
+ kh := m.comparator.Hash1(key)
+
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ m.len++
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+
+ var none V
+ kh := m.comparator.Hash1(key)
+ for _, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ return e.val, true
+ }
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return len(m.store)
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
similarity index 78%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
index e5594b2168..6533f05164 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
@@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer {
// create a single token. NextToken will return l object after
// Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set l to the last token to be Matched or
- // something non nil so that the auto token emit mechanism will not
+ // something nonnil so that the auto token emit mechanism will not
// emit another token.
lexer.token = nil
@@ -111,7 +111,6 @@ const (
LexerSkip = -3
)
-//goland:noinspection GoUnusedConst
const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
@@ -119,7 +118,7 @@ const (
LexerMaxCharValue = 0x10FFFF
)
-func (b *BaseLexer) Reset() {
+func (b *BaseLexer) reset() {
// wack Lexer state variables
if b.input != nil {
b.input.Seek(0) // rewind the input
@@ -177,7 +176,7 @@ func (b *BaseLexer) safeMatch() (ret int) {
return b.Interpreter.Match(b.input, b.mode)
}
-// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream.
+// Return a token from l source i.e., Match a token on the char stream.
func (b *BaseLexer) NextToken() Token {
if b.input == nil {
panic("NextToken requires a non-nil input stream.")
@@ -206,8 +205,9 @@ func (b *BaseLexer) NextToken() Token {
continueOuter := false
for {
b.thetype = TokenInvalidType
+ ttype := LexerSkip
- ttype := b.safeMatch() // Defaults to LexerSkip
+ ttype = b.safeMatch()
if b.input.LA(1) == TokenEOF {
b.hitEOF = true
@@ -234,11 +234,12 @@ func (b *BaseLexer) NextToken() Token {
}
}
-// Skip instructs the lexer to Skip creating a token for current lexer rule
-// and look for another token. [NextToken] knows to keep looking when
-// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
+// Instruct the lexer to Skip creating a token for current lexer rule
+// and look for another token. NextToken() knows to keep looking when
+// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
+// /
func (b *BaseLexer) Skip() {
b.thetype = LexerSkip
}
@@ -247,29 +248,23 @@ func (b *BaseLexer) More() {
b.thetype = LexerMore
}
-// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
-// will be in force.
func (b *BaseLexer) SetMode(m int) {
b.mode = m
}
-// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the
-// current lexer mode to the supplied mode m.
func (b *BaseLexer) PushMode(m int) {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
b.modeStack.Push(b.mode)
b.mode = m
}
-// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
-// return to.
func (b *BaseLexer) PopMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
}
i, _ := b.modeStack.Pop()
@@ -285,7 +280,7 @@ func (b *BaseLexer) inputStream() CharStream {
func (b *BaseLexer) SetInputStream(input CharStream) {
b.input = nil
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
- b.Reset()
+ b.reset()
b.input = input
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}
@@ -294,19 +289,20 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
return b.tokenFactorySourcePair
}
-// EmitToken by default does not support multiple emits per [NextToken] invocation
-// for efficiency reasons. Subclass and override this func, [NextToken],
-// and [GetToken] (to push tokens into a list and pull from that list
-// rather than a single variable as this implementation does).
+// By default does not support multiple emits per NextToken invocation
+// for efficiency reasons. Subclass and override l method, NextToken,
+// and GetToken (to push tokens into a list and pull from that list
+// rather than a single variable as l implementation does).
+// /
func (b *BaseLexer) EmitToken(token Token) {
b.token = token
}
-// Emit is the standard method called to automatically emit a token at the
+// The standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
-// use that to set the token's text. Override this method to emit
-// custom [Token] objects or provide a new factory.
+// use that to set the token's text. Override l method to emit
+// custom Token objects or provide a Newfactory.
// /
func (b *BaseLexer) Emit() Token {
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
@@ -314,7 +310,6 @@ func (b *BaseLexer) Emit() Token {
return t
}
-// EmitEOF emits an EOF token. By default, this is the last token emitted
func (b *BaseLexer) EmitEOF() Token {
cpos := b.GetCharPositionInLine()
lpos := b.GetLine()
@@ -323,7 +318,6 @@ func (b *BaseLexer) EmitEOF() Token {
return eof
}
-// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
func (b *BaseLexer) GetCharPositionInLine() int {
return b.Interpreter.GetCharPositionInLine()
}
@@ -340,12 +334,13 @@ func (b *BaseLexer) SetType(t int) {
b.thetype = t
}
-// GetCharIndex returns the index of the current character of lookahead
+// What is the index of the current character of lookahead?///
func (b *BaseLexer) GetCharIndex() int {
return b.input.Index()
}
-// GetText returns the text Matched so far for the current token or any text override.
+// Return the text Matched so far for the current token or any text override.
+// Set the complete text of l token it wipes any previous changes to the text.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
@@ -354,20 +349,17 @@ func (b *BaseLexer) GetText() string {
return b.Interpreter.GetText(b.input)
}
-// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
b.text = text
}
-// GetATN returns the ATN used by the lexer.
func (b *BaseLexer) GetATN() *ATN {
return b.Interpreter.ATN()
}
-// GetAllTokens returns a list of all [Token] objects in input char stream.
-// Forces a load of all tokens that can be made from the input char stream.
-//
-// Does not include EOF token.
+// Return a list of all Token objects in input char stream.
+// Forces load of all tokens. Does not include EOF token.
+// /
func (b *BaseLexer) GetAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
@@ -406,13 +398,11 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string {
return "'" + b.getErrorDisplayForChar(c) + "'"
}
-// Recover can normally Match any char in its vocabulary after Matching
-// a token, so here we do the easy thing and just kill a character and hope
+// Lexers can normally Match any char in it's vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
-//
-// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
-// a character that makes no sense to the recognizer.
+// /
func (b *BaseLexer) Recover(re RecognitionException) {
if b.input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
similarity index 78%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
index eaa7393e06..111656c295 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
@@ -7,29 +7,14 @@ package antlr
import "strconv"
const (
- // LexerActionTypeChannel represents a [LexerChannelAction] action.
- LexerActionTypeChannel = 0
-
- // LexerActionTypeCustom represents a [LexerCustomAction] action.
- LexerActionTypeCustom = 1
-
- // LexerActionTypeMode represents a [LexerModeAction] action.
- LexerActionTypeMode = 2
-
- // LexerActionTypeMore represents a [LexerMoreAction] action.
- LexerActionTypeMore = 3
-
- // LexerActionTypePopMode represents a [LexerPopModeAction] action.
- LexerActionTypePopMode = 4
-
- // LexerActionTypePushMode represents a [LexerPushModeAction] action.
- LexerActionTypePushMode = 5
-
- // LexerActionTypeSkip represents a [LexerSkipAction] action.
- LexerActionTypeSkip = 6
-
- // LexerActionTypeType represents a [LexerTypeAction] action.
- LexerActionTypeType = 7
+ LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
+ LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
+ LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
+ LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
+ LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
+ LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
+ LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
+ LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
)
type LexerAction interface {
@@ -54,7 +39,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction {
return la
}
-func (b *BaseLexerAction) execute(_ Lexer) {
+func (b *BaseLexerAction) execute(lexer Lexer) {
panic("Not implemented")
}
@@ -67,19 +52,17 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
}
func (b *BaseLexerAction) Hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, b.actionType)
- return murmurFinish(h, 1)
+ return b.actionType
}
func (b *BaseLexerAction) Equals(other LexerAction) bool {
- return b.actionType == other.getActionType()
+ return b == other
}
-// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
-// The Skip command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE].
+// The {@code Skip} command does not have any parameters, so l action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerSkipAction struct {
*BaseLexerAction
}
@@ -90,22 +73,17 @@ func NewLexerSkipAction() *LexerSkipAction {
return la
}
-// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
+// Provides a singleton instance of l parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}
-// String returns a string representation of the current [LexerSkipAction].
func (l *LexerSkipAction) String() string {
return "skip"
}
-func (b *LexerSkipAction) Equals(other LexerAction) bool {
- return other.getActionType() == LexerActionTypeSkip
-}
-
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
//
// with the assigned type.
@@ -147,10 +125,11 @@ func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}
-// LexerPushModeAction implements the pushMode lexer action by calling
-// [Lexer.pushMode] with the assigned mode.
+// Implements the {@code pushMode} lexer action by calling
+// {@link Lexer//pushMode} with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction
+
mode int
}
@@ -190,10 +169,10 @@ func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}
-// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
+// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
//
-// The popMode command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
+// The {@code popMode} command does not have any parameters, so l action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerPopModeAction struct {
*BaseLexerAction
}
@@ -245,10 +224,11 @@ func (l *LexerMoreAction) String() string {
return "more"
}
-// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
+// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction
+
mode int
}
@@ -342,19 +322,16 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool {
}
}
-// LexerChannelAction implements the channel lexer action by calling
-// [Lexer.setChannel] with the assigned channel.
-//
-// Constructs a new channel action with the specified channel value.
+// Implements the {@code channel} lexer action by calling
+// {@link Lexer//setChannel} with the assigned channel.
+// Constructs a New{@code channel} action with the specified channel value.
+// @param channel The channel value to pass to {@link Lexer//setChannel}.
type LexerChannelAction struct {
*BaseLexerAction
+
channel int
}
-// NewLexerChannelAction creates a channel lexer action by calling
-// [Lexer.setChannel] with the assigned channel.
-//
-// Constructs a new channel action with the specified channel value.
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
@@ -398,22 +375,25 @@ func (l *LexerChannelAction) String() string {
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+// Constructs a Newindexed custom action by associating a character offset
+// with a {@link LexerAction}.
+//
+// Note: This class is only required for lexer actions for which
+// {@link LexerAction//isPositionDependent} returns {@code true}.
+//
+// @param offset The offset into the input {@link CharStream}, relative to
+// the token start index, at which the specified lexer action should be
+// executed.
+// @param action The lexer action to execute at a particular offset in the
+// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction
+
offset int
lexerAction LexerAction
isPositionDependent bool
}
-// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
-// with a [LexerAction].
-//
-// Note: This class is only required for lexer actions for which
-// [LexerAction.isPositionDependent] returns true.
-//
-// The offset points into the input [CharStream], relative to
-// the token start index, at which the specified lexerAction should be
-// executed.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
l := new(LexerIndexedCustomAction)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
similarity index 70%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
index dfc28c32b3..be1ba7a7e3 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
@@ -29,20 +29,28 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
l.lexerActions = lexerActions
// Caches the result of {@link //hashCode} since the hash code is an element
- // of the performance-critical {@link ATNConfig//hashCode} operation.
- l.cachedHash = murmurInit(0)
+ // of the performance-critical {@link LexerATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(57)
for _, a := range lexerActions {
l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
}
- l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
return l
}
-// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
-// the input [LexerActionExecutor] followed by a specified
-// [LexerAction].
-// TODO: This does not match the Java code
+// Creates a {@link LexerActionExecutor} which executes the actions for
+// the input {@code lexerActionExecutor} followed by a specified
+// {@code lexerAction}.
+//
+// @param lexerActionExecutor The executor for actions already traversed by
+// the lexer while Matching a token within a particular
+// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// though it were an empty executor.
+// @param lexerAction The lexer action to execute after the actions
+// specified in {@code lexerActionExecutor}.
+//
+// @return A {@link LexerActionExecutor} for executing the combine actions
+// of {@code lexerActionExecutor} and {@code lexerAction}.
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
if lexerActionExecutor == nil {
return NewLexerActionExecutor([]LexerAction{lexerAction})
@@ -51,42 +59,47 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
}
-// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// Creates a {@link LexerActionExecutor} which encodes the current offset
// for position-dependent lexer actions.
//
-// Normally, when the executor encounters lexer actions where
-// [LexerAction.isPositionDependent] returns true, it calls
-// [IntStream.Seek] on the input [CharStream] to set the input
-// position to the end of the current token. This behavior provides
-// for efficient [DFA] representation of lexer actions which appear at the end
+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
+// characters.
//
-// Prior to traversing a Match transition in the [ATN], the current offset
+// Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the [DFA] representation of
+// the offsets relative to the token start index, the DFA representation of
// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same Length, regardless of their absolute
-// position in the input stream.
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns this instance.
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
//
-// The offset is assigned to all position-dependent
+// @param offset The current offset to assign to all position-dependent
// lexer actions which do not already have offsets assigned.
//
-// The func returns a [LexerActionExecutor] that stores input stream offsets
+// @return A {@link LexerActionExecutor} which stores input stream offsets
// for all position-dependent lexer actions.
+// /
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
var updatedLexerActions []LexerAction
for i := 0; i < len(l.lexerActions); i++ {
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
if l.lexerActions[i].getIsPositionDependent() && !ok {
if updatedLexerActions == nil {
- updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
- updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ updatedLexerActions = make([]LexerAction, 0)
+
+ for _, a := range l.lexerActions {
+ updatedLexerActions = append(updatedLexerActions, a)
+ }
}
+
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
}
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
similarity index 80%
rename from vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
index fe938b0259..c573b75210 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
@@ -10,8 +10,10 @@ import (
"strings"
)
-//goland:noinspection GoUnusedGlobalVariable
var (
+ LexerATNSimulatorDebug = false
+ LexerATNSimulatorDFADebug = false
+
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
@@ -30,11 +32,11 @@ type ILexerATNSimulator interface {
}
type LexerATNSimulator struct {
- BaseATNSimulator
+ *BaseATNSimulator
recog Lexer
predictionMode int
- mergeCache *JPCMap2
+ mergeCache DoubleDict
startIndex int
Line int
CharPositionInLine int
@@ -44,35 +46,27 @@ type LexerATNSimulator struct {
}
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
- l := &LexerATNSimulator{
- BaseATNSimulator: BaseATNSimulator{
- atn: atn,
- sharedContextCache: sharedContextCache,
- },
- }
+ l := new(LexerATNSimulator)
+
+ l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
l.decisionToDFA = decisionToDFA
l.recog = recog
-
// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In l case, we use the
// ATN-generated exception object.
l.startIndex = -1
-
- // line number 1..n within the input
+ // line number 1..n within the input///
l.Line = 1
-
// The index of the character relative to the beginning of the line
- // 0..n-1
+ // 0..n-1///
l.CharPositionInLine = 0
-
l.mode = LexerDefaultMode
-
// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
-
+ // done
return l
}
@@ -120,7 +114,7 @@ func (l *LexerATNSimulator) reset() {
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
@@ -132,7 +126,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
predict := l.execATN(input, next)
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
@@ -140,18 +134,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
- // allow zero-Length tokens
+ // allow zero-length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state
for { // while more work
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}
@@ -194,7 +188,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
}
}
t = input.LA(1)
- s = target // flip current DFA target becomes new src/from state
+ s = target // flip current DFA target becomes Newsrc/from state
}
return l.failOrAccept(l.prevAccept, input, s.configs, t)
@@ -220,39 +214,43 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
- if runtimeConfig.lexerATNSimulatorDebug && target != nil {
+ if LexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}
-// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the
-// computed state and corresponding edge to the [DFA].
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
//
-// The func returns the computed target [DFA] state for the given input symbol t.
-// If this does not lead to a valid [DFA] state, this method
-// returns ATNSimulatorError.
+// @param input The input stream
+// @param s The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, l method
+// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
- l.getReachableConfigSet(input, s.configs, reach, t)
+ l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't panic out l knowledge it'd
- // cause a fail-over from DFA later.
+ // cause a failover from DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
- return l.addDFAEdge(s, t, nil, reach)
+ return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}
-func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
@@ -267,35 +265,34 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}
-// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
-// we can reach upon input t.
-//
-// Parameter reach is a return parameter.
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
+// Given a starting configuration set, figure out all ATN configurations
+// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
// l is used to Skip processing for configs which have a lower priority
- // than a runtimeConfig that already reached an accept state for the same rule
+ // than a config that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber
- for _, cfg := range closure.configs {
- currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
- if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
+ for _, cfg := range closure.GetItems() {
+ currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
+ if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
continue
}
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
- fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
}
for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
- lexerActionExecutor := cfg.lexerActionExecutor
+ lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
- treatEOFAsEpsilon := t == TokenEOF
- config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
+ treatEOFAsEpsilon := (t == TokenEOF)
+ config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
// any remaining configs for l alt have a lower priority
@@ -308,7 +305,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATN
}
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Printf("ACTION %v\n", lexerActionExecutor)
}
// seek to after last char in token
@@ -328,7 +325,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState
return nil
}
-func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
@@ -339,24 +336,25 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATN
return configs
}
-// closure since the alternatives within any lexer decision are ordered by
-// preference, this method stops pursuing the closure as soon as an accept
+// Since the alternatives within any lexer decision are ordered by
+// preference, l method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
-// search from runtimeConfig, all other (potentially reachable) states for
-// this rule would have a lower priority.
+// search from {@code config}, all other (potentially reachable) states for
+// l rule would have a lower priority.
//
-// The func returns true if an accept state is reached.
-func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
+// @return {@code true} if an accept state is reached, otherwise
+// {@code false}.
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
- if runtimeConfig.lexerATNSimulatorDebug {
- fmt.Println("closure(" + config.String() + ")")
+ if LexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
}
_, ok := config.state.(*RuleStopState)
if ok {
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
@@ -403,10 +401,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs
}
// side-effect: can alter configs.hasSemanticContext
-func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
- configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
+ configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
- var cfg *ATNConfig
+ var cfg *LexerATNConfig
if trans.getSerializationType() == TransitionRULE {
@@ -437,10 +435,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig
pt := trans.(*PredicateTransition)
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
- configs.hasSemanticContext = true
+ configs.SetHasSemanticContext(true)
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
@@ -451,7 +449,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In this case, the config needs to be
+ // isEmpty() is false. In l case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
@@ -478,18 +476,26 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig
return cfg
}
-// evaluatePredicate eEvaluates a predicate specified in the lexer.
+// Evaluate a predicate specified in the lexer.
//
-// If speculative is true, this method was called before
-// [consume] for the Matched character. This method should call
-// [consume] before evaluating the predicate to ensure position
-// sensitive values, including [GetText], [GetLine],
-// and [GetColumn], properly reflect the current
-// lexer state. This method should restore input and the simulator
-// to the original state before returning, i.e. undo the actions made by the
-// call to [Consume].
+// If {@code speculative} is {@code true}, l method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//getcolumn}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}.
//
-// The func returns true if the specified predicate evaluates to true.
+// @param input The input stream.
+// @param ruleIndex The rule containing the predicate.
+// @param predIndex The index of the predicate within the rule.
+// @param speculative {@code true} if the current index in {@code input} is
+// one character before the predicate's location.
+//
+// @return {@code true} if the specified predicate evaluates to
+// {@code true}.
+// /
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
@@ -521,7 +527,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream
settings.dfaState = dfaState
}
-func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// leading to l call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes l edge
@@ -533,9 +539,10 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
- //
- suppressEdge := cfgs.hasSemanticContext
- cfgs.hasSemanticContext = false
+ // /
+ suppressEdge := cfgs.HasSemanticContext()
+ cfgs.SetHasSemanticContext(false)
+
to = l.addDFAState(cfgs, true)
if suppressEdge {
@@ -547,7 +554,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// Only track edges within the DFA bounds
return to
}
- if runtimeConfig.lexerATNSimulatorDebug {
+ if LexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
l.atn.edgeMu.Lock()
@@ -565,12 +572,13 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
-func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
proposed := NewDFAState(-1, configs)
- var firstConfigWithRuleStopState *ATNConfig
+ var firstConfigWithRuleStopState ATNConfig
+
+ for _, cfg := range configs.GetItems() {
- for _, cfg := range configs.configs {
_, ok := cfg.GetState().(*RuleStopState)
if ok {
@@ -580,14 +588,14 @@ func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
- proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
- existing, present := dfa.Get(proposed)
+ existing, present := dfa.states.Get(proposed)
if present {
// This state was already present, so just return it.
@@ -597,11 +605,10 @@ func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool
// We need to add the new state
//
- proposed.stateNumber = dfa.Len()
- configs.readOnly = true
- configs.configLookup = nil // Not needed now
+ proposed.stateNumber = dfa.states.Len()
+ configs.SetReadOnly(true)
proposed.configs = configs
- dfa.Put(proposed)
+ dfa.states.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
@@ -613,7 +620,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}
-// GetText returns the text [Match]ed so far for the current token.
+// Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
similarity index 72%
rename from vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
index dfdff000bc..76689615a6 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
@@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
return la
}
+// - Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+//
+// /
const (
- // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
- // a predicate during analysis if
- //
- // seeThruPreds==false
LL1AnalyzerHitPred = TokenInvalidType
)
@@ -38,13 +38,11 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
count := len(s.GetTransitions())
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
-
look[alt] = NewIntervalSet()
- // TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim!
- lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
- la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
-
- // Wipe out lookahead for la alternative if we found nothing,
+ lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+ seeThruPreds := false // fail to get lookahead upon pred
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+ // Wipe out lookahead for la alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
look[alt] = nil
@@ -53,31 +51,32 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
return look
}
-// Look computes the set of tokens that can follow s in the [ATN] in the
-// specified ctx.
-//
-// If ctx is nil and the end of the rule containing
-// s is reached, [EPSILON] is added to the result set.
-//
-// If ctx is not nil and the end of the outermost rule is
-// reached, [EOF] is added to the result set.
+// *
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
//
-// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
-// [BlockEndState] to detect epsilon paths through a closure.
+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
//
-// Parameter ctx is the complete parser context, or nil if the context
+// @param s the ATN state
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or {@code nil} if the context
// should be ignored
//
-// The func returns the set of tokens that can follow s in the [ATN] in the
-// specified ctx.
+// @return The set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+// /
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
- var lookContext *PredictionContext
+ seeThruPreds := true // ignore preds get all lookahead
+ var lookContext PredictionContext
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
- la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
- NewBitSet(), true, true)
+ la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
return r
}
@@ -111,17 +110,16 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
-func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
- calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
- c := NewATNConfig6(s, 0, ctx)
+ c := NewBaseATNConfig6(s, 0, ctx)
if lookBusy.Contains(c) {
return
@@ -153,7 +151,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look
return
}
- if ctx.pcType != PredictionContextEmpty {
+ if ctx != BasePredictionContextEMPTY {
removed := calledRuleStack.contains(s.GetRuleIndex())
defer func() {
if removed {
@@ -204,8 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look
}
}
-func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
- calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
similarity index 80%
rename from vendor/github.com/antlr4-go/antlr/v4/parser.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
index fb57ac15db..d26bf06392 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
@@ -48,10 +48,8 @@ type BaseParser struct {
_SyntaxErrors int
}
-// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
-// recovery stuff.
-//
-//goland:noinspection GoUnusedExportedFunction
+// p.is all the parsing support code essentially most of it is error
+// recovery stuff.//
func NewBaseParser(input TokenStream) *BaseParser {
p := new(BaseParser)
@@ -60,46 +58,39 @@ func NewBaseParser(input TokenStream) *BaseParser {
// The input stream.
p.input = nil
-
// The error handling strategy for the parser. The default value is a new
// instance of {@link DefaultErrorStrategy}.
p.errHandler = NewDefaultErrorStrategy()
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
-
- // The ParserRuleContext object for the currently executing rule.
+ // The {@link ParserRuleContext} object for the currently executing rule.
// p.is always non-nil during the parsing process.
p.ctx = nil
-
- // Specifies whether the parser should construct a parse tree during
+ // Specifies whether or not the parser should construct a parse tree during
// the parsing process. The default value is {@code true}.
p.BuildParseTrees = true
-
- // When setTrace(true) is called, a reference to the
- // TraceListener is stored here, so it can be easily removed in a
- // later call to setTrace(false). The listener itself is
+ // When {@link //setTrace}{@code (true)} is called, a reference to the
+ // {@link TraceListener} is stored here so it can be easily removed in a
+ // later call to {@link //setTrace}{@code (false)}. The listener itself is
// implemented as a parser listener so p.field is not directly used by
// other parser methods.
p.tracer = nil
-
- // The list of ParseTreeListener listeners registered to receive
+ // The list of {@link ParseTreeListener} listeners registered to receive
// events during the parse.
p.parseListeners = nil
-
// The number of syntax errors Reported during parsing. p.value is
- // incremented each time NotifyErrorListeners is called.
+ // incremented each time {@link //NotifyErrorListeners} is called.
p._SyntaxErrors = 0
p.SetInputStream(input)
return p
}
-// This field maps from the serialized ATN string to the deserialized [ATN] with
+// p.field maps from the serialized ATN string to the deserialized {@link
+// ATN} with
// bypass alternatives.
//
-// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
-//
-//goland:noinspection GoUnusedGlobalVariable
+// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
@@ -152,13 +143,10 @@ func (p *BaseParser) Match(ttype int) Token {
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
- if p.HasError() {
- return nil
- }
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-
- // we must have conjured up a new token during single token
- // insertion if it's not the current symbol
+ // we must have conjured up a Newtoken during single token
+ // insertion
+ // if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
@@ -190,8 +178,9 @@ func (p *BaseParser) MatchWildcard() Token {
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
- // we must have conjured up a new token during single token
- // insertion if it's not the current symbol
+ // we must have conjured up a Newtoken during single token
+ // insertion
+ // if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
@@ -213,27 +202,33 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
return p.parseListeners
}
-// AddParseListener registers listener to receive events during the parsing process.
+// Registers {@code listener} to receive events during the parsing process.
//
-// To support output-preserving grammar transformations (including but not
+// To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
-// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+//
//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
+// @param listener the listener to add
//
-// - Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.
-// - Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.
-// - Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.
+// @panics nilPointerException if {@code} listener is {@code nil}
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
@@ -244,10 +239,11 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
p.parseListeners = append(p.parseListeners, listener)
}
-// RemoveParseListener removes listener from the list of parse listeners.
+// Remove {@code listener} from the list of parse listeners.
//
-// If listener is nil or has not been added as a parse
-// listener, this func does nothing.
+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, p.method does nothing.
+// @param listener the listener to remove
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
if p.parseListeners != nil {
@@ -278,7 +274,7 @@ func (p *BaseParser) removeParseListeners() {
p.parseListeners = nil
}
-// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+// Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
if p.parseListeners != nil {
ctx := p.ctx
@@ -289,7 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
}
}
-// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+// Notify any parse listeners of an exit rule event.
+//
+// @see //addParseListener
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
@@ -316,16 +314,19 @@ func (p *BaseParser) GetTokenFactory() TokenFactory {
return p.input.GetTokenSource().GetTokenFactory()
}
-// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+// Tell our token source and error strategy about a Newway to create tokens.//
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
p.input.GetTokenSource().setTokenFactory(factory)
}
-// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// The ATN with bypass alternatives is expensive to create so we create it
// lazily.
+//
+// @panics UnsupportedOperationException if the current parser does not
+// implement the {@link //getSerializedATN()} method.
func (p *BaseParser) GetATNWithBypassAlts() {
- // TODO - Implement this?
+ // TODO
panic("Not implemented!")
// serializedAtn := p.getSerializedATN()
@@ -353,7 +354,6 @@ func (p *BaseParser) GetATNWithBypassAlts() {
// String id = m.Get("ID")
//
-//goland:noinspection GoUnusedParameter
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
panic("NewParseTreePatternMatcher not implemented!")
@@ -386,16 +386,14 @@ func (p *BaseParser) GetTokenStream() TokenStream {
return p.input
}
-// SetTokenStream installs input as the token stream and resets the parser.
+// Set the token stream and reset the parser.//
func (p *BaseParser) SetTokenStream(input TokenStream) {
p.input = nil
p.reset()
p.input = input
}
-// GetCurrentToken returns the current token at LT(1).
-//
-// [Match] needs to return the current input symbol, which gets put
+// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
@@ -448,7 +446,7 @@ func (p *BaseParser) addContextToParseTree() {
}
}
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
p.SetState(state)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
@@ -476,7 +474,7 @@ func (p *BaseParser) ExitRule() {
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
localctx.SetAltNumber(altNum)
- // if we have a new localctx, make sure we replace existing ctx
+ // if we have Newlocalctx, make sure we replace existing ctx
// that is previous child of parse tree
if p.BuildParseTrees && p.ctx != localctx {
if p.ctx.GetParent() != nil {
@@ -500,7 +498,7 @@ func (p *BaseParser) GetPrecedence() int {
return p.precedenceStack[len(p.precedenceStack)-1]
}
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
p.SetState(state)
p.precedenceStack.Push(precedence)
p.ctx = localctx
@@ -514,7 +512,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, pr
//
// Like {@link //EnterRule} but for recursive rules.
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
previous := p.ctx
previous.SetParent(localctx)
previous.SetInvokingState(state)
@@ -532,7 +530,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state,
}
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
- _, _ = p.precedenceStack.Pop()
+ p.precedenceStack.Pop()
p.ctx.SetStop(p.input.LT(-1))
retCtx := p.ctx // save current ctx (return value)
// unroll so ctx is as it was before call to recursive method
@@ -563,22 +561,29 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
return nil
}
-func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
-//goland:noinspection GoUnusedParameter
func (p *BaseParser) inContext(context ParserRuleContext) bool {
// TODO: useful in parser?
return false
}
-// IsExpectedToken checks whether symbol can follow the current state in the
-// {ATN}. The behavior of p.method is equivalent to the following, but is
+//
+// Checks whether or not {@code symbol} can follow the current state in the
+// ATN. The behavior of p.method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
-// return getExpectedTokens().contains(symbol)
+//
+// return getExpectedTokens().contains(symbol)
+//
+//
+// @param symbol the symbol type to check
+// @return {@code true} if {@code symbol} can follow the current state in
+// the ATN, otherwise {@code false}.
+
func (p *BaseParser) IsExpectedToken(symbol int) bool {
atn := p.Interpreter.atn
ctx := p.ctx
@@ -606,9 +611,11 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
return false
}
-// GetExpectedTokens and returns the set of input symbols which could follow the current parser
-// state and context, as given by [GetState] and [GetContext],
+// Computes the set of input symbols which could follow the current parser
+// state and context, as given by {@link //GetState} and {@link //GetContext},
// respectively.
+//
+// @see ATN//getExpectedTokens(int, RuleContext)
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
@@ -619,7 +626,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
return atn.NextTokens(s, nil)
}
-// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found.
+// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
func (p *BaseParser) GetRuleIndex(ruleName string) int {
var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
if ok {
@@ -629,10 +636,13 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int {
return -1
}
-// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// Return List<String> of the rule names in your parser instance
// leading up to a call to the current rule. You could override if
// you want more details such as the file/line info of where
// in the ATN a rule is invoked.
+//
+// this very useful for error messages.
+
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
if c == nil {
c = p.ctx
@@ -658,16 +668,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
return stack
}
-// GetDFAStrings returns a list of all DFA states used for debugging purposes
+// For debugging and other purposes.//
func (p *BaseParser) GetDFAStrings() string {
return fmt.Sprint(p.Interpreter.decisionToDFA)
}
-// DumpDFA prints the whole of the DFA for debugging
+// For debugging and other purposes.//
func (p *BaseParser) DumpDFA() {
seenOne := false
for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.Len() > 0 {
+ if dfa.states.Len() > 0 {
if seenOne {
fmt.Println()
}
@@ -682,10 +692,8 @@ func (p *BaseParser) GetSourceName() string {
return p.GrammarFileName
}
-// SetTrace installs a trace listener for the parse.
-//
-// During a parse it is sometimes useful to listen in on the rule entry and exit
-// events as well as token Matches. This is for quick and dirty debugging.
+// During a parse is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. p.is for quick and dirty debugging.
func (p *BaseParser) SetTrace(trace *TraceListener) {
if trace == nil {
p.RemoveParseListener(p.tracer)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
similarity index 64%
rename from vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
index 724fa17a19..8bcc46a0d9 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
@@ -10,49 +10,31 @@ import (
"strings"
)
-// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
-// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
-// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
-type ClosureBusy struct {
- bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
- desc string
-}
-
-// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
-func NewClosureBusy(desc string) *ClosureBusy {
- return &ClosureBusy{
- desc: desc,
- }
-}
-
-func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
- if c.bMap == nil {
- c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
- }
- return c.bMap.Put(config)
-}
+var (
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorTraceATNSim = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
+)
type ParserATNSimulator struct {
- BaseATNSimulator
+ *BaseATNSimulator
parser Parser
predictionMode int
input TokenStream
startIndex int
dfa *DFA
- mergeCache *JPCMap
+ mergeCache *DoubleDict
outerContext ParserRuleContext
}
-//goland:noinspection GoUnusedExportedFunction
func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
- p := &ParserATNSimulator{
- BaseATNSimulator: BaseATNSimulator{
- atn: atn,
- sharedContextCache: sharedContextCache,
- },
- }
+ p := new(ParserATNSimulator)
+
+ p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
p.parser = parser
p.decisionToDFA = decisionToDFA
@@ -64,12 +46,12 @@ func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, shared
p.outerContext = nil
p.dfa = nil
// Each prediction operation uses a cache for merge of prediction contexts.
- // Don't keep around as it wastes huge amounts of memory. [JPCMap]
- // isn't Synchronized, but we're ok since two threads shouldn't reuse same
- // parser/atn-simulator object because it can only handle one input at a time.
- // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
- // the merge if we ever see a and b again. Note that (b,a) -> c should
- // also be examined during cache lookup.
+ // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
+ // isn't Synchronized but we're ok since two threads shouldn't reuse same
+ // parser/atnsim object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a)&rarrc should
+ // also be examined during cache lookup.
//
p.mergeCache = nil
@@ -87,14 +69,14 @@ func (p *ParserATNSimulator) SetPredictionMode(v int) {
func (p *ParserATNSimulator) reset() {
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
strconv.Itoa(input.LT(1).GetColumn()))
}
+
p.input = input
p.startIndex = input.Index()
p.outerContext = outerContext
@@ -106,15 +88,7 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre
defer func() {
p.dfa = nil
- p.mergeCache = nil // whack cache after each prediction
- // Do not attempt to run a GC now that we're done with the cache as makes the
- // GC overhead terrible for badly formed grammars and has little effect on well formed
- // grammars.
- // I have made some extra effort to try and reduce memory pressure by reusing allocations when
- // possible. However, it can only have a limited effect. The real solution is to encourage grammar
- // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
- // what is happening at runtime, along with using the error listener to report ambiguities.
-
+ p.mergeCache = nil // wack cache after each prediction
input.Seek(index)
input.Release(m)
}()
@@ -139,7 +113,7 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre
if outerContext == nil {
outerContext = ParserRuleContextEmpty
}
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
@@ -168,52 +142,47 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre
p.atn.stateMu.Unlock()
}
- alt, re := p.execATN(dfa, s0, input, index, outerContext)
- parser.SetError(re)
- if runtimeConfig.parserATNSimulatorDebug {
+ alt := p.execATN(dfa, s0, input, index, outerContext)
+ if ParserATNSimulatorDebug {
fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
}
return alt
}
-// execATN performs ATN simulation to compute a predicted alternative based
-// upon the remaining input, but also updates the DFA cache to avoid
-// having to traverse the ATN again for the same input sequence.
-//
+// Performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+
// There are some key conditions we're looking for after computing a new
// set of ATN configs (proposed DFA state):
-//
-// - If the set is empty, there is no viable alternative for current symbol
-// - Does the state uniquely predict an alternative?
-// - Does the state have a conflict that would prevent us from
-// putting it on the work list?
-//
+// if the set is empty, there is no viable alternative for current symbol
+// does the state uniquely predict an alternative?
+// does the state have a conflict that would prevent us from
+// putting it on the work list?
+
// We also have some key operations to do:
+// add an edge from previous DFA state to potentially NewDFA state, D,
+// upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// collecting predicates and adding semantic context to DFA accept states
+// adding rule context to context-sensitive DFA accept states
+// consuming an input symbol
+// Reporting a conflict
+// Reporting an ambiguity
+// Reporting a context sensitivity
+// Reporting insufficient predicates
+
+// cover these cases:
//
-// - Add an edge from previous DFA state to potentially NewDFA state, D,
-// - Upon current symbol but only if adding to work list, which means in all
-// cases except no viable alternative (and possibly non-greedy decisions?)
-// - Collecting predicates and adding semantic context to DFA accept states
-// - adding rule context to context-sensitive DFA accept states
-// - Consuming an input symbol
-// - Reporting a conflict
-// - Reporting an ambiguity
-// - Reporting a context sensitivity
-// - Reporting insufficient predicates
-//
-// Cover these cases:
-//
-// - dead end
-// - single alt
-// - single alt + predicates
-// - conflict
-// - conflict + predicates
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
-
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+// dead end
+// single alt
+// single alt + preds
+// conflict
+// conflict + preds
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
", DFA state " + s0.String() +
", LA(1)==" + p.getLookaheadName(input) +
@@ -222,7 +191,7 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
previousD := s0
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("s0 = " + s0.String())
}
t := input.LA(1)
@@ -245,17 +214,17 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
input.Seek(startIndex)
alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
if alt != ATNInvalidAltNumber {
- return alt, nil
+ return alt
}
- p.parser.SetError(e)
- return ATNInvalidAltNumber, e
+
+ panic(e)
}
if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- conflictingAlts := D.configs.conflictingAlts
+ conflictingAlts := D.configs.GetConflictingAlts()
if D.predicates != nil {
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL failover")
}
conflictIndex := input.Index()
if conflictIndex != startIndex {
@@ -263,10 +232,10 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
}
conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
if conflictingAlts.length() == 1 {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("Full LL avoided")
}
- return conflictingAlts.minValue(), nil
+ return conflictingAlts.minValue()
}
if conflictIndex != startIndex {
// restore the index so Reporting the fallback to full
@@ -274,18 +243,18 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
input.Seek(conflictIndex)
}
}
- if runtimeConfig.parserATNSimulatorDFADebug {
+ if ParserATNSimulatorDFADebug {
fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
}
fullCtx := true
s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
- alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
- return alt, re
+ alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt
}
if D.isAcceptState {
if D.predicates == nil {
- return D.prediction, nil
+ return D.prediction
}
stopIndex := input.Index()
input.Seek(startIndex)
@@ -293,13 +262,13 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
switch alts.length() {
case 0:
- return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
case 1:
- return alts.minValue(), nil
+ return alts.minValue()
default:
// Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
- return alts.minValue(), nil
+ return alts.minValue()
}
}
previousD = D
@@ -345,8 +314,7 @@ func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int)
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, p method
// returns {@link //ERROR}.
-//
-//goland:noinspection GoBoolExpressions
+
func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
reach := p.computeReachSet(previousD.configs, t, false)
@@ -354,12 +322,12 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t
p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
return ATNSimulatorError
}
- // create new target state we'll add to DFA after it's complete
+ // create Newtarget state we'll add to DFA after it's complete
D := NewDFAState(-1, reach)
predictedAlt := p.getUniqueAlt(reach)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
altSubSets := PredictionModegetConflictingAltSubsets(reach)
fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
", previous=" + previousD.configs.String() +
@@ -372,17 +340,17 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t
if predictedAlt != ATNInvalidAltNumber {
// NO CONFLICT, UNIQUELY PREDICTED ALT
D.isAcceptState = true
- D.configs.uniqueAlt = predictedAlt
+ D.configs.SetUniqueAlt(predictedAlt)
D.setPrediction(predictedAlt)
} else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
// MORE THAN ONE VIABLE ALTERNATIVE
- D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
D.requiresFullContext = true
// in SLL-only mode, we will stop at p state and return the minimum alt
D.isAcceptState = true
- D.setPrediction(D.configs.conflictingAlts.minValue())
+ D.setPrediction(D.configs.GetConflictingAlts().minValue())
}
- if D.isAcceptState && D.configs.hasSemanticContext {
+ if D.isAcceptState && D.configs.HasSemanticContext() {
p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
if D.predicates != nil {
D.setPrediction(ATNInvalidAltNumber)
@@ -413,17 +381,15 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState
}
// comes back with reach.uniqueAlt set to a valid alt
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATNWithFullContext " + s0.String())
}
fullCtx := true
foundExactAmbig := false
- var reach *ATNConfigSet
+ var reach ATNConfigSet
previous := s0
input.Seek(startIndex)
t := input.LA(1)
@@ -441,23 +407,25 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
// ATN states in SLL implies LL will also get nowhere.
// If conflict in states that dip out, choose min since we
// will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previous, startIndex)
input.Seek(startIndex)
alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
if alt != ATNInvalidAltNumber {
- return alt, nil
+ return alt
}
- return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+
+ panic(e)
}
altSubSets := PredictionModegetConflictingAltSubsets(reach)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
}
- reach.uniqueAlt = p.getUniqueAlt(reach)
+ reach.SetUniqueAlt(p.getUniqueAlt(reach))
// unique prediction?
- if reach.uniqueAlt != ATNInvalidAltNumber {
- predictedAlt = reach.uniqueAlt
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ predictedAlt = reach.GetUniqueAlt()
break
}
if p.predictionMode != PredictionModeLLExactAmbigDetection {
@@ -486,9 +454,9 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
// If the configuration set uniquely predicts an alternative,
// without conflict, then we know that it's a full LL decision
// not SLL.
- if reach.uniqueAlt != ATNInvalidAltNumber {
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
- return predictedAlt, nil
+ return predictedAlt
}
// We do not check predicates here because we have checked them
// on-the-fly when doing full context prediction.
@@ -501,10 +469,10 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
//
// For example, we might know that we have conflicting configurations.
// But, that does not mean that there is no way forward without a
- // conflict. It's possible to have non-conflicting alt subsets as in:
- //
+ // conflict. It's possible to have nonconflicting alt subsets as in:
+
// altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
- //
+
// from
//
// [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
@@ -519,15 +487,14 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A
p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
- return predictedAlt, nil
+ return predictedAlt
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
if p.mergeCache == nil {
- p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ p.mergeCache = NewDoubleDict()
}
- intermediate := NewATNConfigSet(fullCtx)
+ intermediate := NewBaseATNConfigSet(fullCtx)
// Configurations already in a rule stop state indicate reaching the end
// of the decision rule (local context) or end of the start rule (full
@@ -539,18 +506,18 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
// ensure that the alternative Matching the longest overall sequence is
// chosen when multiple such configurations can Match the input.
- var skippedStopStates []*ATNConfig
+ var skippedStopStates []*BaseATNConfig
// First figure out where we can reach on input t
- for _, c := range closure.configs {
- if runtimeConfig.parserATNSimulatorDebug {
+ for _, c := range closure.GetItems() {
+ if ParserATNSimulatorDebug {
fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
}
if _, ok := c.GetState().(*RuleStopState); ok {
if fullCtx || t == TokenEOF {
- skippedStopStates = append(skippedStopStates, c)
- if runtimeConfig.parserATNSimulatorDebug {
+ skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
+ if ParserATNSimulatorDebug {
fmt.Println("added " + c.String() + " to SkippedStopStates")
}
}
@@ -560,9 +527,9 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
for _, trans := range c.GetState().GetTransitions() {
target := p.getReachableTarget(trans, t)
if target != nil {
- cfg := NewATNConfig4(c, target)
+ cfg := NewBaseATNConfig4(c, target)
intermediate.Add(cfg, p.mergeCache)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("added " + cfg.String() + " to intermediate")
}
}
@@ -570,7 +537,7 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
}
// Now figure out where the reach operation can take us...
- var reach *ATNConfigSet
+ var reach ATNConfigSet
// This block optimizes the reach operation for intermediate sets which
// trivially indicate a termination state for the overall
@@ -598,8 +565,8 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
// operation on the intermediate set to compute its initial value.
//
if reach == nil {
- reach = NewATNConfigSet(fullCtx)
- closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ reach = NewBaseATNConfigSet(fullCtx)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
treatEOFAsEpsilon := t == TokenEOF
amount := len(intermediate.configs)
for k := 0; k < amount; k++ {
@@ -621,10 +588,10 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
//
// This is handled before the configurations in SkippedStopStates,
// because any configurations potentially added from that list are
- // already guaranteed to meet this condition whether it's
+ // already guaranteed to meet p condition whether or not it's
// required.
//
- reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
}
// If SkippedStopStates!=nil, then it contains at least one
// configuration. For full-context reach operations, these
@@ -640,40 +607,41 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC
}
}
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
}
- if len(reach.configs) == 0 {
+ if len(reach.GetItems()) == 0 {
return nil
}
return reach
}
-// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
-// configs which are in a [RuleStopState]. If all
-// configurations in configs are already in a rule stop state, this
-// method simply returns configs.
+// Return a configuration set containing only the configurations from
+// {@code configs} which are in a {@link RuleStopState}. If all
+// configurations in {@code configs} are already in a rule stop state, p
+// method simply returns {@code configs}.
//
-// When lookToEndOfRule is true, this method uses
-// [ATN].[NextTokens] for each configuration in configs which is
+// When {@code lookToEndOfRule} is true, p method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
+// from the configuration via epsilon-only transitions.
//
-// When lookToEndOfRule is true, this method checks for rule stop states
+// @param configs the configuration set to update
+// @param lookToEndOfRule when true, p method checks for rule stop states
// reachable by epsilon-only transitions from each configuration in
-// configs.
+// {@code configs}.
//
-// The func returns configs if all configurations in configs are in a
-// rule stop state, otherwise it returns a new configuration set containing only
-// the configurations from configs which are in a rule stop state
-func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+// @return {@code configs} if all configurations in {@code configs} are in a
+// rule stop state, otherwise return a Newconfiguration set containing only
+// the configurations from {@code configs} which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
if PredictionModeallConfigsInRuleStopStates(configs) {
return configs
}
- result := NewATNConfigSet(configs.fullCtx)
- for _, config := range configs.configs {
+ result := NewBaseATNConfigSet(configs.FullContext())
+ for _, config := range configs.GetItems() {
if _, ok := config.GetState().(*RuleStopState); ok {
result.Add(config, p.mergeCache)
continue
@@ -682,81 +650,91 @@ func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConf
NextTokens := p.atn.NextTokens(config.GetState(), nil)
if NextTokens.contains(TokenEpsilon) {
endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
- result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
}
}
}
return result
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
// always at least the implicit call to start rule
initialContext := predictionContextFromRuleContext(p.atn, ctx)
- configs := NewATNConfigSet(fullCtx)
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ configs := NewBaseATNConfigSet(fullCtx)
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("computeStartState from ATN state " + a.String() +
" initialContext=" + initialContext.String())
}
for i := 0; i < len(a.GetTransitions()); i++ {
target := a.GetTransitions()[i].getTarget()
- c := NewATNConfig6(target, i+1, initialContext)
- closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ c := NewBaseATNConfig6(target, i+1, initialContext)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
p.closure(c, configs, closureBusy, true, fullCtx, false)
}
return configs
}
-// applyPrecedenceFilter transforms the start state computed by
-// [computeStartState] to the special start state used by a
-// precedence [DFA] for a particular precedence value. The transformation
+// This method transforms the start state computed by
+// {@link //computeStartState} to the special start state used by a
+// precedence DFA for a particular precedence value. The transformation
// process applies the following changes to the start state's configuration
// set.
//
-// 1. Evaluate the precedence predicates for each configuration using
-// [SemanticContext].evalPrecedence.
-// 2. Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context.
-//
-// Transformation 2 is valid for the following reasons:
-//
-// - The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-// - The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
-// The prediction context must be considered by this filter to address
-// situations like the following:
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
+//
+// - Evaluate the precedence predicates for each configuration using
+// {@link SemanticContext//evalPrecedence}.
+// - Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context. This transformation is
+// valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+//
+//
//
-// In the above grammar, the [ATN] state immediately before the token
-// reference 'a' in letterA is reachable from the left edge
+//
+// The prediction context must be considered by p filter to address
+// situations like the following.
+//
+//
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+//
+//
+// If the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
// of both the primary and closure blocks of the left-recursive rule
-// statement. The prediction context associated with each of these
+// {@code statement}. The prediction context associated with each of these
// configurations distinguishes between them, and prevents the alternative
-// which stepped out to prog, and then back in to statement
+// which stepped out to {@code prog} (and then back in to {@code statement}
// from being eliminated by the filter.
+//
//
-// The func returns the transformed configuration set representing the start state
-// for a precedence [DFA] at a particular precedence level (determined by
-// calling [Parser].getPrecedence).
-func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+// @param configs The configuration set computed by
+// {@link //computeStartState} as the start state for the DFA.
+// @return The transformed configuration set representing the start state
+// for a precedence DFA at a particular precedence level (determined by
+// calling {@link Parser//getPrecedence}).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
- statesFromAlt1 := make(map[int]*PredictionContext)
- configSet := NewATNConfigSet(configs.fullCtx)
+ statesFromAlt1 := make(map[int]PredictionContext)
+ configSet := NewBaseATNConfigSet(configs.FullContext())
- for _, config := range configs.configs {
+ for _, config := range configs.GetItems() {
// handle alt 1 first
if config.GetAlt() != 1 {
continue
@@ -768,12 +746,12 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNCo
}
statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
if updatedContext != config.GetSemanticContext() {
- configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
} else {
configSet.Add(config, p.mergeCache)
}
}
- for _, config := range configs.configs {
+ for _, config := range configs.GetItems() {
if config.GetAlt() == 1 {
// already handled
@@ -802,11 +780,10 @@ func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATN
return nil
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
altToPred := make([]SemanticContext, nalts+1)
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
if ambigAlts.contains(c.GetAlt()) {
altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
}
@@ -820,11 +797,11 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *AT
nPredAlts++
}
}
- // unambiguous alts are nil in altToPred
+ // nonambig alts are nil in altToPred
if nPredAlts == 0 {
altToPred = nil
}
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
}
return altToPred
@@ -835,7 +812,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
containsPredicate := false
for i := 1; i < len(altToPred); i++ {
pred := altToPred[i]
- // un-predicated is indicated by SemanticContextNONE
+ // unpredicated is indicated by SemanticContextNONE
if ambigAlts != nil && ambigAlts.contains(i) {
pairs = append(pairs, NewPredPrediction(pred, i))
}
@@ -849,42 +826,51 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
return pairs
}
-// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
-// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the
-// Error state was reached during [ATN] simulation.
+// This method is used to improve the localization of error messages by
+// choosing an alternative rather than panicing a
+// {@link NoViableAltException} in particular prediction scenarios where the
+// {@link //ERROR} state was reached during ATN simulation.
//
-// The default implementation of this method uses the following
-// algorithm to identify an [ATN] configuration which successfully parsed the
+//
+// The default implementation of p method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
// decision entry rule. Choosing such an alternative ensures that the
-// [ParserRuleContext] returned by the calling rule will be complete
+// {@link ParserRuleContext} returned by the calling rule will be complete
// and valid, and the syntax error will be Reported later at a more
-// localized location.
+// localized location.
//
-// - If a syntactically valid path or paths reach the end of the decision rule, and
-// they are semantically valid if predicated, return the min associated alt.
-// - Else, if a semantically invalid but syntactically valid path exist
-// or paths exist, return the minimum associated alt.
-// - Otherwise, return [ATNInvalidAltNumber].
+//
+// - If a syntactically valid path or paths reach the end of the decision rule and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if a semantically invalid but syntactically valid path exist
+// or paths exist, return the minimum associated alt.
+//
+// - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//
//
+//
// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a [FailedPredicateException] in
-// the parser. Specifically, this could occur if the only configuration
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, p could occur if the only configuration
// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting
-// [FailedPredicateException] in the parser will identify the specific
+// blocked by a semantic predicate. By choosing p alternative within
+// {@link //AdaptivePredict} instead of panicing a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
// predicate which is preventing the parser from successfully parsing the
// decision rule, which helps developers identify and correct logic errors
// in semantic predicates.
+//
//
-// pass in the configs holding ATN configurations which were valid immediately before
-// the ERROR state was reached, outerContext as the initial parser context from the paper
+// @param configs The ATN configurations which were valid immediately before
+// the {@link //ERROR} state was reached
+// @param outerContext The is the \gamma_0 initial parser context from the paper
// or the parser stack at the instant before prediction commences.
//
-// The func returns the value to return from [AdaptivePredict], or
-// [ATNInvalidAltNumber] if a suitable alternative was not
-// identified and [AdaptivePredict] should report an error instead.
-func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+// @return The value to return from {@link //AdaptivePredict}, or
+// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
+// identified and {@link //AdaptivePredict} should Report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
semValidConfigs := cfgs[0]
semInvalidConfigs := cfgs[1]
@@ -893,7 +879,7 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry
return alt
}
// Is there a syntactically valid path with a failed pred?
- if len(semInvalidConfigs.configs) > 0 {
+ if len(semInvalidConfigs.GetItems()) > 0 {
alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
if alt != ATNInvalidAltNumber { // syntactically viable path exists
return alt
@@ -902,10 +888,10 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry
return ATNInvalidAltNumber
}
-func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
alts := NewIntervalSet()
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
_, ok := c.GetState().(*RuleStopState)
if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
@@ -929,14 +915,14 @@ func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNCon
// prediction, which is where predicates need to evaluate.
type ATNConfigSetPair struct {
- item0, item1 *ATNConfigSet
+ item0, item1 ATNConfigSet
}
-func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
- succeeded := NewATNConfigSet(configs.fullCtx)
- failed := NewATNConfigSet(configs.fullCtx)
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
+ succeeded := NewBaseATNConfigSet(configs.FullContext())
+ failed := NewBaseATNConfigSet(configs.FullContext())
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
if c.GetSemanticContext() != SemanticContextNone {
predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
if predicateEvaluationResult {
@@ -948,16 +934,15 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfig
succeeded.Add(c, nil)
}
}
- return []*ATNConfigSet{succeeded, failed}
+ return []ATNConfigSet{succeeded, failed}
}
-// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
-// un-predicated runtimeConfig which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
+// Look through a list of predicate/alt pairs, returning alts for the
//
-//goland:noinspection GoBoolExpressions
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
predictions := NewBitSet()
for i := 0; i < len(predPredictions); i++ {
@@ -971,11 +956,11 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
}
predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
}
if predicateEvaluationResult {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
}
predictions.add(pair.alt)
@@ -987,82 +972,19 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
return predictions
}
-func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
initialDepth := 0
p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
fullCtx, initialDepth, treatEOFAsEpsilon)
}
-func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("closure(" + config.String() + ")")
- }
-
- var stack []*ATNConfig
- visited := make(map[*ATNConfig]bool)
-
- stack = append(stack, config)
-
- for len(stack) > 0 {
- currConfig := stack[len(stack)-1]
- stack = stack[:len(stack)-1]
-
- if _, ok := visited[currConfig]; ok {
- continue
- }
- visited[currConfig] = true
-
- if _, ok := currConfig.GetState().(*RuleStopState); ok {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if !currConfig.GetContext().isEmpty() {
- for i := 0; i < currConfig.GetContext().length(); i++ {
- if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
- if fullCtx {
- nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
- configs.Add(nb, p.mergeCache)
- continue
- } else {
- // we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
- }
- p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
- continue
- }
- returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
- newContext := currConfig.GetContext().GetParent(i) // "pop" return state
-
- c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
- // While we have context to pop back from, we may have
- // gotten that context AFTER having falling off a rule.
- // Make sure we track that we are now out of context.
- c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
-
- stack = append(stack, c)
- }
- continue
- } else if fullCtx {
- // reached end of start rule
- configs.Add(currConfig, p.mergeCache)
- continue
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
- }
- }
+ //fmt.Println("configs(" + configs.String() + ")")
+ if config.GetReachesIntoOuterContext() > 50 {
+ panic("problem")
}
-
- p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
-}
-
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("closure(" + config.String() + ")")
}
if _, ok := config.GetState().(*RuleStopState); ok {
@@ -1072,12 +994,11 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
for i := 0; i < config.GetContext().length(); i++ {
if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
if fullCtx {
- nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
- configs.Add(nb, p.mergeCache)
+ configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
continue
} else {
// we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
}
p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
@@ -1087,7 +1008,7 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
returnState := p.atn.states[config.GetContext().getReturnState(i)]
newContext := config.GetContext().GetParent(i) // "pop" return state
- c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
// While we have context to pop back from, we may have
// gotten that context AFTER having falling off a rule.
// Make sure we track that we are now out of context.
@@ -1101,7 +1022,7 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
return
} else {
// else if we have no context info, just chase follow links (if greedy)
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
}
}
@@ -1109,10 +1030,8 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig
p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
}
-// Do the actual work of walking epsilon edges
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+// Do the actual work of walking epsilon edges//
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
state := config.GetState()
// optimization
if !state.GetEpsilonOnlyTransitions() {
@@ -1129,7 +1048,7 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
_, ok := t.(*ActionTransition)
continueCollecting := collectPredicates && !ok
c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
- if c != nil {
+ if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
newDepth := depth
if _, ok := config.GetState().(*RuleStopState); ok {
@@ -1137,7 +1056,7 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
// We can't get here if incoming config was rule stop and we had context
// track how far we dip into outer context. Might
// come in handy and we avoid evaluating context dependent
- // preds if this is > 0.
+ // preds if p is > 0.
if p.dfa != nil && p.dfa.getPrecedenceDfa() {
if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
@@ -1153,9 +1072,9 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
continue
}
- configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method
newDepth--
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("dips into outer ctx: " + c.String())
}
} else {
@@ -1179,9 +1098,8 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe
}
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
- if !runtimeConfig.lRLoopEntryBranchOpt {
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
+ if TurnOffLRLoopEntryBranchOpt {
return false
}
@@ -1278,7 +1196,7 @@ func (p *ParserATNSimulator) getRuleName(index int) string {
return sb.String()
}
-func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
switch t.getSerializationType() {
case TransitionRULE:
@@ -1290,13 +1208,13 @@ func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, c
case TransitionACTION:
return p.actionTransition(config, t.(*ActionTransition))
case TransitionEPSILON:
- return NewATNConfig4(config, t.getTarget())
+ return NewBaseATNConfig4(config, t.getTarget())
case TransitionATOM, TransitionRANGE, TransitionSET:
// EOF transitions act like epsilon transitions after the first EOF
// transition is traversed
if treatEOFAsEpsilon {
if t.Matches(TokenEOF, 0, 1) {
- return NewATNConfig4(config, t.getTarget())
+ return NewBaseATNConfig4(config, t.getTarget())
}
}
return nil
@@ -1305,63 +1223,60 @@ func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, c
}
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
}
- return NewATNConfig4(config, t.getTarget())
+ return NewBaseATNConfig4(config, t.getTarget())
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
- pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
if p.parser != nil {
fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
}
}
- var c *ATNConfig
+ var c *BaseATNConfig
if collectPredicates && inContext {
if fullCtx {
// In full context mode, we can evaluate predicates on-the-fly
// during closure, which dramatically reduces the size of
- // the runtimeConfig sets. It also obviates the need to test predicates
+ // the config sets. It also obviates the need to test predicates
// later during conflict resolution.
currentPosition := p.input.Index()
p.input.Seek(p.startIndex)
predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
p.input.Seek(currentPosition)
if predSucceeds {
- c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
}
} else {
newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
}
} else {
- c = NewATNConfig4(config, pt.getTarget())
+ c = NewBaseATNConfig4(config, pt.getTarget())
}
- if runtimeConfig.parserATNSimulatorDebug {
- fmt.Println("runtimeConfig from pred transition=" + c.String())
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
}
return c
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
if p.parser != nil {
fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
}
}
- var c *ATNConfig
+ var c *BaseATNConfig
if collectPredicates && (!pt.isCtxDependent || inContext) {
if fullCtx {
// In full context mode, we can evaluate predicates on-the-fly
@@ -1373,92 +1288,78 @@ func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTran
predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
p.input.Seek(currentPosition)
if predSucceeds {
- c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
}
} else {
newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
}
} else {
- c = NewATNConfig4(config, pt.getTarget())
+ c = NewBaseATNConfig4(config, pt.getTarget())
}
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("config from pred transition=" + c.String())
}
return c
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
- if runtimeConfig.parserATNSimulatorDebug {
+func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
}
returnState := t.followState
newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
- return NewATNConfig1(config, t.getTarget(), newContext)
+ return NewBaseATNConfig1(config, t.getTarget(), newContext)
}
-func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
altsets := PredictionModegetConflictingAltSubsets(configs)
return PredictionModeGetAlts(altsets)
}
-// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// Sam pointed out a problem with the previous definition, v3, of
// ambiguous states. If we have another state associated with conflicting
// alternatives, we should keep going. For example, the following grammar
//
-// s : (ID | ID ID?) ;
-//
-// When the [ATN] simulation reaches the state before ;, it has a [DFA]
-// state that looks like:
-//
-// [12|1|[], 6|2|[], 12|2|[]].
-//
-// Naturally
-//
-// 12|1|[] and 12|2|[]
-//
-// conflict, but we cannot stop processing this node
-// because alternative to has another way to continue, via
-//
-// [6|2|[]].
+// s : (ID | ID ID?) ''
//
+// When the ATN simulation reaches the state before '', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node
+// because alternative to has another way to continue, via [6|2|[]].
// The key is that we have a single state that has config's only associated
// with a single alternative, 2, and crucially the state transitions
// among the configurations are all non-epsilon transitions. That means
// we don't consider any conflicts that include alternative 2. So, we
// ignore the conflict between alts 1 and 2. We ignore a set of
// conflicting alts when there is an intersection with an alternative
-// associated with a single alt state in the state config-list map.
+// associated with a single alt state in the state&rarrconfig-list map.
//
// It's also the case that we might have two conflicting configurations but
-// also a 3rd non-conflicting configuration for a different alternative:
-//
-// [1|1|[], 1|2|[], 8|3|[]].
-//
-// This can come about from grammar:
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
//
-// a : A | A | A B
+// a : A | A | A B
//
// After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue, so we do not
-// stop working on this state.
-//
-// In the previous example, we're concerned
+// However, alternative 3 will be able to continue and so we do not
+// stop working on p state. In the previous example, we're concerned
// with states associated with the conflicting alternatives. Here alt
// 3 is not associated with the conflicting configs, but since we can continue
// looking for input reasonably, I don't declare the state done. We
// ignore a set of conflicting alts when we have an alternative
// that we still need to pursue.
-func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+//
+
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
var conflictingAlts *BitSet
- if configs.uniqueAlt != ATNInvalidAltNumber {
+ if configs.GetUniqueAlt() != ATNInvalidAltNumber {
conflictingAlts = NewBitSet()
- conflictingAlts.add(configs.uniqueAlt)
+ conflictingAlts.add(configs.GetUniqueAlt())
} else {
- conflictingAlts = configs.conflictingAlts
+ conflictingAlts = configs.GetConflictingAlts()
}
return conflictingAlts
}
@@ -1483,10 +1384,11 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
return p.GetTokenName(input.LA(1))
}
-// Used for debugging in [AdaptivePredict] around [execATN], but I cut
-// it out for clarity now that alg. works well. We can leave this
-// "dead" code for a bit.
-func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+// Used for debugging in AdaptivePredict around execATN but I cut
+//
+// it out for clarity now that alg. works well. We can leave p
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
panic("Not implemented")
@@ -1516,13 +1418,13 @@ func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
// }
}
-func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
}
-func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
alt := ATNInvalidAltNumber
- for _, c := range configs.configs {
+ for _, c := range configs.GetItems() {
if alt == ATNInvalidAltNumber {
alt = c.GetAlt() // found first alt
} else if c.GetAlt() != alt {
@@ -1550,10 +1452,8 @@ func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
// @return If {@code to} is {@code nil}, p method returns {@code nil}
// otherwise p method returns the result of calling {@link //addDFAState}
// on {@code to}
-//
-//goland:noinspection GoBoolExpressions
func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
}
if to == nil {
@@ -1572,7 +1472,7 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
from.setIthEdge(t+1, to) // connect
p.atn.edgeMu.Unlock()
- if runtimeConfig.parserATNSimulatorDebug {
+ if ParserATNSimulatorDebug {
var names []string
if p.parser != nil {
names = p.parser.GetLiteralNames()
@@ -1583,49 +1483,48 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
return to
}
-// addDFAState adds state D to the [DFA] if it is not already present, and returns
-// the actual instance stored in the [DFA]. If a state equivalent to D
-// is already in the [DFA], the existing state is returned. Otherwise, this
-// method returns D after adding it to the [DFA].
+// Add state {@code D} to the DFA if it is not already present, and return
+// the actual instance stored in the DFA. If a state equivalent to {@code D}
+// is already in the DFA, the existing state is returned. Otherwise p
+// method returns {@code D} after adding it to the DFA.
//
-// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
-// does not change the DFA.
+// If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
+// does not change the DFA.
//
-//goland:noinspection GoBoolExpressions
+// @param dfa The dfa
+// @param D The DFA state to add
+// @return The state stored in the DFA. This will be either the existing
+// state if {@code D} is already in the DFA, or {@code D} itself if the
+// state was not already present.
func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
if d == ATNSimulatorError {
return d
}
-
- existing, present := dfa.Get(d)
+ existing, present := dfa.states.Get(d)
if present {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+ if ParserATNSimulatorTraceATNSim {
fmt.Print("addDFAState " + d.String() + " exists")
}
return existing
}
- // The state will be added if not already there or we will be given back the existing state struct
- // if it is present.
+ // The state was not present, so update it with configs
//
- d.stateNumber = dfa.Len()
- if !d.configs.readOnly {
- d.configs.OptimizeConfigs(&p.BaseATNSimulator)
- d.configs.readOnly = true
- d.configs.configLookup = nil
+ d.stateNumber = dfa.states.Len()
+ if !d.configs.ReadOnly() {
+ d.configs.OptimizeConfigs(p.BaseATNSimulator)
+ d.configs.SetReadOnly(true)
}
- dfa.Put(d)
-
- if runtimeConfig.parserATNSimulatorTraceATNSim {
+ dfa.states.Put(d)
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("addDFAState new " + d.String())
}
return d
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
interval := NewInterval(startIndex, stopIndex+1)
fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
@@ -1635,9 +1534,8 @@ func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAl
}
}
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
interval := NewInterval(startIndex, stopIndex+1)
fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
@@ -1647,15 +1545,10 @@ func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int,
}
}
-// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route.
-//
-// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer
-// so that they can see that this is happening and can take action if they want to.
-//
-//goland:noinspection GoBoolExpressions
-func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
- exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
- if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+// If context sensitive parsing, we know it's ambiguity not conflict//
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
interval := NewInterval(startIndex, stopIndex+1)
fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
similarity index 77%
rename from vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
index c249bc1385..1c8cee7479 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
@@ -31,9 +31,7 @@ type ParserRuleContext interface {
}
type BaseParserRuleContext struct {
- parentCtx RuleContext
- invokingState int
- RuleIndex int
+ *BaseRuleContext
start, stop Token
exception RecognitionException
@@ -42,22 +40,8 @@ type BaseParserRuleContext struct {
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
prc := new(BaseParserRuleContext)
- InitBaseParserRuleContext(prc, parent, invokingStateNumber)
- return prc
-}
-
-func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
- // What context invoked b rule?
- prc.parentCtx = parent
- // What state invoked the rule associated with b context?
- // The "return address" is the followState of invokingState
- // If parent is nil, b should be -1.
- if parent == nil {
- prc.invokingState = -1
- } else {
- prc.invokingState = invokingStateNumber
- }
+ prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
prc.RuleIndex = -1
// * If we are debugging or building a parse tree for a Visitor,
@@ -72,6 +56,8 @@ func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleCont
// The exception that forced prc rule to return. If the rule successfully
// completed, prc is {@code nil}.
prc.exception = nil
+
+ return prc
}
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
@@ -104,15 +90,14 @@ func (prc *BaseParserRuleContext) GetText() string {
return s
}
-// EnterRule is called when any rule is entered.
-func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
+// Double dispatch methods for listeners
+func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}
-// ExitRule is called when any rule is exited.
-func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
+func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}
-// * Does not set parent link other add methods do that
+// * Does not set parent link other add methods do that///
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if prc.children == nil {
prc.children = make([]Tree, 0)
@@ -135,9 +120,10 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
return child
}
-// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
-// we entered a rule. If we have a label, we will need to remove
-// the generic ruleContext object.
+// * Used by EnterOuterAlt to toss out a RuleContext previously added as
+// we entered a rule. If we have // label, we will need to remove
+// generic ruleContext object.
+// /
func (prc *BaseParserRuleContext) RemoveLastChild() {
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
@@ -307,7 +293,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int {
return len(prc.children)
}
-func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
+func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
if prc.start == nil || prc.stop == nil {
return TreeInvalidInterval
}
@@ -354,50 +340,6 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
return s
}
-func (prc *BaseParserRuleContext) SetParent(v Tree) {
- if v == nil {
- prc.parentCtx = nil
- } else {
- prc.parentCtx = v.(RuleContext)
- }
-}
-
-func (prc *BaseParserRuleContext) GetInvokingState() int {
- return prc.invokingState
-}
-
-func (prc *BaseParserRuleContext) SetInvokingState(t int) {
- prc.invokingState = t
-}
-
-func (prc *BaseParserRuleContext) GetRuleIndex() int {
- return prc.RuleIndex
-}
-
-func (prc *BaseParserRuleContext) GetAltNumber() int {
- return ATNInvalidAltNumber
-}
-
-func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
-
-// IsEmpty returns true if the context of b is empty.
-//
-// A context is empty if there is no invoking state, meaning nobody calls
-// current context.
-func (prc *BaseParserRuleContext) IsEmpty() bool {
- return prc.invokingState == -1
-}
-
-// GetParent returns the combined text of all child nodes. This method only considers
-// tokens which have been added to the parse tree.
-//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not
-// added to the parse trees, they will not appear in the output of this
-// method.
-func (prc *BaseParserRuleContext) GetParent() Tree {
- return prc.parentCtx
-}
-
var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
@@ -408,7 +350,6 @@ type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
-//goland:noinspection GoUnusedExportedFunction
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
prc := new(BaseInterpreterRuleContext)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
new file mode 100644
index 0000000000..ba62af3610
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
@@ -0,0 +1,806 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+// Represents {@code $} in local context prediction, which means wildcard.
+// {@code//+x =//}.
+// /
+const (
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// Represents {@code $} in an array in full context mode, when {@code $}
+// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+// {@code $} = {@link //EmptyReturnState}.
+// /
+
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+type PredictionContext interface {
+ Hash() int
+ Equals(interface{}) bool
+ GetParent(int) PredictionContext
+ getReturnState(int) int
+ length() int
+ isEmpty() bool
+ hasEmptyPath() bool
+ String() string
+}
+
+type BasePredictionContext struct {
+ cachedHash int
+}
+
+func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
+ pc := new(BasePredictionContext)
+ pc.cachedHash = cachedHash
+
+ return pc
+}
+
+func (b *BasePredictionContext) isEmpty() bool {
+ return false
+}
+
+func calculateHash(parent PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+// Used to cache {@link BasePredictionContext} objects. Its used for the shared
+// context cash associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+
+type PredictionContextCache struct {
+ cache map[PredictionContext]PredictionContext
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ t := new(PredictionContextCache)
+ t.cache = make(map[PredictionContext]PredictionContext)
+ return t
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a Newcontext to the cache.
+// Protect shared cache from unsafe thread access.
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
+ if ctx == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY
+ }
+ existing := p.cache[ctx]
+ if existing != nil {
+ return existing
+ }
+ p.cache[ctx] = ctx
+ return ctx
+}
+
+func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
+ return p.cache[ctx]
+}
+
+func (p *PredictionContextCache) length() int {
+ return len(p.cache)
+}
+
+type SingletonPredictionContext interface {
+ PredictionContext
+}
+
+type BaseSingletonPredictionContext struct {
+ *BasePredictionContext
+
+ parentCtx PredictionContext
+ returnState int
+}
+
+func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
+ var cachedHash int
+ if parent != nil {
+ cachedHash = calculateHash(parent, returnState)
+ } else {
+ cachedHash = calculateEmptyHash()
+ }
+
+ s := new(BaseSingletonPredictionContext)
+ s.BasePredictionContext = NewBasePredictionContext(cachedHash)
+
+ s.parentCtx = parent
+ s.returnState = returnState
+
+ return s
+}
+
+func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func (b *BaseSingletonPredictionContext) length() int {
+ return 1
+}
+
+func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
+ return b.parentCtx
+}
+
+func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
+ return b.returnState
+}
+
+func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
+ return b.returnState == BasePredictionContextEmptyReturnState
+}
+
+func (b *BaseSingletonPredictionContext) Hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
+ if b == other {
+ return true
+ }
+ if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ return false
+ }
+
+ otherP := other.(*BaseSingletonPredictionContext)
+
+ if b.returnState != otherP.getReturnState(0) {
+ return false
+ }
+ if b.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return b.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (b *BaseSingletonPredictionContext) String() string {
+ var up string
+
+ if b.parentCtx == nil {
+ up = ""
+ } else {
+ up = b.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if b.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(b.returnState)
+ }
+
+ return strconv.Itoa(b.returnState) + " " + up
+}
+
+var BasePredictionContextEMPTY = NewEmptyPredictionContext()
+
+type EmptyPredictionContext struct {
+ *BaseSingletonPredictionContext
+}
+
+func NewEmptyPredictionContext() *EmptyPredictionContext {
+
+ p := new(EmptyPredictionContext)
+
+ p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
+ p.cachedHash = calculateEmptyHash()
+ return p
+}
+
+func (e *EmptyPredictionContext) isEmpty() bool {
+ return true
+}
+
+func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
+ return nil
+}
+
+func (e *EmptyPredictionContext) getReturnState(index int) int {
+ return e.returnState
+}
+
+func (e *EmptyPredictionContext) Hash() int {
+ return e.cachedHash
+}
+
+func (e *EmptyPredictionContext) Equals(other interface{}) bool {
+ return e == other
+}
+
+func (e *EmptyPredictionContext) String() string {
+ return "$"
+}
+
+type ArrayPredictionContext struct {
+ *BasePredictionContext
+
+ parents []PredictionContext
+ returnStates []int
+}
+
+func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array
+ // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+ // nil parent and
+ // returnState == {@link //EmptyReturnState}.
+ hash := murmurInit(1)
+
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ c := new(ArrayPredictionContext)
+ c.BasePredictionContext = NewBasePredictionContext(hash)
+
+ c.parents = parents
+ c.returnStates = returnStates
+
+ return c
+}
+
+func (a *ArrayPredictionContext) GetReturnStates() []int {
+ return a.returnStates
+}
+
+func (a *ArrayPredictionContext) hasEmptyPath() bool {
+ return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) isEmpty() bool {
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return a.returnStates[0] == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) length() int {
+ return len(a.returnStates)
+}
+
+func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
+ return a.parents[index]
+}
+
+func (a *ArrayPredictionContext) getReturnState(index int) int {
+ return a.returnStates[index]
+}
+
+// Equals is the default comparison function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Equals(o interface{}) bool {
+ if a == o {
+ return true
+ }
+ other, ok := o.(*ArrayPredictionContext)
+ if !ok {
+ return false
+ }
+ if a.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(a.returnStates, other.returnStates) &&
+ slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+// Hash is the default hash function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Hash() int {
+ return a.BasePredictionContext.cachedHash
+}
+
+func (a *ArrayPredictionContext) String() string {
+ if a.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(a.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if a.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(a.returnStates[i])
+ if a.parents[i] != nil {
+ s = s + " " + a.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+
+ return s + "]"
+}
+
+// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
+// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
+// /
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
+func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
+ // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
+ // from it.
+ // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
+ // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
+ // either of them.
+
+ ac, ok1 := a.(*BaseSingletonPredictionContext)
+ bc, ok2 := b.(*BaseSingletonPredictionContext)
+
+ if ok1 && ok2 {
+ return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as// wildcard
+ if rootIsWildcard {
+ if _, ok := a.(*EmptyPredictionContext); ok {
+ return a
+ }
+ if _, ok := b.(*EmptyPredictionContext); ok {
+ return b
+ }
+ }
+
+ // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
+ // here.
+ //
+ // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
+
+ var arp, arb *ArrayPredictionContext
+ var ok bool
+ if arp, ok = a.(*ArrayPredictionContext); ok {
+ } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ } else if _, ok = a.(*EmptyPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
+ }
+
+ if arb, ok = b.(*ArrayPredictionContext); ok {
+ } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ } else if _, ok = b.(*EmptyPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
+ }
+
+ // Both arp and arb
+ return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
+}
+
+// Merge two {@link SingletonBasePredictionContext} instances.
+//
+// Stack tops equal, parents merge is same return left graph.
+//
+//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A Newroot node is created to point to the
+// merged parents.
+//
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// @param mergeCache
+// /
+func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.Hash(), b.Hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.Hash(), a.Hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent == a.parentCtx {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent == b.parentCtx {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array
+ // Newjoined parent so create Newsingleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent PredictionContext
+ if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), apc)
+ }
+ return apc
+}
+
+// Handle case where at least one of {@code a} or {@code b} is
+// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
+// to represent {@link //EMPTY}.
+//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+//
+// Special case of last merge if local context.
+//
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+//
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
+//
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// /
+func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
+ if rootIsWildcard {
+ if a == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // // + b =//
+ }
+ if b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // a +// =//
+ }
+ } else {
+ if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
+
+// Merge two {@link ArrayBasePredictionContext} instances.
+//
+// Different tops, different parents.
+//
+//
+// Shared top, same parents.
+//
+//
+// Shared top, different parents.
+//
+//
+// Shared top, all shared parents.
+//
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+// /
+func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.Hash(), b.Hash())
+ if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.Hash(), a.Hash())
+ if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous.(PredictionContext)
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: track whether this is possible above during merge sort for speed
+ // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
+ if M == a {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), a)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M == b {
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), b)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.set(a.Hash(), b.Hash(), M)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
+
+// Make pass over all M {@code parents} merge any {@code equals()}
+// ones.
+// /
+func combineCommonParents(parents []PredictionContext) {
+ uniqueParents := make(map[PredictionContext]PredictionContext)
+
+ for p := 0; p < len(parents); p++ {
+ parent := parents[p]
+ if uniqueParents[parent] == nil {
+ uniqueParents[parent] = parent
+ }
+ }
+ for q := 0; q < len(parents); q++ {
+ parents[q] = uniqueParents[parents[q]]
+ }
+}
+
+func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
+
+ if context.isEmpty() {
+ return context
+ }
+ existing := visited[context]
+ if existing != nil {
+ return existing
+ }
+ existing = contextCache.Get(context)
+ if existing != nil {
+ visited[context] = existing
+ return existing
+ }
+ changed := false
+ parents := make([]PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || parent != context.GetParent(i) {
+ if !changed {
+ parents = make([]PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited[context] = context
+ return context
+ }
+ var updated PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited[updated] = updated
+ visited[context] = updated
+
+ return updated
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
new file mode 100644
index 0000000000..7b9b72fab1
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ //
+ // The SLL(*) prediction mode. This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // {@link //LL} prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the {@link //SLL} prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful {@link //LL} prediction abilities to complete successfully.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+ //
+ // The LL(*) prediction mode. This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // Report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+ //
+ // The LL(*) prediction mode with exact ambiguity detection. In addition to
+ // the correctness guarantees provided by the {@link //LL} prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
+
+// Computes the SLL prediction termination condition.
+//
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+//
+// COMBINED SLL+LL PARSING
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// HEURISTIC
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
+//
+// When the ATN simulation reaches the state before {@code ”}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative to has another way to continue,
+// via {@code [6|2|[]]}.
+//
+// It also let's us continue for this rule:
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// PURE SLL PARSING
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// PREDICATES IN SLL+LL PARSING
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x”} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+ // contexts if we can fail over to full LL costs more time
+ // since we'll often fail over anyway.
+ if configs.HasSemanticContext() {
+ // dup configs, tossing out semantic predicates
+ dup := NewBaseATNConfigSet(false)
+ for _, c := range configs.GetItems() {
+
+ // NewBaseATNConfig({semanticContext:}, c)
+ c = NewBaseATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar preds
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// Checks if any configuration in {@code configs} is in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if any configuration in {@code configs} is in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if all configurations in {@code configs} are in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if all configurations in {@code configs} are in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
+
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Full LL prediction termination.
+//
+// Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations {@code C}, into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing a equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// CONTINUE/STOP RULE
+//
+// Continue if union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us to strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// CASES
+//
+//
+//
+// - no conflicts and more than 1 alternative in set => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1,3}} => continue
+//
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
+// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1}} => stop and predict 1
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {1}} = {@code {1}} => stop and predict 1, can announce
+// ambiguity {@code {1,2}}
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {2}} = {@code {1,2}} => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {3}} = {@code {1,3}} => continue
+//
+//
+//
+// EXACT AMBIGUITY DETECTION
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
+//
+// |A_i|>1
+// and
+// A_i = A_j
+// for all i, j.
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// Determines if every alternative subset in {@code altsets} contains more
+// than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every {@link BitSet} in {@code altsets} has
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// Determines if any single alternative subset in {@code altsets} contains
+// exactly one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// Determines if any single alternative subset in {@code altsets} contains
+// more than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// Determines if every alternative subset in {@code altsets} is equivalent.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every member of {@code altsets} is equal to the
+// others, otherwise {@code false}
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the unique alternative predicted by all alternative subsets in
+// {@code altsets}. If no such alternative exists, this method returns
+// {@link ATN//INVALID_ALT_NUMBER}.
+//
+// @param altsets a collection of alternative subsets
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// Gets the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each {@link BitSet}
+// in {@code altsets}.
+//
+// @param altsets a collection of alternative subsets
+// @return the set of represented alternatives in {@code altsets}
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+// For each configuration {@code c} in {@code configs}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
+
+ for _, c := range configs.GetItems() {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
+// configuration {@code c} in {@code configs}:
+//
+//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+//
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.GetItems() {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
similarity index 70%
rename from vendor/github.com/antlr4-go/antlr/v4/recognizer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
index dcb8548cd1..bfe542d091 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
@@ -26,9 +26,6 @@ type Recognizer interface {
RemoveErrorListeners()
GetATN() *ATN
GetErrorListenerDispatch() ErrorListener
- HasError() bool
- GetError() RecognitionException
- SetError(RecognitionException)
}
type BaseRecognizer struct {
@@ -39,7 +36,6 @@ type BaseRecognizer struct {
LiteralNames []string
SymbolicNames []string
GrammarFileName string
- SynErr RecognitionException
}
func NewBaseRecognizer() *BaseRecognizer {
@@ -49,32 +45,17 @@ func NewBaseRecognizer() *BaseRecognizer {
return rec
}
-//goland:noinspection GoUnusedGlobalVariable
var tokenTypeMapCache = make(map[string]int)
-
-//goland:noinspection GoUnusedGlobalVariable
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.13.1"
+ runtimeVersion := "4.12.0"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
}
-func (b *BaseRecognizer) SetError(err RecognitionException) {
- b.SynErr = err
-}
-
-func (b *BaseRecognizer) HasError() bool {
- return b.SynErr != nil
-}
-
-func (b *BaseRecognizer) GetError() RecognitionException {
- return b.SynErr
-}
-
-func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
}
@@ -124,11 +105,9 @@ func (b *BaseRecognizer) SetState(v int) {
// return result
//}
-// GetRuleIndexMap Get a map from rule names to rule indexes.
+// Get a map from rule names to rule indexes.
//
-// Used for XPath and tree pattern compilation.
-//
-// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+// Used for XPath and tree pattern compilation.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
panic("Method not defined!")
@@ -145,8 +124,7 @@ func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
// return result
}
-// GetTokenType get the token type based upon its name
-func (b *BaseRecognizer) GetTokenType(_ string) int {
+func (b *BaseRecognizer) GetTokenType(tokenName string) int {
panic("Method not defined!")
// var ttype = b.GetTokenTypeMap()[tokenName]
// if (ttype !=nil) {
@@ -184,27 +162,26 @@ func (b *BaseRecognizer) GetTokenType(_ string) int {
// }
//}
-// GetErrorHeader returns the error header, normally line/character position information.
-//
-// Can be overridden in sub structs embedding BaseRecognizer.
+// What is the error header, normally line/character position information?//
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
line := e.GetOffendingToken().GetLine()
column := e.GetOffendingToken().GetColumn()
return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}
-// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+// How should a token be displayed in an error message? The default
//
-// The default is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
//
-// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
-// implementations of [ANTLRErrorStrategy] may provide a similar
-// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of {@link ANTLRErrorStrategy} may provide a similar
+// feature when necessary. For example, see
+// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
@@ -228,14 +205,12 @@ func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
return NewProxyErrorListener(b.listeners)
}
-// Sempred embedding structs need to override this if there are sempreds or actions
-// that the ATN interpreter needs to execute
-func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
return true
}
-// Precpred embedding structs need to override this if there are preceding predicates
-// that the ATN interpreter needs to execute
-func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
return true
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
new file mode 100644
index 0000000000..210699ba23
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with b context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked b. Contrast b with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
+
+type BaseRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+ rn := new(BaseRuleContext)
+
+ // What context invoked b rule?
+ rn.parentCtx = parent
+
+ // What state invoked the rule associated with b context?
+ // The "return address" is the followState of invokingState
+ // If parent is nil, b should be -1.
+ if parent == nil {
+ rn.invokingState = -1
+ } else {
+ rn.invokingState = invokingState
+ }
+
+ return rn
+}
+
+func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
+ return b
+}
+
+func (b *BaseRuleContext) SetParent(v Tree) {
+ if v == nil {
+ b.parentCtx = nil
+ } else {
+ b.parentCtx = v.(RuleContext)
+ }
+}
+
+func (b *BaseRuleContext) GetInvokingState() int {
+ return b.invokingState
+}
+
+func (b *BaseRuleContext) SetInvokingState(t int) {
+ b.invokingState = t
+}
+
+func (b *BaseRuleContext) GetRuleIndex() int {
+ return b.RuleIndex
+}
+
+func (b *BaseRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
+
+// A context is empty if there is no invoking state meaning nobody call
+// current context.
+func (b *BaseRuleContext) IsEmpty() bool {
+ return b.invokingState == -1
+}
+
+// Return the combined text of all child nodes. This method only considers
+// tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of b
+// method.
+//
+
+func (b *BaseRuleContext) GetParent() Tree {
+ return b.parentCtx
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
similarity index 92%
rename from vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
index 68cb9061eb..a702e99def 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
@@ -9,13 +9,14 @@ import (
"strconv"
)
-// SemanticContext is a tree structure used to record the semantic context in which
+// A tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
//
-// an ATN configuration is valid. It's either a single predicate,
-// a conjunction p1 && p2, or a sum of products p1 || p2.
+//
+// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
//
-// I have scoped the AND, OR, and Predicate subclasses of
-// [SemanticContext] within the scope of this outer ``class''
+
type SemanticContext interface {
Equals(other Collectable[SemanticContext]) bool
Hash() int
@@ -79,7 +80,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
var SemanticContextNone = NewPredicate(-1, -1, false)
-func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
+func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
return p
}
@@ -197,7 +198,7 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -229,7 +230,9 @@ func NewAND(a, b SemanticContext) *AND {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- copy(opnds, vs)
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
and := new(AND)
and.opnds = opnds
@@ -313,12 +316,12 @@ func (a *AND) Hash() int {
return murmurFinish(h, len(a.opnds))
}
-func (o *OR) Hash() int {
- h := murmurInit(41) // Init with o value different from AND
- for _, op := range o.opnds {
+func (a *OR) Hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range a.opnds {
h = murmurUpdate(h, op.Hash())
}
- return murmurFinish(h, len(o.opnds))
+ return murmurFinish(h, len(a.opnds))
}
func (a *AND) String() string {
@@ -346,7 +349,7 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -379,7 +382,9 @@ func NewOR(a, b SemanticContext) *OR {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- copy(opnds, vs)
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
o := new(OR)
o.opnds = opnds
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
similarity index 74%
rename from vendor/github.com/antlr4-go/antlr/v4/token.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
index f5bc34229d..f73b06bc6a 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/token.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
@@ -35,8 +35,6 @@ type Token interface {
GetTokenSource() TokenSource
GetInputStream() CharStream
-
- String() string
}
type BaseToken struct {
@@ -55,7 +53,7 @@ type BaseToken struct {
const (
TokenInvalidType = 0
- // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
// and did not follow it despite needing to.
TokenEpsilon = -2
@@ -63,16 +61,15 @@ const (
TokenEOF = -1
- // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
- //
- // All tokens go to the parser (unless [Skip] is called in the lexer rule)
+ // All tokens go to the parser (unless Skip() is called in that rule)
// on a particular "channel". The parser tunes to a particular channel
// so that whitespace etc... can go to the parser on a "hidden" channel.
+
TokenDefaultChannel = 0
- // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel].
- //
- // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
+ // Anything on different channel than DEFAULT_CHANNEL is not parsed
+ // by parser.
+
TokenHiddenChannel = 1
)
@@ -104,25 +101,6 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
return b.source
}
-func (b *BaseToken) GetText() string {
- if b.text != "" {
- return b.text
- }
- input := b.GetInputStream()
- if input == nil {
- return ""
- }
- n := input.Size()
- if b.GetStart() < n && b.GetStop() < n {
- return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop()))
- }
- return ""
-}
-
-func (b *BaseToken) SetText(text string) {
- b.text = text
-}
-
func (b *BaseToken) GetTokenIndex() int {
return b.tokenIndex
}
@@ -139,45 +117,22 @@ func (b *BaseToken) GetInputStream() CharStream {
return b.source.charStream
}
-func (b *BaseToken) String() string {
- txt := b.GetText()
- if txt != "" {
- txt = strings.Replace(txt, "\n", "\\n", -1)
- txt = strings.Replace(txt, "\r", "\\r", -1)
- txt = strings.Replace(txt, "\t", "\\t", -1)
- } else {
- txt = ""
- }
-
- var ch string
- if b.GetChannel() > 0 {
- ch = ",channel=" + strconv.Itoa(b.GetChannel())
- } else {
- ch = ""
- }
-
- return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" +
- txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" +
- ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]"
-}
-
type CommonToken struct {
- BaseToken
+ *BaseToken
}
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
- t := &CommonToken{
- BaseToken: BaseToken{
- source: source,
- tokenType: tokenType,
- channel: channel,
- start: start,
- stop: stop,
- tokenIndex: -1,
- },
- }
+ t := new(CommonToken)
+
+ t.BaseToken = new(BaseToken)
+ t.source = source
+ t.tokenType = tokenType
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
if t.source.tokenSource != nil {
t.line = source.tokenSource.GetLine()
t.column = source.tokenSource.GetCharPositionInLine()
@@ -211,3 +166,44 @@ func (c *CommonToken) clone() *CommonToken {
t.text = c.GetText()
return t
}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/token_source.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
similarity index 90%
rename from vendor/github.com/antlr4-go/antlr/v4/token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
index bf4ff6633e..1527d43f60 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
@@ -8,14 +8,13 @@ type TokenStream interface {
IntStream
LT(k int) Token
- Reset()
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
- GetTextFromInterval(Interval) string
+ GetTextFromInterval(*Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
similarity index 73%
rename from vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
index ccf59b465c..b3e38af344 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
@@ -86,15 +86,14 @@ import (
// first example shows.
const (
- DefaultProgramName = "default"
- ProgramInitSize = 100
- MinTokenIndex = 0
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
)
// Define the rewrite operation hierarchy
type RewriteOperation interface {
-
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
Execute(buffer *bytes.Buffer) int
@@ -113,19 +112,19 @@ type RewriteOperation interface {
type BaseRewriteOperation struct {
//Current index of rewrites list
- instructionIndex int
+ instruction_index int
//Token buffer index
index int
//Substitution text
text string
//Actual operation name
- opName string
+ op_name string
//Pointer to token steam
tokens TokenStream
}
func (op *BaseRewriteOperation) GetInstructionIndex() int {
- return op.instructionIndex
+ return op.instruction_index
}
func (op *BaseRewriteOperation) GetIndex() int {
@@ -137,7 +136,7 @@ func (op *BaseRewriteOperation) GetText() string {
}
func (op *BaseRewriteOperation) GetOpName() string {
- return op.opName
+ return op.op_name
}
func (op *BaseRewriteOperation) GetTokens() TokenStream {
@@ -145,7 +144,7 @@ func (op *BaseRewriteOperation) GetTokens() TokenStream {
}
func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
- op.instructionIndex = val
+ op.instruction_index = val
}
func (op *BaseRewriteOperation) SetIndex(val int) {
@@ -157,20 +156,20 @@ func (op *BaseRewriteOperation) SetText(val string) {
}
func (op *BaseRewriteOperation) SetOpName(val string) {
- op.opName = val
+ op.op_name = val
}
func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
op.tokens = val
}
-func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
return op.index
}
func (op *BaseRewriteOperation) String() string {
return fmt.Sprintf("<%s@%d:\"%s\">",
- op.opName,
+ op.op_name,
op.tokens.Get(op.GetIndex()),
op.text,
)
@@ -183,10 +182,10 @@ type InsertBeforeOp struct {
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
- index: index,
- text: text,
- opName: "InsertBeforeOp",
- tokens: stream,
+ index: index,
+ text: text,
+ op_name: "InsertBeforeOp",
+ tokens: stream,
}}
}
@@ -202,21 +201,20 @@ func (op *InsertBeforeOp) String() string {
return op.BaseRewriteOperation.String()
}
-// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions
-// first and then the "insert before" instructions at same index. Implementation
-// of "insert after" is "insert before index+1".
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
type InsertAfterOp struct {
BaseRewriteOperation
}
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
- return &InsertAfterOp{
- BaseRewriteOperation: BaseRewriteOperation{
- index: index + 1,
- text: text,
- tokens: stream,
- },
- }
+ return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1,
+ text: text,
+ tokens: stream,
+ }}
}
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
@@ -231,7 +229,7 @@ func (op *InsertAfterOp) String() string {
return op.BaseRewriteOperation.String()
}
-// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct {
BaseRewriteOperation
@@ -241,10 +239,10 @@ type ReplaceOp struct {
func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
return &ReplaceOp{
BaseRewriteOperation: BaseRewriteOperation{
- index: from,
- text: text,
- opName: "ReplaceOp",
- tokens: stream,
+ index: from,
+ text: text,
+ op_name: "ReplaceOp",
+ tokens: stream,
},
LastIndex: to,
}
@@ -272,17 +270,17 @@ type TokenStreamRewriter struct {
// You may have multiple, named streams of rewrite operations.
// I'm calling these things "programs."
// Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- lastRewriteTokenIndexes map[string]int
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
}
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
return &TokenStreamRewriter{
tokens: tokens,
programs: map[string][]RewriteOperation{
- DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
},
- lastRewriteTokenIndexes: map[string]int{},
+ last_rewrite_token_indexes: map[string]int{},
}
}
@@ -293,110 +291,110 @@ func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
- is, ok := tsr.programs[programName]
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
+ is, ok := tsr.programs[program_name]
if ok {
- tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
}
}
-func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
- tsr.Rollback(DefaultProgramName, instructionIndex)
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
+ tsr.Rollback(Default_Program_Name, instruction_index)
}
-// DeleteProgram Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
- tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
+// Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}
func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
- tsr.DeleteProgram(DefaultProgramName)
+ tsr.DeleteProgram(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
// to insert after, just insert before next index (even if past end)
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(programName)
+ rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(programName, op)
+ tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
- tsr.InsertAfter(DefaultProgramName, index, text)
+ tsr.InsertAfter(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
- tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(programName)
+ rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(programName, op)
+ tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
- tsr.InsertBefore(DefaultProgramName, index, text)
+ tsr.InsertBefore(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
- tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
from, to, tsr.tokens.Size()))
}
var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
- rewrites := tsr.GetProgram(programName)
+ rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(programName, op)
+ tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
- tsr.Replace(DefaultProgramName, from, to, text)
+ tsr.Replace(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
tsr.ReplaceDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
- tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
- tsr.ReplaceToken(DefaultProgramName, from, to, text)
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
tsr.ReplaceTokenDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
- tsr.Replace(programName, from, to, "")
+func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
+ tsr.Replace(program_name, from, to, "")
}
func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
- tsr.Delete(DefaultProgramName, from, to)
+ tsr.Delete(Default_Program_Name, from, to)
}
func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
tsr.DeleteDefault(index, index)
}
-func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
- tsr.ReplaceToken(programName, from, to, "")
+func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
}
func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
- tsr.DeleteToken(DefaultProgramName, from, to)
+ tsr.DeleteToken(Default_Program_Name, from, to)
}
-func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
- i, ok := tsr.lastRewriteTokenIndexes[programName]
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
if !ok {
return -1
}
@@ -404,15 +402,15 @@ func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int
}
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
- return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
- tsr.lastRewriteTokenIndexes[programName] = i
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
+ tsr.last_rewrite_token_indexes[program_name] = i
}
func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
- is := make([]RewriteOperation, 0, ProgramInitSize)
+ is := make([]RewriteOperation, 0, Program_Init_Size)
tsr.programs[name] = is
return is
}
@@ -431,24 +429,24 @@ func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
return is
}
-// GetTextDefault returns the text from the original tokens altered per the
+// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string {
return tsr.GetText(
- DefaultProgramName,
+ Default_Program_Name,
NewInterval(0, tsr.tokens.Size()-1))
}
-// GetText returns the text from the original tokens altered per the
+// Return the text from the original tokens altered per the
// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
- rewrites := tsr.programs[programName]
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
start := interval.Start
stop := interval.Stop
// ensure start/end are in range
stop = min(stop, tsr.tokens.Size()-1)
start = max(start, 0)
- if len(rewrites) == 0 {
+ if rewrites == nil || len(rewrites) == 0 {
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
}
buf := bytes.Buffer{}
@@ -484,13 +482,11 @@ func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) s
return buf.String()
}
-// reduceToSingleOperationPerIndex combines operations and report invalid operations (like
-// overlapping replaces that are not completed nested). Inserts to
-// same index need to be combined etc...
-//
-// Here are the cases:
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
//
-// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.j.v leave alone, nonoverlapping
// I.i.u I.i.v combine: Iivu
//
// R.i-j.u R.x-y.v | i-j in x-y delete first R
@@ -502,38 +498,38 @@ func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) s
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
//
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
// R.x-y.v I.i.u | i in x-y ERROR
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
-// First we need to examine replaces. For any replace op:
+// First we need to examine replaces. For any replace op:
//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
//
-// Then we can deal with inserts:
+// Then we can deal with inserts:
//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this 'replace'.
-// 3. throw exception if index in same range as previous replace
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the 'replace' range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
//
-// The func returns a map from token index to operation.
+// Return a map from token index to operation.
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
// WALK REPLACES
for i := 0; i < len(rewrites); i++ {
@@ -551,7 +547,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
if iop.index == rop.index {
// E.g., insert before 2, delete 2..2; update replace
// text to include insert before, kill insert
- rewrites[iop.instructionIndex] = nil
+ rewrites[iop.instruction_index] = nil
if rop.text != "" {
rop.text = iop.text + rop.text
} else {
@@ -559,7 +555,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
}
} else if iop.index > rop.index && iop.index <= rop.LastIndex {
// delete insert as it's a no-op.
- rewrites[iop.instructionIndex] = nil
+ rewrites[iop.instruction_index] = nil
}
}
}
@@ -568,7 +564,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
if prevop, ok := rewrites[j].(*ReplaceOp); ok {
if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
// delete replace as it's a no-op.
- rewrites[prevop.instructionIndex] = nil
+ rewrites[prevop.instruction_index] = nil
continue
}
// throw exception unless disjoint or identical
@@ -576,9 +572,10 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
if prevop.text == "" && rop.text == "" && !disjoint {
- rewrites[prevop.instructionIndex] = nil
+ rewrites[prevop.instruction_index] = nil
rop.index = min(prevop.index, rop.index)
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
} else if !disjoint {
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
}
@@ -610,7 +607,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
if prevIop.index == iop.GetIndex() {
iop.SetText(iop.GetText() + prevIop.text)
- rewrites[prevIop.instructionIndex] = nil
+ rewrites[prevIop.instruction_index] = nil
}
}
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
similarity index 67%
rename from vendor/github.com/antlr4-go/antlr/v4/transition.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
index 313b0fc127..36be4f7331 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/transition.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
@@ -72,7 +72,7 @@ func (t *BaseTransition) getSerializationType() int {
return t.serializationType
}
-func (t *BaseTransition) Matches(_, _, _ int) bool {
+func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
panic("Not implemented")
}
@@ -89,7 +89,6 @@ const (
TransitionPRECEDENCE = 10
)
-//goland:noinspection GoUnusedGlobalVariable
var TransitionserializationNames = []string{
"INVALID",
"EPSILON",
@@ -128,22 +127,19 @@ var TransitionserializationNames = []string{
// TransitionPRECEDENCE
//}
-// AtomTransition
// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
- t := &AtomTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionATOM,
- label: intervalSet,
- isEpsilon: false,
- },
- }
+
+ t := new(AtomTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.label = intervalSet // The token type or character value or, signifies special intervalSet.
t.intervalSet = t.makeLabel()
+ t.serializationType = TransitionATOM
return t
}
@@ -154,7 +150,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
return s
}
-func (t *AtomTransition) Matches(symbol, _, _ int) bool {
+func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label == symbol
}
@@ -163,45 +159,48 @@ func (t *AtomTransition) String() string {
}
type RuleTransition struct {
- BaseTransition
+ *BaseTransition
+
followState ATNState
ruleIndex, precedence int
}
func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
- return &RuleTransition{
- BaseTransition: BaseTransition{
- target: ruleStart,
- isEpsilon: true,
- serializationType: TransitionRULE,
- },
- ruleIndex: ruleIndex,
- precedence: precedence,
- followState: followState,
- }
+
+ t := new(RuleTransition)
+ t.BaseTransition = NewBaseTransition(ruleStart)
+
+ t.ruleIndex = ruleIndex
+ t.precedence = precedence
+ t.followState = followState
+ t.serializationType = TransitionRULE
+ t.isEpsilon = true
+
+ return t
}
-func (t *RuleTransition) Matches(_, _, _ int) bool {
+func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
type EpsilonTransition struct {
- BaseTransition
+ *BaseTransition
+
outermostPrecedenceReturn int
}
func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
- return &EpsilonTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionEPSILON,
- isEpsilon: true,
- },
- outermostPrecedenceReturn: outermostPrecedenceReturn,
- }
+
+ t := new(EpsilonTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionEPSILON
+ t.isEpsilon = true
+ t.outermostPrecedenceReturn = outermostPrecedenceReturn
+ return t
}
-func (t *EpsilonTransition) Matches(_, _, _ int) bool {
+func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@@ -210,20 +209,19 @@ func (t *EpsilonTransition) String() string {
}
type RangeTransition struct {
- BaseTransition
+ *BaseTransition
+
start, stop int
}
func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
- t := &RangeTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionRANGE,
- isEpsilon: false,
- },
- start: start,
- stop: stop,
- }
+
+ t := new(RangeTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionRANGE
+ t.start = start
+ t.stop = stop
t.intervalSet = t.makeLabel()
return t
}
@@ -234,7 +232,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet {
return s
}
-func (t *RangeTransition) Matches(symbol, _, _ int) bool {
+func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= t.start && symbol <= t.stop
}
@@ -254,41 +252,40 @@ type AbstractPredicateTransition interface {
}
type BaseAbstractPredicateTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
- return &BaseAbstractPredicateTransition{
- BaseTransition: BaseTransition{
- target: target,
- },
- }
+
+ t := new(BaseAbstractPredicateTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ return t
}
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
type PredicateTransition struct {
- BaseAbstractPredicateTransition
+ *BaseAbstractPredicateTransition
+
isCtxDependent bool
ruleIndex, predIndex int
}
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
- return &PredicateTransition{
- BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionPREDICATE,
- isEpsilon: true,
- },
- },
- isCtxDependent: isCtxDependent,
- ruleIndex: ruleIndex,
- predIndex: predIndex,
- }
+
+ t := new(PredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPREDICATE
+ t.ruleIndex = ruleIndex
+ t.predIndex = predIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
}
-func (t *PredicateTransition) Matches(_, _, _ int) bool {
+func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@@ -301,25 +298,26 @@ func (t *PredicateTransition) String() string {
}
type ActionTransition struct {
- BaseTransition
+ *BaseTransition
+
isCtxDependent bool
ruleIndex, actionIndex, predIndex int
}
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
- return &ActionTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionACTION,
- isEpsilon: true,
- },
- isCtxDependent: isCtxDependent,
- ruleIndex: ruleIndex,
- actionIndex: actionIndex,
- }
+
+ t := new(ActionTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionACTION
+ t.ruleIndex = ruleIndex
+ t.actionIndex = actionIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
}
-func (t *ActionTransition) Matches(_, _, _ int) bool {
+func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
@@ -328,27 +326,26 @@ func (t *ActionTransition) String() string {
}
type SetTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
- t := &SetTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionSET,
- },
- }
+ t := new(SetTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionSET
if set != nil {
t.intervalSet = set
} else {
t.intervalSet = NewIntervalSet()
t.intervalSet.addOne(TokenInvalidType)
}
+
return t
}
-func (t *SetTransition) Matches(symbol, _, _ int) bool {
+func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.intervalSet.contains(symbol)
}
@@ -357,24 +354,16 @@ func (t *SetTransition) String() string {
}
type NotSetTransition struct {
- SetTransition
+ *SetTransition
}
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
- t := &NotSetTransition{
- SetTransition: SetTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionNOTSET,
- },
- },
- }
- if set != nil {
- t.intervalSet = set
- } else {
- t.intervalSet = NewIntervalSet()
- t.intervalSet.addOne(TokenInvalidType)
- }
+
+ t := new(NotSetTransition)
+
+ t.SetTransition = NewSetTransition(target, set)
+
+ t.serializationType = TransitionNOTSET
return t
}
@@ -388,16 +377,16 @@ func (t *NotSetTransition) String() string {
}
type WildcardTransition struct {
- BaseTransition
+ *BaseTransition
}
func NewWildcardTransition(target ATNState) *WildcardTransition {
- return &WildcardTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionWILDCARD,
- },
- }
+
+ t := new(WildcardTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionWILDCARD
+ return t
}
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
@@ -409,24 +398,24 @@ func (t *WildcardTransition) String() string {
}
type PrecedencePredicateTransition struct {
- BaseAbstractPredicateTransition
+ *BaseAbstractPredicateTransition
+
precedence int
}
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
- return &PrecedencePredicateTransition{
- BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
- BaseTransition: BaseTransition{
- target: target,
- serializationType: TransitionPRECEDENCE,
- isEpsilon: true,
- },
- },
- precedence: precedence,
- }
+
+ t := new(PrecedencePredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPRECEDENCE
+ t.precedence = precedence
+ t.isEpsilon = true
+
+ return t
}
-func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
+func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
similarity index 62%
rename from vendor/github.com/antlr4-go/antlr/v4/tree.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
index c288420fb2..85b4f137b5 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/tree.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
@@ -21,23 +21,29 @@ type Tree interface {
type SyntaxTree interface {
Tree
- GetSourceInterval() Interval
+
+ GetSourceInterval() *Interval
}
type ParseTree interface {
SyntaxTree
+
Accept(Visitor ParseTreeVisitor) interface{}
GetText() string
+
ToStringTree([]string, Recognizer) string
}
type RuleNode interface {
ParseTree
+
GetRuleContext() RuleContext
+ GetBaseRuleContext() *BaseRuleContext
}
type TerminalNode interface {
ParseTree
+
GetSymbol() Token
}
@@ -58,12 +64,12 @@ type BaseParseTreeVisitor struct{}
var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
-func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
-// TODO: Implement this?
+// TODO
//func (this ParseTreeVisitor) Visit(ctx) {
// if (Utils.isArray(ctx)) {
// self := this
@@ -95,14 +101,15 @@ type BaseParseTreeListener struct{}
var _ ParseTreeListener = &BaseParseTreeListener{}
-func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {}
-func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
type TerminalNodeImpl struct {
parentCtx RuleContext
- symbol Token
+
+ symbol Token
}
var _ TerminalNode = &TerminalNodeImpl{}
@@ -116,7 +123,7 @@ func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
return tn
}
-func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
return nil
}
@@ -124,7 +131,7 @@ func (t *TerminalNodeImpl) GetChildren() []Tree {
return nil
}
-func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
panic("Cannot set children on terminal node")
}
@@ -144,7 +151,7 @@ func (t *TerminalNodeImpl) GetPayload() interface{} {
return t.symbol
}
-func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
if t.symbol == nil {
return TreeInvalidInterval
}
@@ -172,7 +179,7 @@ func (t *TerminalNodeImpl) String() string {
return t.symbol.GetText()
}
-func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
return t.String()
}
@@ -207,9 +214,10 @@ func NewParseTreeWalker() *ParseTreeWalker {
return new(ParseTreeWalker)
}
-// Walk performs a walk on the given parse tree starting at the root and going down recursively
-// with depth-first search. On each node, [EnterRule] is called before
-// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
+// Performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, EnterRule is called before
+// recursively walking down into child nodes, then
+// ExitRule is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
switch tt := t.(type) {
case ErrorNode:
@@ -226,7 +234,7 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
}
}
-// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
+// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
// then by triggering the event specific to the given parse tree node
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
ctx := r.GetRuleContext().(ParserRuleContext)
@@ -234,71 +242,12 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
ctx.EnterRule(listener)
}
-// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
-// then by triggering the generic event [ParseTreeListener].ExitEveryRule
+// Exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
ctx := r.GetRuleContext().(ParserRuleContext)
ctx.ExitRule(listener)
listener.ExitEveryRule(ctx)
}
-//goland:noinspection GoUnusedGlobalVariable
var ParseTreeWalkerDefault = NewParseTreeWalker()
-
-type IterativeParseTreeWalker struct {
- *ParseTreeWalker
-}
-
-//goland:noinspection GoUnusedExportedFunction
-func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
- return new(IterativeParseTreeWalker)
-}
-
-func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
- var stack []Tree
- var indexStack []int
- currentNode := t
- currentIndex := 0
-
- for currentNode != nil {
- // pre-order visit
- switch tt := currentNode.(type) {
- case ErrorNode:
- listener.VisitErrorNode(tt)
- case TerminalNode:
- listener.VisitTerminal(tt)
- default:
- i.EnterRule(listener, currentNode.(RuleNode))
- }
- // Move down to first child, if exists
- if currentNode.GetChildCount() > 0 {
- stack = append(stack, currentNode)
- indexStack = append(indexStack, currentIndex)
- currentIndex = 0
- currentNode = currentNode.GetChild(0)
- continue
- }
-
- for {
- // post-order visit
- if ruleNode, ok := currentNode.(RuleNode); ok {
- i.ExitRule(listener, ruleNode)
- }
- // No parent, so no siblings
- if len(stack) == 0 {
- currentNode = nil
- currentIndex = 0
- break
- }
- // Move to next sibling if possible
- currentIndex++
- if stack[len(stack)-1].GetChildCount() > currentIndex {
- currentNode = stack[len(stack)-1].GetChild(currentIndex)
- break
- }
- // No next, sibling, so move up
- currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
- currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
- }
- }
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
similarity index 81%
rename from vendor/github.com/antlr4-go/antlr/v4/trees.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
index f44c05d811..d7dbb03228 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/trees.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
@@ -8,8 +8,10 @@ import "fmt"
/** A set of utility routines useful for all kinds of ANTLR trees. */
-// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
-// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
+// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+//
+// node payloads to get the text for the nodes. Detect
+// parse trees and extract data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
if recog != nil {
@@ -30,7 +32,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
}
for i := 1; i < c; i++ {
s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
- res += " " + s
+ res += (" " + s)
}
res += ")"
return res
@@ -60,7 +62,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
}
}
- // no recognition for rule names
+ // no recog for rule names
payload := t.GetPayload()
if p2, ok := payload.(Token); ok {
return p2.GetText()
@@ -69,9 +71,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
return fmt.Sprint(t.GetPayload())
}
-// TreesGetChildren returns am ordered list of all children of this node
-//
-//goland:noinspection GoUnusedExportedFunction
+// Return ordered list of all children of this node
func TreesGetChildren(t Tree) []Tree {
list := make([]Tree, 0)
for i := 0; i < t.GetChildCount(); i++ {
@@ -80,10 +80,9 @@ func TreesGetChildren(t Tree) []Tree {
return list
}
-// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root
-// and the last node is the parent of this node.
+// Return a list of all ancestors of this node. The first node of
//
-//goland:noinspection GoUnusedExportedFunction
+// list is the root and the last is the parent of this node.
func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
@@ -95,12 +94,10 @@ func TreesgetAncestors(t Tree) []Tree {
return ancestors
}
-//goland:noinspection GoUnusedExportedFunction
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
}
-//goland:noinspection GoUnusedExportedFunction
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
return TreesfindAllNodes(t, ruleIndex, false)
}
@@ -132,7 +129,6 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr
}
}
-//goland:noinspection GoUnusedExportedFunction
func TreesDescendants(t ParseTree) []ParseTree {
nodes := []ParseTree{t}
for i := 0; i < t.GetChildCount(); i++ {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
similarity index 74%
rename from vendor/github.com/antlr4-go/antlr/v4/utils.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
index 36a37f247a..9fad5d916b 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/utils.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
@@ -9,10 +9,8 @@ import (
"errors"
"fmt"
"math/bits"
- "os"
"strconv"
"strings"
- "syscall"
)
func intMin(a, b int) int {
@@ -33,7 +31,7 @@ func intMax(a, b int) int {
type IntStack []int
-var ErrEmptyStack = errors.New("stack is empty")
+var ErrEmptyStack = errors.New("Stack is empty")
func (s *IntStack) Pop() (int, error) {
l := len(*s) - 1
@@ -49,13 +47,33 @@ func (s *IntStack) Push(e int) {
*s = append(*s, e)
}
+type comparable interface {
+ Equals(other Collectable[any]) bool
+}
+
+func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
+
+ return a.Equals(b)
+}
+
+func standardHashFunction(a interface{}) int {
+ if h, ok := a.(hasher); ok {
+ return h.Hash()
+ }
+
+ panic("Not Hasher")
+}
+
+type hasher interface {
+ Hash() int
+}
+
const bitsPerWord = 64
func indexForBit(bit int) int {
return bit / bitsPerWord
}
-//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
func wordForBit(data []uint64, bit int) uint64 {
idx := indexForBit(bit)
if idx >= len(data) {
@@ -76,8 +94,6 @@ type BitSet struct {
data []uint64
}
-// NewBitSet creates a new bitwise set
-// TODO: See if we can replace with the standard library's BitSet
func NewBitSet() *BitSet {
return &BitSet{}
}
@@ -107,7 +123,7 @@ func (b *BitSet) or(set *BitSet) {
setLen := set.minLen()
maxLen := intMax(bLen, setLen)
if maxLen > len(b.data) {
- // Increase the size of len(b.data) to represent the bits in both sets.
+ // Increase the size of len(b.data) to repesent the bits in both sets.
data := make([]uint64, maxLen)
copy(data, b.data)
b.data = data
@@ -230,6 +246,37 @@ func (a *AltDict) values() []interface{} {
return vs
}
+type DoubleDict struct {
+ data map[int]map[int]interface{}
+}
+
+func NewDoubleDict() *DoubleDict {
+ dd := new(DoubleDict)
+ dd.data = make(map[int]map[int]interface{})
+ return dd
+}
+
+func (d *DoubleDict) Get(a, b int) interface{} {
+ data := d.data[a]
+
+ if data == nil {
+ return nil
+ }
+
+ return data[b]
+}
+
+func (d *DoubleDict) set(a, b int, o interface{}) {
+ data := d.data[a]
+
+ if data == nil {
+ data = make(map[int]interface{})
+ d.data[a] = data
+ }
+
+ data[b] = o
+}
+
func EscapeWhitespace(s string, escapeSpaces bool) string {
s = strings.Replace(s, "\t", "\\t", -1)
@@ -241,7 +288,6 @@ func EscapeWhitespace(s string, escapeSpaces bool) string {
return s
}
-//goland:noinspection GoUnusedExportedFunction
func TerminalNodeToStringArray(sa []TerminalNode) []string {
st := make([]string, len(sa))
@@ -252,7 +298,6 @@ func TerminalNodeToStringArray(sa []TerminalNode) []string {
return st
}
-//goland:noinspection GoUnusedExportedFunction
func PrintArrayJavaStyle(sa []string) string {
var buffer bytes.Buffer
@@ -305,77 +350,3 @@ func murmurFinish(h int, numberOfWords int) int {
return int(hash)
}
-
-func isDirectory(dir string) (bool, error) {
- fileInfo, err := os.Stat(dir)
- if err != nil {
- switch {
- case errors.Is(err, syscall.ENOENT):
- // The given directory does not exist, so we will try to create it
- //
- err = os.MkdirAll(dir, 0755)
- if err != nil {
- return false, err
- }
-
- return true, nil
- case err != nil:
- return false, err
- default:
- }
- }
- return fileInfo.IsDir(), err
-}
-
-// intSlicesEqual returns true if the two slices of ints are equal, and is a little
-// faster than slices.Equal.
-func intSlicesEqual(s1, s2 []int) bool {
- if s1 == nil && s2 == nil {
- return true
- }
- if s1 == nil || s2 == nil {
- return false
- }
- if len(s1) == 0 && len(s2) == 0 {
- return true
- }
-
- if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) {
- return false
- }
- // If the slices are using the same memory, then they are the same slice
- if &s1[0] == &s2[0] {
- return true
- }
- for i, v := range s1 {
- if v != s2[i] {
- return false
- }
- }
- return true
-}
-
-func pcSliceEqual(s1, s2 []*PredictionContext) bool {
- if s1 == nil && s2 == nil {
- return true
- }
- if s1 == nil || s2 == nil {
- return false
- }
- if len(s1) == 0 && len(s2) == 0 {
- return true
- }
- if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) {
- return false
- }
- // If the slices are using the same memory, then they are the same slice
- if &s1[0] == &s2[0] {
- return true
- }
- for i, v := range s1 {
- if !v.Equals(s2[i]) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
new file mode 100644
index 0000000000..c9bd6751e3
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
@@ -0,0 +1,235 @@
+package antlr
+
+import "math"
+
+const (
+ _initalCapacity = 16
+ _initalBucketCapacity = 8
+ _loadFactor = 0.75
+)
+
+type Set interface {
+ Add(value interface{}) (added interface{})
+ Len() int
+ Get(value interface{}) (found interface{})
+ Contains(value interface{}) bool
+ Values() []interface{}
+ Each(f func(interface{}) bool)
+}
+
+type array2DHashSet struct {
+ buckets [][]Collectable[any]
+ hashcodeFunction func(interface{}) int
+ equalsFunction func(Collectable[any], Collectable[any]) bool
+
+ n int // How many elements in set
+ threshold int // when to expand
+
+ currentPrime int // jump by 4 primes each expand or whatever
+ initialBucketCapacity int
+}
+
+func (as *array2DHashSet) Each(f func(interface{}) bool) {
+ if as.Len() < 1 {
+ return
+ }
+
+ for _, bucket := range as.buckets {
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+ if !f(o) {
+ return
+ }
+ }
+ }
+}
+
+func (as *array2DHashSet) Values() []interface{} {
+ if as.Len() < 1 {
+ return nil
+ }
+
+ values := make([]interface{}, 0, as.Len())
+ as.Each(func(i interface{}) bool {
+ values = append(values, i)
+ return true
+ })
+ return values
+}
+
+func (as *array2DHashSet) Contains(value Collectable[any]) bool {
+ return as.Get(value) != nil
+}
+
+func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
+ if as.n > as.threshold {
+ as.expand()
+ }
+ return as.innerAdd(value)
+}
+
+func (as *array2DHashSet) expand() {
+ old := as.buckets
+
+ as.currentPrime += 4
+
+ var (
+ newCapacity = len(as.buckets) << 1
+ newTable = as.createBuckets(newCapacity)
+ newBucketLengths = make([]int, len(newTable))
+ )
+
+ as.buckets = newTable
+ as.threshold = int(float64(newCapacity) * _loadFactor)
+
+ for _, bucket := range old {
+ if bucket == nil {
+ continue
+ }
+
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+
+ b := as.getBuckets(o)
+ bucketLength := newBucketLengths[b]
+ var newBucket []Collectable[any]
+ if bucketLength == 0 {
+ // new bucket
+ newBucket = as.createBucket(as.initialBucketCapacity)
+ newTable[b] = newBucket
+ } else {
+ newBucket = newTable[b]
+ if bucketLength == len(newBucket) {
+ // expand
+ newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
+ copy(newBucketCopy[:bucketLength], newBucket)
+ newBucket = newBucketCopy
+ newTable[b] = newBucket
+ }
+ }
+
+ newBucket[bucketLength] = o
+ newBucketLengths[b]++
+ }
+ }
+}
+
+func (as *array2DHashSet) Len() int {
+ return as.n
+}
+
+func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
+ if o == nil {
+ return nil
+ }
+
+ b := as.getBuckets(o)
+ bucket := as.buckets[b]
+ if bucket == nil { // no bucket
+ return nil
+ }
+
+ for _, e := range bucket {
+ if e == nil {
+ return nil // empty slot; not there
+ }
+ if as.equalsFunction(e, o) {
+ return e
+ }
+ }
+
+ return nil
+}
+
+func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
+ b := as.getBuckets(o)
+
+ bucket := as.buckets[b]
+
+ // new bucket
+ if bucket == nil {
+ bucket = as.createBucket(as.initialBucketCapacity)
+ bucket[0] = o
+
+ as.buckets[b] = bucket
+ as.n++
+ return o
+ }
+
+ // look for it in bucket
+ for i := 0; i < len(bucket); i++ {
+ existing := bucket[i]
+ if existing == nil { // empty slot; not there, add.
+ bucket[i] = o
+ as.n++
+ return o
+ }
+
+ if as.equalsFunction(existing, o) { // found existing, quit
+ return existing
+ }
+ }
+
+ // full bucket, expand and add to end
+ oldLength := len(bucket)
+ bucketCopy := make([]Collectable[any], oldLength<<1)
+ copy(bucketCopy[:oldLength], bucket)
+ bucket = bucketCopy
+ as.buckets[b] = bucket
+ bucket[oldLength] = o
+ as.n++
+ return o
+}
+
+func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
+ hash := as.hashcodeFunction(value)
+ return hash & (len(as.buckets) - 1)
+}
+
+func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
+ return make([][]Collectable[any], cap)
+}
+
+func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
+ return make([]Collectable[any], cap)
+}
+
+func newArray2DHashSetWithCap(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
+ initCap int,
+ initBucketCap int,
+) *array2DHashSet {
+ if hashcodeFunction == nil {
+ hashcodeFunction = standardHashFunction
+ }
+
+ if equalsFunction == nil {
+ equalsFunction = standardEqualsFunction
+ }
+
+ ret := &array2DHashSet{
+ hashcodeFunction: hashcodeFunction,
+ equalsFunction: equalsFunction,
+
+ n: 0,
+ threshold: int(math.Floor(_initalCapacity * _loadFactor)),
+
+ currentPrime: 1,
+ initialBucketCapacity: initBucketCap,
+ }
+
+ ret.buckets = ret.createBuckets(initCap)
+ return ret
+}
+
+func newArray2DHashSet(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
+) *array2DHashSet {
+ return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
deleted file mode 100644
index 38ea34ff51..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/.gitignore
+++ /dev/null
@@ -1,18 +0,0 @@
-### Go template
-
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-
-# Go workspace file
-go.work
-
-# No Goland stuff in this repo
-.idea
diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
deleted file mode 100644
index a22292eb5a..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-3. Neither name of copyright holders nor the names of its contributors
-may be used to endorse or promote products derived from this software
-without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md
deleted file mode 100644
index 03e5b83eb1..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr)
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr)
-[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest)
-[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest)
-[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity)
-[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
-[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/)
-# ANTLR4 Go Runtime Module Repo
-
-IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
-
- - Do not submit PRs or any change requests to this repo
- - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
- - This repo contains the Go runtime that your generated projects should import
-
-## Introduction
-
-This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
-at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
-the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
-
-The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
-
-### Why?
-
-The `go get` command is unable to retrieve the Go runtime when it is embedded so
-deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
-does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
-causes confusion.
-
-For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as:
-
-```sh
-require (
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
-)
-```
-
-Where you would expect to see:
-
-```sh
-require (
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
-)
-```
-
-The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and
-from whence users can expect `go get` to behave as expected.
-
-
-# Documentation
-Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
-migrating existing projects to use the new module location and for information on how to use the Go runtime in
-general.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
deleted file mode 100644
index 48bd362bf5..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
-Package antlr implements the Go version of the ANTLR 4 runtime.
-
-# The ANTLR Tool
-
-ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
-or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
-From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
-(or visitor) that makes it easy to respond to the recognition of phrases of interest.
-
-# Go Runtime
-
-At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
-source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
-ANTLR4 that it is compatible with (I.E. uses the /v4 path).
-
-However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
-of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
-This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
-list the release tag such as @4.13.1 - this was confusing, to say the least.
-
-As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
-(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
-which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
-
-This means that if you are using the source code without modules, you should also use the source code in the [new repo].
-Though we highly recommend that you use go modules, as they are now idiomatic for Go.
-
-I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
-
-Go runtime author: [Jim Idle] jimi@idle.ws
-
-# Code Generation
-
-ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
-runtime library, written specifically to support the generated code in the target language. This library is the
-runtime for the Go target.
-
-To generate code for the go target, it is generally recommended to place the source grammar files in a package of
-their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
-it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
-that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
-way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
-your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
-it was at any point in its history.
-
-Here is a general/recommended template for an ANTLR based recognizer in Go:
-
- .
- ├── parser
- │ ├── mygrammar.g4
- │ ├── antlr-4.13.1-complete.jar
- │ ├── generate.go
- │ └── generate.sh
- ├── parsing - generated code goes here
- │ └── error_listeners.go
- ├── go.mod
- ├── go.sum
- ├── main.go
- └── main_test.go
-
-Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
-
-The generate.go file then looks like this:
-
- package parser
-
- //go:generate ./generate.sh
-
-And the generate.sh file will look similar to this:
-
- #!/bin/sh
-
- alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
- antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
-
-depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here
-is to generate the code into a
-
-From the command line at the root of your source package (location of go.mo)d) you can then simply issue the command:
-
- go generate ./...
-
-Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
-by importing the parsing package.
-
-There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
-
-# Copyright Notice
-
-Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
-
-Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
-
-[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
-[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
-[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
-[new repo]: https://github.com/antlr4-go/antlr
-[Jim Idle]: https://github.com/jimidle
-[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
-*/
-package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
deleted file mode 100644
index 267308bb3d..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-const (
- lexerConfig = iota // Indicates that this ATNConfig is for a lexer
- parserConfig // Indicates that this ATNConfig is for a parser
-)
-
-// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
-// context). The syntactic context is a graph-structured stack node whose
-// path(s) to the root is the rule invocation(s) chain used to arrive in the
-// state. The semantic context is the tree of semantic predicates encountered
-// before reaching an ATN state.
-type ATNConfig struct {
- precedenceFilterSuppressed bool
- state ATNState
- alt int
- context *PredictionContext
- semanticContext SemanticContext
- reachesIntoOuterContext int
- cType int // lexerConfig or parserConfig
- lexerActionExecutor *LexerActionExecutor
- passedThroughNonGreedyDecision bool
-}
-
-// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
-func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
- return NewATNConfig5(state, alt, context, SemanticContextNone)
-}
-
-// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
-func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil") // TODO: Necessary?
- }
-
- pac := &ATNConfig{}
- pac.state = state
- pac.alt = alt
- pac.context = context
- pac.semanticContext = semanticContext
- pac.cType = parserConfig
- return pac
-}
-
-// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
-func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
- return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
-}
-
-// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
-func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
- return NewATNConfig(c, state, c.GetContext(), semanticContext)
-}
-
-// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
-func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
- return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
-}
-
-// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
-func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
- return NewATNConfig(c, state, context, c.GetSemanticContext())
-}
-
-// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
-// are just wrappers around this one.
-func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
- b := &ATNConfig{}
- b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
- b.cType = parserConfig
- return b
-}
-
-func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
-
- a.state = state
- a.alt = alt
- a.context = context
- a.semanticContext = semanticContext
- a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
- a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
-}
-
-func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
- return a.precedenceFilterSuppressed
-}
-
-func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
- a.precedenceFilterSuppressed = v
-}
-
-// GetState returns the ATN state associated with this configuration
-func (a *ATNConfig) GetState() ATNState {
- return a.state
-}
-
-// GetAlt returns the alternative associated with this configuration
-func (a *ATNConfig) GetAlt() int {
- return a.alt
-}
-
-// SetContext sets the rule invocation stack associated with this configuration
-func (a *ATNConfig) SetContext(v *PredictionContext) {
- a.context = v
-}
-
-// GetContext returns the rule invocation stack associated with this configuration
-func (a *ATNConfig) GetContext() *PredictionContext {
- return a.context
-}
-
-// GetSemanticContext returns the semantic context associated with this configuration
-func (a *ATNConfig) GetSemanticContext() SemanticContext {
- return a.semanticContext
-}
-
-// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
-func (a *ATNConfig) GetReachesIntoOuterContext() int {
- return a.reachesIntoOuterContext
-}
-
-// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
-func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
- a.reachesIntoOuterContext = v
-}
-
-// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
-// for a collection.
-//
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
- switch a.cType {
- case lexerConfig:
- return a.LEquals(o)
- case parserConfig:
- return a.PEquals(o)
- default:
- panic("Invalid ATNConfig type")
- }
-}
-
-// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
-// for a collection.
-//
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
- var other, ok = o.(*ATNConfig)
-
- if !ok {
- return false
- }
- if a == other {
- return true
- } else if other == nil {
- return false
- }
-
- var equal bool
-
- if a.context == nil {
- equal = other.context == nil
- } else {
- equal = a.context.Equals(other.context)
- }
-
- var (
- nums = a.state.GetStateNumber() == other.state.GetStateNumber()
- alts = a.alt == other.alt
- cons = a.semanticContext.Equals(other.semanticContext)
- sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
- )
-
- return nums && alts && cons && sups && equal
-}
-
-// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
-// is required for a collection
-func (a *ATNConfig) Hash() int {
- switch a.cType {
- case lexerConfig:
- return a.LHash()
- case parserConfig:
- return a.PHash()
- default:
- panic("Invalid ATNConfig type")
- }
-}
-
-// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
-// is required for a collection
-func (a *ATNConfig) PHash() int {
- var c int
- if a.context != nil {
- c = a.context.Hash()
- }
-
- h := murmurInit(7)
- h = murmurUpdate(h, a.state.GetStateNumber())
- h = murmurUpdate(h, a.alt)
- h = murmurUpdate(h, c)
- h = murmurUpdate(h, a.semanticContext.Hash())
- return murmurFinish(h, 4)
-}
-
-// String returns a string representation of the ATNConfig, usually used for debugging purposes
-func (a *ATNConfig) String() string {
- var s1, s2, s3 string
-
- if a.context != nil {
- s1 = ",[" + fmt.Sprint(a.context) + "]"
- }
-
- if a.semanticContext != SemanticContextNone {
- s2 = "," + fmt.Sprint(a.semanticContext)
- }
-
- if a.reachesIntoOuterContext > 0 {
- s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
- }
-
- return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
-}
-
-func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
- lac := &ATNConfig{}
- lac.state = state
- lac.alt = alt
- lac.context = context
- lac.semanticContext = SemanticContextNone
- lac.cType = lexerConfig
- return lac
-}
-
-func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
- lac := &ATNConfig{}
- lac.lexerActionExecutor = c.lexerActionExecutor
- lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
- lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
- lac.cType = lexerConfig
- return lac
-}
-
-func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
- lac := &ATNConfig{}
- lac.lexerActionExecutor = lexerActionExecutor
- lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
- lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
- lac.cType = lexerConfig
- return lac
-}
-
-func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
- lac := &ATNConfig{}
- lac.lexerActionExecutor = c.lexerActionExecutor
- lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
- lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
- lac.cType = lexerConfig
- return lac
-}
-
-//goland:noinspection GoUnusedExportedFunction
-func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
- lac := &ATNConfig{}
- lac.state = state
- lac.alt = alt
- lac.context = context
- lac.semanticContext = SemanticContextNone
- lac.cType = lexerConfig
- return lac
-}
-
-// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (a *ATNConfig) LHash() int {
- var f int
- if a.passedThroughNonGreedyDecision {
- f = 1
- } else {
- f = 0
- }
- h := murmurInit(7)
- h = murmurUpdate(h, a.state.GetStateNumber())
- h = murmurUpdate(h, a.alt)
- h = murmurUpdate(h, a.context.Hash())
- h = murmurUpdate(h, a.semanticContext.Hash())
- h = murmurUpdate(h, f)
- h = murmurUpdate(h, a.lexerActionExecutor.Hash())
- h = murmurFinish(h, 6)
- return h
-}
-
-// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
- var otherT, ok = other.(*ATNConfig)
- if !ok {
- return false
- } else if a == otherT {
- return true
- } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
- return false
- }
-
- switch {
- case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
- return true
- case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
- if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
- return false
- }
- default:
- return false // One but not both, are nil
- }
-
- return a.PEquals(otherT)
-}
-
-func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
- var ds, ok = target.(DecisionState)
-
- return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
deleted file mode 100644
index 52dbaf8064..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-// ATNConfigSet is a specialized set of ATNConfig that tracks information
-// about its elements and can combine similar configurations using a
-// graph-structured stack.
-type ATNConfigSet struct {
- cachedHash int
-
- // configLookup is used to determine whether two ATNConfigSets are equal. We
- // need all configurations with the same (s, i, _, semctx) to be equal. A key
- // effectively doubles the number of objects associated with ATNConfigs. All
- // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
- // read-only because a set becomes a DFA state.
- configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
-
- // configs is the added elements that did not match an existing key in configLookup
- configs []*ATNConfig
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves re-computation. Can we track conflicts as they
- // are added to save scanning configs later?
- conflictingAlts *BitSet
-
- // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
- // we hit a pred while computing a closure operation. Do not make a DFA state
- // from the ATNConfigSet in this case. TODO: How is this used by parsers?
- dipsIntoOuterContext bool
-
- // fullCtx is whether it is part of a full context LL prediction. Used to
- // determine how to merge $. It is a wildcard with SLL, but not for an LL
- // context merge.
- fullCtx bool
-
- // Used in parser and lexer. In lexer, it indicates we hit a pred
- // while computing a closure operation. Don't make a DFA state from this set.
- hasSemanticContext bool
-
- // readOnly is whether it is read-only. Do not
- // allow any code to manipulate the set if true because DFA states will point at
- // sets and those must not change. It not, protect other fields; conflictingAlts
- // in particular, which is assigned after readOnly.
- readOnly bool
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves re-computation. Can we track conflicts as they
- // are added to save scanning configs later?
- uniqueAlt int
-}
-
-// Alts returns the combined set of alts for all the configurations in this set.
-func (b *ATNConfigSet) Alts() *BitSet {
- alts := NewBitSet()
- for _, it := range b.configs {
- alts.add(it.GetAlt())
- }
- return alts
-}
-
-// NewATNConfigSet creates a new ATNConfigSet instance.
-func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
- return &ATNConfigSet{
- cachedHash: -1,
- configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
- fullCtx: fullCtx,
- }
-}
-
-// Add merges contexts with existing configs for (s, i, pi, _),
-// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
-// 'pi' is the [ATNConfig].semanticContext.
-//
-// We use (s,i,pi) as the key.
-// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
-func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if config.GetSemanticContext() != SemanticContextNone {
- b.hasSemanticContext = true
- }
-
- if config.GetReachesIntoOuterContext() > 0 {
- b.dipsIntoOuterContext = true
- }
-
- existing, present := b.configLookup.Put(config)
-
- // The config was not already in the set
- //
- if !present {
- b.cachedHash = -1
- b.configs = append(b.configs, config) // Track order here
- return true
- }
-
- // Merge a previous (s, i, pi, _) with it and save the result
- rootIsWildcard := !b.fullCtx
- merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
-
- // No need to check for existing.context because config.context is in the cache,
- // since the only way to create new graphs is the "call rule" and here. We cache
- // at both places.
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
-
- // Preserve the precedence filter suppression during the merge
- if config.getPrecedenceFilterSuppressed() {
- existing.setPrecedenceFilterSuppressed(true)
- }
-
- // Replace the context because there is no need to do alt mapping
- existing.SetContext(merged)
-
- return true
-}
-
-// GetStates returns the set of states represented by all configurations in this config set
-func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
-
- // states uses the standard comparator and Hash() provided by the ATNState instance
- //
- states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
-
- for i := 0; i < len(b.configs); i++ {
- states.Put(b.configs[i].GetState())
- }
-
- return states
-}
-
-func (b *ATNConfigSet) GetPredicates() []SemanticContext {
- predicates := make([]SemanticContext, 0)
-
- for i := 0; i < len(b.configs); i++ {
- c := b.configs[i].GetSemanticContext()
-
- if c != SemanticContextNone {
- predicates = append(predicates, c)
- }
- }
-
- return predicates
-}
-
-func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
- if b.readOnly {
- panic("set is read-only")
- }
-
- // Empty indicate no optimization is possible
- if b.configLookup == nil || b.configLookup.Len() == 0 {
- return
- }
-
- for i := 0; i < len(b.configs); i++ {
- config := b.configs[i]
- config.SetContext(interpreter.getCachedContext(config.GetContext()))
- }
-}
-
-func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
- for i := 0; i < len(coll); i++ {
- b.Add(coll[i], nil)
- }
-
- return false
-}
-
-// Compare The configs are only equal if they are in the same order and their Equals function returns true.
-// Java uses ArrayList.equals(), which requires the same order.
-func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
- if len(b.configs) != len(bs.configs) {
- return false
- }
- for i := 0; i < len(b.configs); i++ {
- if !b.configs[i].Equals(bs.configs[i]) {
- return false
- }
- }
-
- return true
-}
-
-func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
- if b == other {
- return true
- } else if _, ok := other.(*ATNConfigSet); !ok {
- return false
- }
-
- other2 := other.(*ATNConfigSet)
- var eca bool
- switch {
- case b.conflictingAlts == nil && other2.conflictingAlts == nil:
- eca = true
- case b.conflictingAlts != nil && other2.conflictingAlts != nil:
- eca = b.conflictingAlts.equals(other2.conflictingAlts)
- }
- return b.configs != nil &&
- b.fullCtx == other2.fullCtx &&
- b.uniqueAlt == other2.uniqueAlt &&
- eca &&
- b.hasSemanticContext == other2.hasSemanticContext &&
- b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
- b.Compare(other2)
-}
-
-func (b *ATNConfigSet) Hash() int {
- if b.readOnly {
- if b.cachedHash == -1 {
- b.cachedHash = b.hashCodeConfigs()
- }
-
- return b.cachedHash
- }
-
- return b.hashCodeConfigs()
-}
-
-func (b *ATNConfigSet) hashCodeConfigs() int {
- h := 1
- for _, config := range b.configs {
- h = 31*h + config.Hash()
- }
- return h
-}
-
-func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
- if b.readOnly {
- panic("not implemented for read-only sets")
- }
- if b.configLookup == nil {
- return false
- }
- return b.configLookup.Contains(item)
-}
-
-func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
- return b.Contains(item)
-}
-
-func (b *ATNConfigSet) Clear() {
- if b.readOnly {
- panic("set is read-only")
- }
- b.configs = make([]*ATNConfig, 0)
- b.cachedHash = -1
- b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
-}
-
-func (b *ATNConfigSet) String() string {
-
- s := "["
-
- for i, c := range b.configs {
- s += c.String()
-
- if i != len(b.configs)-1 {
- s += ", "
- }
- }
-
- s += "]"
-
- if b.hasSemanticContext {
- s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
- }
-
- if b.uniqueAlt != ATNInvalidAltNumber {
- s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
- }
-
- if b.conflictingAlts != nil {
- s += ",conflictingAlts=" + b.conflictingAlts.String()
- }
-
- if b.dipsIntoOuterContext {
- s += ",dipsIntoOuterContext"
- }
-
- return s
-}
-
-// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
-// for use in lexers.
-func NewOrderedATNConfigSet() *ATNConfigSet {
- return &ATNConfigSet{
- cachedHash: -1,
- // This set uses the standard Hash() and Equals() from ATNConfig
- configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
- fullCtx: false,
- }
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
deleted file mode 100644
index c2b724514d..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/configuration.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package antlr
-
-type runtimeConfiguration struct {
- statsTraceStacks bool
- lexerATNSimulatorDebug bool
- lexerATNSimulatorDFADebug bool
- parserATNSimulatorDebug bool
- parserATNSimulatorTraceATNSim bool
- parserATNSimulatorDFADebug bool
- parserATNSimulatorRetryDebug bool
- lRLoopEntryBranchOpt bool
- memoryManager bool
-}
-
-// Global runtime configuration
-var runtimeConfig = runtimeConfiguration{
- lRLoopEntryBranchOpt: true,
-}
-
-type runtimeOption func(*runtimeConfiguration) error
-
-// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
-// It uses the functional options pattern for go. This is a package global function as it operates on the runtime
-// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
-// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
-// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
-// memory allocation type used by the runtime such as sync.Pool or not.
-//
-// The options are applied in the order they are passed in, so the last option will override any previous options.
-//
-// For example, if you want to turn on the collection create point stack flag to true, you can do:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
-//
-// If you want to turn it off, you can do:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
-func ConfigureRuntime(options ...runtimeOption) error {
- for _, option := range options {
- err := option(&runtimeConfig)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
-// certain structs, such as collections, or the use point of certain methods such as Put().
-// Because this can be expensive, it is turned off by default. However, it
-// can be useful to track down exactly where memory is being created and used.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
-func WithStatsTraceStacks(trace bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.statsTraceStacks = trace
- return nil
- }
-}
-
-// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
-// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
-func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.lexerATNSimulatorDebug = debug
- return nil
- }
-}
-
-// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
-// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
-func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.lexerATNSimulatorDFADebug = debug
- return nil
- }
-}
-
-// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
-// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
-func WithParserATNSimulatorDebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorDebug = debug
- return nil
- }
-}
-
-// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
-// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
-func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorTraceATNSim = trace
- return nil
- }
-}
-
-// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
-// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
-// to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
-func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorDFADebug = debug
- return nil
- }
-}
-
-// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
-// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
-// Only useful to the runtime maintainers.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
-func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.parserATNSimulatorRetryDebug = debug
- return nil
- }
-}
-
-// WithLRLoopEntryBranchOpt sets the global flag indicating whether let recursive loop operations should be
-// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
-// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
-//
-// Note that default is to use this optimization.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
-//
-// You can turn it off at any time using:
-//
-// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
-func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.lRLoopEntryBranchOpt = off
- return nil
- }
-}
-
-// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
-// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
-// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
-// for fixing your grammar by ridding yourself of extreme ambiguity. BUt if you are just trying to reuse an opensource
-// grammar, this may help make it more practical.
-//
-// Note that default is to use normal Go memory allocation and not pool memory.
-//
-// Use:
-//
-// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
-//
-// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
-// and should remember to nil out any references to the parser or lexer when you are done with them.
-func WithMemoryManager(use bool) runtimeOption {
- return func(config *runtimeConfiguration) error {
- config.memoryManager = use
- return nil
- }
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
deleted file mode 100644
index ab4e96be52..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bufio"
- "io"
-)
-
-type InputStream struct {
- name string
- index int
- data []rune
- size int
-}
-
-// NewIoStream creates a new input stream from the given io.Reader reader.
-// Note that the reader is read completely into memory and so it must actually
-// have a stopping point - you cannot pass in a reader on an open-ended source such
-// as a socket for instance.
-func NewIoStream(reader io.Reader) *InputStream {
-
- rReader := bufio.NewReader(reader)
-
- is := &InputStream{
- name: "",
- index: 0,
- }
-
- // Pre-build the buffer and read runes reasonably efficiently given that
- // we don't exactly know how big the input is.
- //
- is.data = make([]rune, 0, 512)
- for {
- r, _, err := rReader.ReadRune()
- if err != nil {
- break
- }
- is.data = append(is.data, r)
- }
- is.size = len(is.data) // number of runes
- return is
-}
-
-// NewInputStream creates a new input stream from the given string
-func NewInputStream(data string) *InputStream {
-
- is := &InputStream{
- name: "",
- index: 0,
- data: []rune(data), // This is actually the most efficient way
- }
- is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
- return is
-}
-
-func (is *InputStream) reset() {
- is.index = 0
-}
-
-// Consume moves the input pointer to the next character in the input stream
-func (is *InputStream) Consume() {
- if is.index >= is.size {
- // assert is.LA(1) == TokenEOF
- panic("cannot consume EOF")
- }
- is.index++
-}
-
-// LA returns the character at the given offset from the start of the input stream
-func (is *InputStream) LA(offset int) int {
-
- if offset == 0 {
- return 0 // nil
- }
- if offset < 0 {
- offset++ // e.g., translate LA(-1) to use offset=0
- }
- pos := is.index + offset - 1
-
- if pos < 0 || pos >= is.size { // invalid
- return TokenEOF
- }
-
- return int(is.data[pos])
-}
-
-// LT returns the character at the given offset from the start of the input stream
-func (is *InputStream) LT(offset int) int {
- return is.LA(offset)
-}
-
-// Index returns the current offset in to the input stream
-func (is *InputStream) Index() int {
- return is.index
-}
-
-// Size returns the total number of characters in the input stream
-func (is *InputStream) Size() int {
- return is.size
-}
-
-// Mark does nothing here as we have entire buffer
-func (is *InputStream) Mark() int {
- return -1
-}
-
-// Release does nothing here as we have entire buffer
-func (is *InputStream) Release(_ int) {
-}
-
-// Seek the input point to the provided index offset
-func (is *InputStream) Seek(index int) {
- if index <= is.index {
- is.index = index // just jump don't update stream state (line,...)
- return
- }
- // seek forward
- is.index = intMin(index, is.size)
-}
-
-// GetText returns the text from the input stream from the start to the stop index
-func (is *InputStream) GetText(start int, stop int) string {
- if stop >= is.size {
- stop = is.size - 1
- }
- if start >= is.size {
- return ""
- }
-
- return string(is.data[start : stop+1])
-}
-
-// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
-// character of the stop token
-func (is *InputStream) GetTextFromTokens(start, stop Token) string {
- if start != nil && stop != nil {
- return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
- }
-
- return ""
-}
-
-func (is *InputStream) GetTextFromInterval(i Interval) string {
- return is.GetText(i.Start, i.Stop)
-}
-
-func (*InputStream) GetSourceName() string {
- return "Obtained from string"
-}
-
-// String returns the entire input stream as a string
-func (is *InputStream) String() string {
- return string(is.data)
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
deleted file mode 100644
index 6d668f7983..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
+++ /dev/null
@@ -1,684 +0,0 @@
-package antlr
-
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-import (
- "container/list"
- "runtime/debug"
- "sort"
-)
-
-// Collectable is an interface that a struct should implement if it is to be
-// usable as a key in these collections.
-type Collectable[T any] interface {
- Hash() int
- Equals(other Collectable[T]) bool
-}
-
-type Comparator[T any] interface {
- Hash1(o T) int
- Equals2(T, T) bool
-}
-
-type CollectionSource int
-type CollectionDescriptor struct {
- SybolicName string
- Description string
-}
-
-const (
- UnknownCollection CollectionSource = iota
- ATNConfigLookupCollection
- ATNStateCollection
- DFAStateCollection
- ATNConfigCollection
- PredictionContextCollection
- SemanticContextCollection
- ClosureBusyCollection
- PredictionVisitedCollection
- MergeCacheCollection
- PredictionContextCacheCollection
- AltSetCollection
- ReachSetCollection
-)
-
-var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
- UnknownCollection: {
- SybolicName: "UnknownCollection",
- Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
- },
- ATNConfigCollection: {
- SybolicName: "ATNConfigCollection",
- Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
- "For instance, it is used to store the results of the closure() operation in the ATN.",
- },
- ATNConfigLookupCollection: {
- SybolicName: "ATNConfigLookupCollection",
- Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
- "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
- },
- ATNStateCollection: {
- SybolicName: "ATNStateCollection",
- Description: "ATNState collection. This is used to store the states of the ATN.",
- },
- DFAStateCollection: {
- SybolicName: "DFAStateCollection",
- Description: "DFAState collection. This is used to store the states of the DFA.",
- },
- PredictionContextCollection: {
- SybolicName: "PredictionContextCollection",
- Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
- },
- SemanticContextCollection: {
- SybolicName: "SemanticContextCollection",
- Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
- },
- ClosureBusyCollection: {
- SybolicName: "ClosureBusyCollection",
- Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
- "It stores ATNConfigs that are currently being processed in the closure() operation.",
- },
- PredictionVisitedCollection: {
- SybolicName: "PredictionVisitedCollection",
- Description: "A map that records whether we have visited a particular context when searching through cached entries.",
- },
- MergeCacheCollection: {
- SybolicName: "MergeCacheCollection",
- Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
- },
- PredictionContextCacheCollection: {
- SybolicName: "PredictionContextCacheCollection",
- Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
- },
- AltSetCollection: {
- SybolicName: "AltSetCollection",
- Description: "Used to eliminate duplicate alternatives in an ATN config set.",
- },
- ReachSetCollection: {
- SybolicName: "ReachSetCollection",
- Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
- },
-}
-
-// JStore implements a container that allows the use of a struct to calculate the key
-// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
-// serve the needs of the ANTLR Go runtime.
-//
-// For ease of porting the logic of the runtime from the master target (Java), this collection
-// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
-// function as the key. The values are stored in a standard go map which internally is a form of hashmap
-// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
-// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
-// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
-// we understand the requirements, then this is fine - this is not a general purpose collection.
-type JStore[T any, C Comparator[T]] struct {
- store map[int][]T
- len int
- comparator Comparator[T]
- stats *JStatRec
-}
-
-func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
-
- if comparator == nil {
- panic("comparator cannot be nil")
- }
-
- s := &JStore[T, C]{
- store: make(map[int][]T, 1),
- comparator: comparator,
- }
- if collectStats {
- s.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
-
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- s.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(s.stats)
- }
- return s
-}
-
-// Put will store given value in the collection. Note that the key for storage is generated from
-// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
-// as any kind of general collection.
-//
-// If the key has a hash conflict, then the value will be added to the slice of values associated with the
-// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
-// tested by calling the equals() method on the key.
-//
-// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
-//
-// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
-func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
-
- if collectStats {
- s.stats.Puts++
- }
- kh := s.comparator.Hash1(value)
-
- var hClash bool
- for _, v1 := range s.store[kh] {
- hClash = true
- if s.comparator.Equals2(value, v1) {
- if collectStats {
- s.stats.PutHits++
- s.stats.PutHashConflicts++
- }
- return v1, true
- }
- if collectStats {
- s.stats.PutMisses++
- }
- }
- if collectStats && hClash {
- s.stats.PutHashConflicts++
- }
- s.store[kh] = append(s.store[kh], value)
-
- if collectStats {
- if len(s.store[kh]) > s.stats.MaxSlotSize {
- s.stats.MaxSlotSize = len(s.store[kh])
- }
- }
- s.len++
- if collectStats {
- s.stats.CurSize = s.len
- if s.len > s.stats.MaxSize {
- s.stats.MaxSize = s.len
- }
- }
- return value, false
-}
-
-// Get will return the value associated with the key - the type of the key is the same type as the value
-// which would not generally be useful, but this is a specific thing for ANTLR where the key is
-// generated using the object we are going to store.
-func (s *JStore[T, C]) Get(key T) (T, bool) {
- if collectStats {
- s.stats.Gets++
- }
- kh := s.comparator.Hash1(key)
- var hClash bool
- for _, v := range s.store[kh] {
- hClash = true
- if s.comparator.Equals2(key, v) {
- if collectStats {
- s.stats.GetHits++
- s.stats.GetHashConflicts++
- }
- return v, true
- }
- if collectStats {
- s.stats.GetMisses++
- }
- }
- if collectStats {
- if hClash {
- s.stats.GetHashConflicts++
- }
- s.stats.GetNoEnt++
- }
- return key, false
-}
-
-// Contains returns true if the given key is present in the store
-func (s *JStore[T, C]) Contains(key T) bool {
- _, present := s.Get(key)
- return present
-}
-
-func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
- vs := make([]T, 0, len(s.store))
- for _, v := range s.store {
- vs = append(vs, v...)
- }
- sort.Slice(vs, func(i, j int) bool {
- return less(vs[i], vs[j])
- })
-
- return vs
-}
-
-func (s *JStore[T, C]) Each(f func(T) bool) {
- for _, e := range s.store {
- for _, v := range e {
- f(v)
- }
- }
-}
-
-func (s *JStore[T, C]) Len() int {
- return s.len
-}
-
-func (s *JStore[T, C]) Values() []T {
- vs := make([]T, 0, len(s.store))
- for _, e := range s.store {
- vs = append(vs, e...)
- }
- return vs
-}
-
-type entry[K, V any] struct {
- key K
- val V
-}
-
-type JMap[K, V any, C Comparator[K]] struct {
- store map[int][]*entry[K, V]
- len int
- comparator Comparator[K]
- stats *JStatRec
-}
-
-func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
- m := &JMap[K, V, C]{
- store: make(map[int][]*entry[K, V], 1),
- comparator: comparator,
- }
- if collectStats {
- m.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- m.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(m.stats)
- }
- return m
-}
-
-func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
- if collectStats {
- m.stats.Puts++
- }
- kh := m.comparator.Hash1(key)
-
- var hClash bool
- for _, e := range m.store[kh] {
- hClash = true
- if m.comparator.Equals2(e.key, key) {
- if collectStats {
- m.stats.PutHits++
- m.stats.PutHashConflicts++
- }
- return e.val, true
- }
- if collectStats {
- m.stats.PutMisses++
- }
- }
- if collectStats {
- if hClash {
- m.stats.PutHashConflicts++
- }
- }
- m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
- if collectStats {
- if len(m.store[kh]) > m.stats.MaxSlotSize {
- m.stats.MaxSlotSize = len(m.store[kh])
- }
- }
- m.len++
- if collectStats {
- m.stats.CurSize = m.len
- if m.len > m.stats.MaxSize {
- m.stats.MaxSize = m.len
- }
- }
- return val, false
-}
-
-func (m *JMap[K, V, C]) Values() []V {
- vs := make([]V, 0, len(m.store))
- for _, e := range m.store {
- for _, v := range e {
- vs = append(vs, v.val)
- }
- }
- return vs
-}
-
-func (m *JMap[K, V, C]) Get(key K) (V, bool) {
- if collectStats {
- m.stats.Gets++
- }
- var none V
- kh := m.comparator.Hash1(key)
- var hClash bool
- for _, e := range m.store[kh] {
- hClash = true
- if m.comparator.Equals2(e.key, key) {
- if collectStats {
- m.stats.GetHits++
- m.stats.GetHashConflicts++
- }
- return e.val, true
- }
- if collectStats {
- m.stats.GetMisses++
- }
- }
- if collectStats {
- if hClash {
- m.stats.GetHashConflicts++
- }
- m.stats.GetNoEnt++
- }
- return none, false
-}
-
-func (m *JMap[K, V, C]) Len() int {
- return m.len
-}
-
-func (m *JMap[K, V, C]) Delete(key K) {
- kh := m.comparator.Hash1(key)
- for i, e := range m.store[kh] {
- if m.comparator.Equals2(e.key, key) {
- m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
- m.len--
- return
- }
- }
-}
-
-func (m *JMap[K, V, C]) Clear() {
- m.store = make(map[int][]*entry[K, V])
-}
-
-type JPCMap struct {
- store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
- size int
- stats *JStatRec
-}
-
-func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
- m := &JPCMap{
- store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
- }
- if collectStats {
- m.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- m.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(m.stats)
- }
- return m
-}
-
-func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- pcm.stats.Gets++
- }
- // Do we have a map stored by k1?
- //
- m2, present := pcm.store.Get(k1)
- if present {
- if collectStats {
- pcm.stats.GetHits++
- }
- // We found a map of values corresponding to k1, so now we need to look up k2 in that map
- //
- return m2.Get(k2)
- }
- if collectStats {
- pcm.stats.GetMisses++
- }
- return nil, false
-}
-
-func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
-
- if collectStats {
- pcm.stats.Puts++
- }
- // First does a map already exist for k1?
- //
- if m2, present := pcm.store.Get(k1); present {
- if collectStats {
- pcm.stats.PutHits++
- }
- _, present = m2.Put(k2, v)
- if !present {
- pcm.size++
- if collectStats {
- pcm.stats.CurSize = pcm.size
- if pcm.size > pcm.stats.MaxSize {
- pcm.stats.MaxSize = pcm.size
- }
- }
- }
- } else {
- // No map found for k1, so we create it, add in our value, then store is
- //
- if collectStats {
- pcm.stats.PutMisses++
- m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
- } else {
- m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
- }
-
- m2.Put(k2, v)
- pcm.store.Put(k1, m2)
- pcm.size++
- }
-}
-
-type JPCMap2 struct {
- store map[int][]JPCEntry
- size int
- stats *JStatRec
-}
-
-type JPCEntry struct {
- k1, k2, v *PredictionContext
-}
-
-func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
- m := &JPCMap2{
- store: make(map[int][]JPCEntry, 1000),
- }
- if collectStats {
- m.stats = &JStatRec{
- Source: cType,
- Description: desc,
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- m.stats.CreateStack = debug.Stack()
- }
- Statistics.AddJStatRec(m.stats)
- }
- return m
-}
-
-func dHash(k1, k2 *PredictionContext) int {
- return k1.cachedHash*31 + k2.cachedHash
-}
-
-func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- pcm.stats.Gets++
- }
-
- h := dHash(k1, k2)
- var hClash bool
- for _, e := range pcm.store[h] {
- hClash = true
- if e.k1.Equals(k1) && e.k2.Equals(k2) {
- if collectStats {
- pcm.stats.GetHits++
- pcm.stats.GetHashConflicts++
- }
- return e.v, true
- }
- if collectStats {
- pcm.stats.GetMisses++
- }
- }
- if collectStats {
- if hClash {
- pcm.stats.GetHashConflicts++
- }
- pcm.stats.GetNoEnt++
- }
- return nil, false
-}
-
-func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- pcm.stats.Puts++
- }
- h := dHash(k1, k2)
- var hClash bool
- for _, e := range pcm.store[h] {
- hClash = true
- if e.k1.Equals(k1) && e.k2.Equals(k2) {
- if collectStats {
- pcm.stats.PutHits++
- pcm.stats.PutHashConflicts++
- }
- return e.v, true
- }
- if collectStats {
- pcm.stats.PutMisses++
- }
- }
- if collectStats {
- if hClash {
- pcm.stats.PutHashConflicts++
- }
- }
- pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
- pcm.size++
- if collectStats {
- pcm.stats.CurSize = pcm.size
- if pcm.size > pcm.stats.MaxSize {
- pcm.stats.MaxSize = pcm.size
- }
- }
- return nil, false
-}
-
-type VisitEntry struct {
- k *PredictionContext
- v *PredictionContext
-}
-type VisitRecord struct {
- store map[*PredictionContext]*PredictionContext
- len int
- stats *JStatRec
-}
-
-type VisitList struct {
- cache *list.List
- lock RWMutex
-}
-
-var visitListPool = VisitList{
- cache: list.New(),
- lock: RWMutex{},
-}
-
-// NewVisitRecord returns a new VisitRecord instance from the pool if available.
-// Note that this "map" uses a pointer as a key because we are emulating the behavior of
-// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
-// which means is the key the same reference to an object rather than is it .equals() to another
-// object.
-func NewVisitRecord() *VisitRecord {
- visitListPool.lock.Lock()
- el := visitListPool.cache.Front()
- defer visitListPool.lock.Unlock()
- var vr *VisitRecord
- if el == nil {
- vr = &VisitRecord{
- store: make(map[*PredictionContext]*PredictionContext),
- }
- if collectStats {
- vr.stats = &JStatRec{
- Source: PredictionContextCacheCollection,
- Description: "VisitRecord",
- }
- // Track where we created it from if we are being asked to do so
- if runtimeConfig.statsTraceStacks {
- vr.stats.CreateStack = debug.Stack()
- }
- }
- } else {
- vr = el.Value.(*VisitRecord)
- visitListPool.cache.Remove(el)
- vr.store = make(map[*PredictionContext]*PredictionContext)
- }
- if collectStats {
- Statistics.AddJStatRec(vr.stats)
- }
- return vr
-}
-
-func (vr *VisitRecord) Release() {
- vr.len = 0
- vr.store = nil
- if collectStats {
- vr.stats.MaxSize = 0
- vr.stats.CurSize = 0
- vr.stats.Gets = 0
- vr.stats.GetHits = 0
- vr.stats.GetMisses = 0
- vr.stats.GetHashConflicts = 0
- vr.stats.GetNoEnt = 0
- vr.stats.Puts = 0
- vr.stats.PutHits = 0
- vr.stats.PutMisses = 0
- vr.stats.PutHashConflicts = 0
- vr.stats.MaxSlotSize = 0
- }
- visitListPool.lock.Lock()
- visitListPool.cache.PushBack(vr)
- visitListPool.lock.Unlock()
-}
-
-func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- vr.stats.Gets++
- }
- v := vr.store[k]
- if v != nil {
- if collectStats {
- vr.stats.GetHits++
- }
- return v, true
- }
- if collectStats {
- vr.stats.GetNoEnt++
- }
- return nil, false
-}
-
-func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
- if collectStats {
- vr.stats.Puts++
- }
- vr.store[k] = v
- vr.len++
- if collectStats {
- vr.stats.CurSize = vr.len
- if vr.len > vr.stats.MaxSize {
- vr.stats.MaxSize = vr.len
- }
- }
- return v, false
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex.go
deleted file mode 100644
index 2b0cda4745..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/mutex.go
+++ /dev/null
@@ -1,41 +0,0 @@
-//go:build !antlr.nomutex
-// +build !antlr.nomutex
-
-package antlr
-
-import "sync"
-
-// Mutex is a simple mutex implementation which just delegates to sync.Mutex, it
-// is used to provide a mutex implementation for the antlr package, which users
-// can turn off with the build tag -tags antlr.nomutex
-type Mutex struct {
- mu sync.Mutex
-}
-
-func (m *Mutex) Lock() {
- m.mu.Lock()
-}
-
-func (m *Mutex) Unlock() {
- m.mu.Unlock()
-}
-
-type RWMutex struct {
- mu sync.RWMutex
-}
-
-func (m *RWMutex) Lock() {
- m.mu.Lock()
-}
-
-func (m *RWMutex) Unlock() {
- m.mu.Unlock()
-}
-
-func (m *RWMutex) RLock() {
- m.mu.RLock()
-}
-
-func (m *RWMutex) RUnlock() {
- m.mu.RUnlock()
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
deleted file mode 100644
index 35ce4353ee..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
+++ /dev/null
@@ -1,32 +0,0 @@
-//go:build antlr.nomutex
-// +build antlr.nomutex
-
-package antlr
-
-type Mutex struct{}
-
-func (m *Mutex) Lock() {
- // No-op
-}
-
-func (m *Mutex) Unlock() {
- // No-op
-}
-
-type RWMutex struct{}
-
-func (m *RWMutex) Lock() {
- // No-op
-}
-
-func (m *RWMutex) Unlock() {
- // No-op
-}
-
-func (m *RWMutex) RLock() {
- // No-op
-}
-
-func (m *RWMutex) RUnlock() {
- // No-op
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
deleted file mode 100644
index 923c7b52c4..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
+++ /dev/null
@@ -1,47 +0,0 @@
-//go:build !antlr.stats
-
-package antlr
-
-// This file is compiled when the build configuration antlr.stats is not enabled.
-// which then allows the compiler to optimize out all the code that is not used.
-const collectStats = false
-
-// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
-type goRunStats struct {
-}
-
-var Statistics = &goRunStats{}
-
-func (s *goRunStats) AddJStatRec(_ *JStatRec) {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-func (s *goRunStats) CollectionAnomalies() {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-func (s *goRunStats) Reset() {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-func (s *goRunStats) Report(dir string, prefix string) error {
- // Do nothing - compiler will optimize this out (hopefully)
- return nil
-}
-
-func (s *goRunStats) Analyze() {
- // Do nothing - compiler will optimize this out (hopefully)
-}
-
-type statsOption func(*goRunStats) error
-
-func (s *goRunStats) Configure(options ...statsOption) error {
- // Do nothing - compiler will optimize this out (hopefully)
- return nil
-}
-
-func WithTopN(topN int) statsOption {
- return func(s *goRunStats) error {
- return nil
- }
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
deleted file mode 100644
index a1d5186b8f..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
+++ /dev/null
@@ -1,727 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-var _emptyPredictionContextHash int
-
-func init() {
- _emptyPredictionContextHash = murmurInit(1)
- _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
-}
-
-func calculateEmptyHash() int {
- return _emptyPredictionContextHash
-}
-
-const (
- // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
- // doesn't mean wildcard:
- //
- // $ + x = [$,x]
- //
- // Here,
- //
- // $ = EmptyReturnState
- BasePredictionContextEmptyReturnState = 0x7FFFFFFF
-)
-
-// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
-//
-//goland:noinspection GoUnusedGlobalVariable
-var (
- BasePredictionContextglobalNodeCount = 1
- BasePredictionContextid = BasePredictionContextglobalNodeCount
-)
-
-const (
- PredictionContextEmpty = iota
- PredictionContextSingleton
- PredictionContextArray
-)
-
-// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to
-// emulate inheritance from Java, and can be used without an interface definition. An interface
-// is not required because no user code will ever need to implement this interface.
-type PredictionContext struct {
- cachedHash int
- pcType int
- parentCtx *PredictionContext
- returnState int
- parents []*PredictionContext
- returnStates []int
-}
-
-func NewEmptyPredictionContext() *PredictionContext {
- nep := &PredictionContext{}
- nep.cachedHash = calculateEmptyHash()
- nep.pcType = PredictionContextEmpty
- nep.returnState = BasePredictionContextEmptyReturnState
- return nep
-}
-
-func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
- pc := &PredictionContext{}
- pc.pcType = PredictionContextSingleton
- pc.returnState = returnState
- pc.parentCtx = parent
- if parent != nil {
- pc.cachedHash = calculateHash(parent, returnState)
- } else {
- pc.cachedHash = calculateEmptyHash()
- }
- return pc
-}
-
-func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
- if returnState == BasePredictionContextEmptyReturnState && parent == nil {
- // someone can pass in the bits of an array ctx that mean $
- return BasePredictionContextEMPTY
- }
- return NewBaseSingletonPredictionContext(parent, returnState)
-}
-
-func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
- // Parent can be nil only if full ctx mode and we make an array
- // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
- // nil parent and
- // returnState == {@link //EmptyReturnState}.
- hash := murmurInit(1)
- for _, parent := range parents {
- hash = murmurUpdate(hash, parent.Hash())
- }
- for _, returnState := range returnStates {
- hash = murmurUpdate(hash, returnState)
- }
- hash = murmurFinish(hash, len(parents)<<1)
-
- nec := &PredictionContext{}
- nec.cachedHash = hash
- nec.pcType = PredictionContextArray
- nec.parents = parents
- nec.returnStates = returnStates
- return nec
-}
-
-func (p *PredictionContext) Hash() int {
- return p.cachedHash
-}
-
-func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
- if p == other {
- return true
- }
- switch p.pcType {
- case PredictionContextEmpty:
- otherP := other.(*PredictionContext)
- return other == nil || otherP == nil || otherP.isEmpty()
- case PredictionContextSingleton:
- return p.SingletonEquals(other)
- case PredictionContextArray:
- return p.ArrayEquals(other)
- }
- return false
-}
-
-func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
- if o == nil {
- return false
- }
- other := o.(*PredictionContext)
- if other == nil || other.pcType != PredictionContextArray {
- return false
- }
- if p.cachedHash != other.Hash() {
- return false // can't be same if hash is different
- }
-
- // Must compare the actual array elements and not just the array address
- //
- return intSlicesEqual(p.returnStates, other.returnStates) &&
- pcSliceEqual(p.parents, other.parents)
-}
-
-func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
- if other == nil {
- return false
- }
- otherP := other.(*PredictionContext)
- if otherP == nil || otherP.pcType != PredictionContextSingleton {
- return false
- }
-
- if p.cachedHash != otherP.Hash() {
- return false // Can't be same if hash is different
- }
-
- if p.returnState != otherP.getReturnState(0) {
- return false
- }
-
- // Both parents must be nil if one is
- if p.parentCtx == nil {
- return otherP.parentCtx == nil
- }
-
- return p.parentCtx.Equals(otherP.parentCtx)
-}
-
-func (p *PredictionContext) GetParent(i int) *PredictionContext {
- switch p.pcType {
- case PredictionContextEmpty:
- return nil
- case PredictionContextSingleton:
- return p.parentCtx
- case PredictionContextArray:
- return p.parents[i]
- }
- return nil
-}
-
-func (p *PredictionContext) getReturnState(i int) int {
- switch p.pcType {
- case PredictionContextArray:
- return p.returnStates[i]
- default:
- return p.returnState
- }
-}
-
-func (p *PredictionContext) GetReturnStates() []int {
- switch p.pcType {
- case PredictionContextArray:
- return p.returnStates
- default:
- return []int{p.returnState}
- }
-}
-
-func (p *PredictionContext) length() int {
- switch p.pcType {
- case PredictionContextArray:
- return len(p.returnStates)
- default:
- return 1
- }
-}
-
-func (p *PredictionContext) hasEmptyPath() bool {
- switch p.pcType {
- case PredictionContextSingleton:
- return p.returnState == BasePredictionContextEmptyReturnState
- }
- return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
-}
-
-func (p *PredictionContext) String() string {
- switch p.pcType {
- case PredictionContextEmpty:
- return "$"
- case PredictionContextSingleton:
- var up string
-
- if p.parentCtx == nil {
- up = ""
- } else {
- up = p.parentCtx.String()
- }
-
- if len(up) == 0 {
- if p.returnState == BasePredictionContextEmptyReturnState {
- return "$"
- }
-
- return strconv.Itoa(p.returnState)
- }
-
- return strconv.Itoa(p.returnState) + " " + up
- case PredictionContextArray:
- if p.isEmpty() {
- return "[]"
- }
-
- s := "["
- for i := 0; i < len(p.returnStates); i++ {
- if i > 0 {
- s = s + ", "
- }
- if p.returnStates[i] == BasePredictionContextEmptyReturnState {
- s = s + "$"
- continue
- }
- s = s + strconv.Itoa(p.returnStates[i])
- if !p.parents[i].isEmpty() {
- s = s + " " + p.parents[i].String()
- } else {
- s = s + "nil"
- }
- }
- return s + "]"
-
- default:
- return "unknown"
- }
-}
-
-func (p *PredictionContext) isEmpty() bool {
- switch p.pcType {
- case PredictionContextEmpty:
- return true
- case PredictionContextArray:
- // since EmptyReturnState can only appear in the last position, we
- // don't need to verify that size==1
- return p.returnStates[0] == BasePredictionContextEmptyReturnState
- default:
- return false
- }
-}
-
-func (p *PredictionContext) Type() int {
- return p.pcType
-}
-
-func calculateHash(parent *PredictionContext, returnState int) int {
- h := murmurInit(1)
- h = murmurUpdate(h, parent.Hash())
- h = murmurUpdate(h, returnState)
- return murmurFinish(h, 2)
-}
-
-// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
-// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
-// /
-func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
- if outerContext == nil {
- outerContext = ParserRuleContextEmpty
- }
- // if we are in RuleContext of start rule, s, then BasePredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
- return BasePredictionContextEMPTY
- }
- // If we have a parent, convert it to a BasePredictionContext graph
- parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
- state := a.states[outerContext.GetInvokingState()]
- transition := state.GetTransitions()[0]
-
- return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
-}
-
-func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
-
- // Share same graph if both same
- //
- if a == b || a.Equals(b) {
- return a
- }
-
- if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
- return mergeSingletons(a, b, rootIsWildcard, mergeCache)
- }
- // At least one of a or b is array
- // If one is $ and rootIsWildcard, return $ as wildcard
- if rootIsWildcard {
- if a.isEmpty() {
- return a
- }
- if b.isEmpty() {
- return b
- }
- }
-
- // Convert either Singleton or Empty to arrays, so that we can merge them
- //
- ara := convertToArray(a)
- arb := convertToArray(b)
- return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
-}
-
-func convertToArray(pc *PredictionContext) *PredictionContext {
- switch pc.Type() {
- case PredictionContextEmpty:
- return NewArrayPredictionContext([]*PredictionContext{}, []int{})
- case PredictionContextSingleton:
- return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
- default:
- // Already an array
- }
- return pc
-}
-
-// mergeSingletons merges two Singleton [PredictionContext] instances.
-//
-// Stack tops equal, parents merge is same return left graph.
-//
-//
-// Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.
-//
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// @param mergeCache
-// /
-func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
- if mergeCache != nil {
- previous, present := mergeCache.Get(a, b)
- if present {
- return previous
- }
- previous, present = mergeCache.Get(b, a)
- if present {
- return previous
- }
- }
-
- rootMerge := mergeRoot(a, b, rootIsWildcard)
- if rootMerge != nil {
- if mergeCache != nil {
- mergeCache.Put(a, b, rootMerge)
- }
- return rootMerge
- }
- if a.returnState == b.returnState {
- parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
- // if parent is same as existing a or b parent or reduced to a parent,
- // return it
- if parent.Equals(a.parentCtx) {
- return a // ax + bx = ax, if a=b
- }
- if parent.Equals(b.parentCtx) {
- return b // ax + bx = bx, if a=b
- }
- // else: ax + ay = a'[x,y]
- // merge parents x and y, giving array node with x,y then remainders
- // of those graphs. dup a, a' points at merged array.
- // New joined parent so create a new singleton pointing to it, a'
- spc := SingletonBasePredictionContextCreate(parent, a.returnState)
- if mergeCache != nil {
- mergeCache.Put(a, b, spc)
- }
- return spc
- }
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- var singleParent *PredictionContext
- if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
- // bx =
- // [a,b]x
- singleParent = a.parentCtx
- }
- if singleParent != nil { // parents are same
- // sort payloads and use same parent
- payloads := []int{a.returnState, b.returnState}
- if a.returnState > b.returnState {
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- }
- parents := []*PredictionContext{singleParent, singleParent}
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.Put(a, b, apc)
- }
- return apc
- }
- // parents differ and can't merge them. Just pack together
- // into array can't merge.
- // ax + by = [ax,by]
- payloads := []int{a.returnState, b.returnState}
- parents := []*PredictionContext{a.parentCtx, b.parentCtx}
- if a.returnState > b.returnState { // sort by payload
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- parents = []*PredictionContext{b.parentCtx, a.parentCtx}
- }
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.Put(a, b, apc)
- }
- return apc
-}
-
-// Handle case where at least one of {@code a} or {@code b} is
-// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
-// to represent {@link //EMPTY}.
-//
-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
-//
-// Special case of last merge if local context.
-//
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-//
-//
-// Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
-//
-//
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// /
-func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
- if rootIsWildcard {
- if a.pcType == PredictionContextEmpty {
- return BasePredictionContextEMPTY // // + b =//
- }
- if b.pcType == PredictionContextEmpty {
- return BasePredictionContextEMPTY // a +// =//
- }
- } else {
- if a.isEmpty() && b.isEmpty() {
- return BasePredictionContextEMPTY // $ + $ = $
- } else if a.isEmpty() { // $ + x = [$,x]
- payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []*PredictionContext{b.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
- payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []*PredictionContext{a.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- }
- }
- return nil
-}
-
-// Merge two {@link ArrayBasePredictionContext} instances.
-//
-// Different tops, different parents.
-//
-//
-// Shared top, same parents.
-//
-//
-// Shared top, different parents.
-//
-//
-// Shared top, all shared parents.
-//
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
-//
-//goland:noinspection GoBoolExpressions
-func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
- if mergeCache != nil {
- previous, present := mergeCache.Get(a, b)
- if present {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous
- }
- previous, present = mergeCache.Get(b, a)
- if present {
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous
- }
- }
- // merge sorted payloads a + b => M
- i := 0 // walks a
- j := 0 // walks b
- k := 0 // walks target M array
-
- mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
- mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
- // walk and merge to yield mergedParents, mergedReturnStates
- for i < len(a.returnStates) && j < len(b.returnStates) {
- aParent := a.parents[i]
- bParent := b.parents[j]
- if a.returnStates[i] == b.returnStates[j] {
- // same payload (stack tops are equal), must yield merged singleton
- payload := a.returnStates[i]
- // $+$ = $
- bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
- axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax
- // ->
- // ax
- if bothDollars || axAX {
- mergedParents[k] = aParent // choose left
- mergedReturnStates[k] = payload
- } else { // ax+ay -> a'[x,y]
- mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
- mergedParents[k] = mergedParent
- mergedReturnStates[k] = payload
- }
- i++ // hop over left one as usual
- j++ // but also Skip one in right side since we merge
- } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
- mergedParents[k] = aParent
- mergedReturnStates[k] = a.returnStates[i]
- i++
- } else { // b > a, copy b[j] to M
- mergedParents[k] = bParent
- mergedReturnStates[k] = b.returnStates[j]
- j++
- }
- k++
- }
- // copy over any payloads remaining in either array
- if i < len(a.returnStates) {
- for p := i; p < len(a.returnStates); p++ {
- mergedParents[k] = a.parents[p]
- mergedReturnStates[k] = a.returnStates[p]
- k++
- }
- } else {
- for p := j; p < len(b.returnStates); p++ {
- mergedParents[k] = b.parents[p]
- mergedReturnStates[k] = b.returnStates[p]
- k++
- }
- }
- // trim merged if we combined a few that had same stack tops
- if k < len(mergedParents) { // write index < last position trim
- if k == 1 { // for just one merged element, return singleton top
- pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
- if mergeCache != nil {
- mergeCache.Put(a, b, pc)
- }
- return pc
- }
- mergedParents = mergedParents[0:k]
- mergedReturnStates = mergedReturnStates[0:k]
- }
-
- M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
-
- // if we created same array as a or b, return that instead
- // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
- if M.Equals(a) {
- if mergeCache != nil {
- mergeCache.Put(a, b, a)
- }
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
- }
- return a
- }
- if M.Equals(b) {
- if mergeCache != nil {
- mergeCache.Put(a, b, b)
- }
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
- }
- return b
- }
- combineCommonParents(&mergedParents)
-
- if mergeCache != nil {
- mergeCache.Put(a, b, M)
- }
- if runtimeConfig.parserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
- }
- return M
-}
-
-// Make pass over all M parents and merge any Equals() ones.
-// Note that we pass a pointer to the slice as we want to modify it in place.
-//
-//goland:noinspection GoUnusedFunction
-func combineCommonParents(parents *[]*PredictionContext) {
- uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
-
- for p := 0; p < len(*parents); p++ {
- parent := (*parents)[p]
- _, _ = uniqueParents.Put(parent)
- }
- for q := 0; q < len(*parents); q++ {
- pc, _ := uniqueParents.Get((*parents)[q])
- (*parents)[q] = pc
- }
-}
-
-func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
- if context.isEmpty() {
- return context
- }
- existing, present := visited.Get(context)
- if present {
- return existing
- }
-
- existing, present = contextCache.Get(context)
- if present {
- visited.Put(context, existing)
- return existing
- }
- changed := false
- parents := make([]*PredictionContext, context.length())
- for i := 0; i < len(parents); i++ {
- parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
- if changed || !parent.Equals(context.GetParent(i)) {
- if !changed {
- parents = make([]*PredictionContext, context.length())
- for j := 0; j < context.length(); j++ {
- parents[j] = context.GetParent(j)
- }
- changed = true
- }
- parents[i] = parent
- }
- }
- if !changed {
- contextCache.add(context)
- visited.Put(context, context)
- return context
- }
- var updated *PredictionContext
- if len(parents) == 0 {
- updated = BasePredictionContextEMPTY
- } else if len(parents) == 1 {
- updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
- } else {
- updated = NewArrayPredictionContext(parents, context.GetReturnStates())
- }
- contextCache.add(updated)
- visited.Put(updated, updated)
- visited.Put(context, updated)
-
- return updated
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
deleted file mode 100644
index 25dfb11e8f..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package antlr
-
-var BasePredictionContextEMPTY = &PredictionContext{
- cachedHash: calculateEmptyHash(),
- pcType: PredictionContextEmpty,
- returnState: BasePredictionContextEmptyReturnState,
-}
-
-// PredictionContextCache is Used to cache [PredictionContext] objects. It is used for the shared
-// context cash associated with contexts in DFA states. This cache
-// can be used for both lexers and parsers.
-type PredictionContextCache struct {
- cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
-}
-
-func NewPredictionContextCache() *PredictionContextCache {
- return &PredictionContextCache{
- cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
- }
-}
-
-// Add a context to the cache and return it. If the context already exists,
-// return that one instead and do not add a new context to the cache.
-// Protect shared cache from unsafe thread access.
-func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
- if ctx.isEmpty() {
- return BasePredictionContextEMPTY
- }
-
- // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
- // the same pointer), otherwise it will add the new entry and return that.
- //
- existing, present := p.cache.Get(ctx)
- if present {
- return existing
- }
- p.cache.Put(ctx, ctx)
- return ctx
-}
-
-func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
- pc, exists := p.cache.Get(ctx)
- return pc, exists
-}
-
-func (p *PredictionContextCache) length() int {
- return p.cache.Len()
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
deleted file mode 100644
index 3f85a6a520..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
+++ /dev/null
@@ -1,536 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// This enumeration defines the prediction modes available in ANTLR 4 along with
-// utility methods for analyzing configuration sets for conflicts and/or
-// ambiguities.
-
-const (
- // PredictionModeSLL represents the SLL(*) prediction mode.
- // This prediction mode ignores the current
- // parser context when making predictions. This is the fastest prediction
- // mode, and provides correct results for many grammars. This prediction
- // mode is more powerful than the prediction mode provided by ANTLR 3, but
- // may result in syntax errors for grammar and input combinations which are
- // not SLL.
- //
- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the SLL prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful LL prediction abilities to complete successfully.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeSLL = 0
-
- // PredictionModeLL represents the LL(*) prediction mode.
- // This prediction mode allows the current parser
- // context to be used for resolving SLL conflicts that occur during
- // prediction. This is the fastest prediction mode that guarantees correct
- // parse results for all combinations of grammars with syntactically correct
- // inputs.
- //
- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLL = 1
-
- // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
- // with exact ambiguity detection.
- //
- // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
- // this prediction mode instructs the prediction algorithm to determine the
- // complete and exact set of ambiguous alternatives for every ambiguous
- // decision encountered while parsing.
- //
- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLLExactAmbigDetection = 2
-)
-
-// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
-//
-// This method computes the SLL prediction termination condition for both of
-// the following cases:
-//
-// - The usual SLL+LL fallback upon SLL conflict
-// - Pure SLL without LL fallback
-//
-// # Combined SLL+LL Parsing
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative, e.g.
-//
-// {1,2} and {3,4}
-//
-// If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// # Heuristic
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ;
-//
-// When the [ATN] simulation reaches the state before ';', it has a
-// [DFA] state that looks like:
-//
-// [12|1|[], 6|2|[], 12|2|[]]
-//
-// Naturally
-//
-// 12|1|[] and 12|2|[]
-//
-// conflict, but we cannot stop processing this node because alternative to has another way to continue,
-// via
-//
-// [6|2|[]]
-//
-// It also let's us continue for this rule:
-//
-// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state immediately before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue, and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// # Pure SLL Parsing
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// # Predicates in SLL+LL Parsing
-//
-// SLL decisions don't evaluate predicates until after they reach [DFA] stop
-// states because they need to create the [DFA] cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation, so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, [ATNConfigSet] combines stack contexts but not
-// semantic predicate contexts, so we might see two configurations like the
-// following:
-//
-// (s, 1, x, {}), (s, 1, x', {p})
-//
-// Before testing these configurations against others, we have to merge
-// x and x' (without modifying the existing configurations).
-// For example, we test (x+x')==x” when looking for conflicts in
-// the following configurations:
-//
-// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})
-//
-// If the configuration set has predicates (as indicated by
-// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
-// the configurations to strip out all the predicates so that a standard
-// [ATNConfigSet] will merge everything ignoring predicates.
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
-
- // Configs in rule stop states indicate reaching the end of the decision
- // rule (local context) or end of start rule (full context). If all
- // configs meet this condition, then none of the configurations is able
- // to Match additional input, so we terminate prediction.
- //
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return true
- }
-
- // pure SLL mode parsing
- if mode == PredictionModeSLL {
- // Don't bother with combining configs from different semantic
- // contexts if we can fail over to full LL costs more time
- // since we'll often fail over anyway.
- if configs.hasSemanticContext {
- // dup configs, tossing out semantic predicates
- dup := NewATNConfigSet(false)
- for _, c := range configs.configs {
-
- // NewATNConfig({semanticContext:}, c)
- c = NewATNConfig2(c, SemanticContextNone)
- dup.Add(c, nil)
- }
- configs = dup
- }
- // now we have combined contexts for configs with dissimilar predicates
- }
- // pure SLL or combined SLL+LL mode parsing
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
-}
-
-// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
-// [RuleStopState]. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
-func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
- for _, c := range configs.configs {
- if _, ok := c.GetState().(*RuleStopState); ok {
- return true
- }
- }
- return false
-}
-
-// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
-// [RuleStopState]. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// the func returns true if all configurations in configs are in a
-// [RuleStopState]
-func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
-
- for _, c := range configs.configs {
- if _, ok := c.GetState().(*RuleStopState); !ok {
- return false
- }
- }
- return true
-}
-
-// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
-//
-// Can we stop looking ahead during [ATN] simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations 'C', into
-// conflicting subsets (s, _, ctx, _) and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical [ATNConfig].state and [ATNConfig].context values
-// but a different [ATNConfig].alt value, e.g.
-//
-// (s, i, ctx, _)
-//
-// and
-//
-// (s, j, ctx, _) ; for i != j
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// A_s,ctx = {i | (s, i, ctx, _)}
-//
-// for each configuration in C holding s and ctx fixed.
-//
-// Or in pseudo-code:
-//
-// for each configuration c in C:
-// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred
-//
-// The values in map are the set of
-//
-// A_s,ctx
-//
-// sets.
-//
-// If
-//
-// |A_s,ctx| = 1
-//
-// then there is no conflict associated with s and ctx.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// further lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// # Conflicting Configs
-//
-// Two configurations:
-//
-// (s, i, x) and (s, j, x')
-//
-// conflict when i != j but x = x'. Because we merge all
-// (s, i, _) configurations together, that means that there are at
-// most n configurations associated with state s for
-// n possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts x and x'.
-//
-// Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either x or x'.
-// If the x associated with lowest alternative i
-// is the superset, then i is the only possible prediction since the
-// others resolve to min(i) as well. However, if x is
-// associated with j > i then at least one stack configuration for
-// j is not in conflict with alternative i. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between x and
-// x', which lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// # Continue/Stop Rule
-//
-// Continue if the union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives,
-//
-// [i for (_, i, _)]
-//
-// tells us which alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us to strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// Cases
-//
-// - no conflicts and more than 1 alternative in set => continue
-// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
-// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
-// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set
-// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
-// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
-// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
-// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
-// {1} ∪ {2} = {1,2} => continue
-// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
-// {1} ∪ {2} = {1,2} => continue
-// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
-// {1} ∪ {3} = {1,3} => continue
-//
-// # Exact Ambiguity Detection
-//
-// If all states report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set:
-//
-// |A_i| > 1
-//
-// and
-//
-// A_i = A_j ; for all i, j
-//
-// In other words, we continue examining lookahead until all A_i
-// have more than one alternative and all A_i are the same. If
-//
-// A={{1,2}, {1,3}}
-//
-// then regular LL prediction would terminate because the resolved set is {1}.
-// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like:
-//
-// A={{1,2}}
-//
-// or
-//
-// {{1,2},{1,2}}, etc...
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
- return PredictionModegetSingleViableAlt(altsets)
-}
-
-// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
-// than one alternative.
-//
-// The func returns true if every [BitSet] in altsets has
-// [BitSet].cardinality cardinality > 1
-func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
- return !PredictionModehasNonConflictingAltSet(altsets)
-}
-
-// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
-// exactly one alternative.
-//
-// The func returns true if altsets contains at least one [BitSet] with
-// [BitSet].cardinality cardinality 1
-func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() == 1 {
- return true
- }
- }
- return false
-}
-
-// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
-// more than one alternative.
-//
-// The func returns true if altsets contains a [BitSet] with
-// [BitSet].cardinality cardinality > 1, otherwise false
-func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() > 1 {
- return true
- }
- }
- return false
-}
-
-// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
-//
-// The func returns true if every member of altsets is equal to the others.
-func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
- var first *BitSet
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if first == nil {
- first = alts
- } else if alts != first {
- return false
- }
- }
-
- return true
-}
-
-// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
-// altsets. If no such alternative exists, this method returns
-// [ATNInvalidAltNumber].
-//
-// @param altsets a collection of alternative subsets
-func PredictionModegetUniqueAlt(altsets []*BitSet) int {
- all := PredictionModeGetAlts(altsets)
- if all.length() == 1 {
- return all.minValue()
- }
-
- return ATNInvalidAltNumber
-}
-
-// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
-// alternative subsets. This method returns the union of each [BitSet]
-// in altsets, being the set of represented alternatives in altsets.
-func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
- all := NewBitSet()
- for _, alts := range altsets {
- all.or(alts)
- }
- return all
-}
-
-// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
-//
-// for each configuration c in configs:
-// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
-func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
- configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
-
- for _, c := range configs.configs {
-
- alts, ok := configToAlts.Get(c)
- if !ok {
- alts = NewBitSet()
- configToAlts.Put(c, alts)
- }
- alts.add(c.GetAlt())
- }
-
- return configToAlts.Values()
-}
-
-// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
-//
-// for each configuration c in configs:
-// map[c.ATNConfig.state] U= c.ATNConfig.alt}
-func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
- m := NewAltDict()
-
- for _, c := range configs.configs {
- alts := m.Get(c.GetState().String())
- if alts == nil {
- alts = NewBitSet()
- m.put(c.GetState().String(), alts)
- }
- alts.(*BitSet).add(c.GetAlt())
- }
- return m
-}
-
-func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
- values := PredictionModeGetStateToAltMap(configs).values()
- for i := 0; i < len(values); i++ {
- if values[i].(*BitSet).length() == 1 {
- return true
- }
- }
- return false
-}
-
-// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
-// if there is one.
-//
-// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
-func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
- result := ATNInvalidAltNumber
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- minAlt := alts.minValue()
- if result == ATNInvalidAltNumber {
- result = minAlt
- } else if result != minAlt { // more than 1 viable alt
- return ATNInvalidAltNumber
- }
- }
- return result
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
deleted file mode 100644
index f2ad04793e..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// RuleContext is a record of a single rule invocation. It knows
-// which context invoked it, if any. If there is no parent context, then
-// naturally the invoking state is not valid. The parent link
-// provides a chain upwards from the current rule invocation to the root
-// of the invocation tree, forming a stack.
-//
-// We actually carry no information about the rule associated with this context (except
-// when parsing). We keep only the state number of the invoking state from
-// the [ATN] submachine that invoked this. Contrast this with the s
-// pointer inside [ParserRuleContext] that tracks the current state
-// being "executed" for the current rule.
-//
-// The parent contexts are useful for computing lookahead sets and
-// getting error information.
-//
-// These objects are used during parsing and prediction.
-// For the special case of parsers, we use the struct
-// [ParserRuleContext], which embeds a RuleContext.
-//
-// @see ParserRuleContext
-type RuleContext interface {
- RuleNode
-
- GetInvokingState() int
- SetInvokingState(int)
-
- GetRuleIndex() int
- IsEmpty() bool
-
- GetAltNumber() int
- SetAltNumber(altNumber int)
-
- String([]string, RuleContext) string
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
deleted file mode 100644
index 8cb5f3ed6f..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/statistics.go
+++ /dev/null
@@ -1,280 +0,0 @@
-//go:build antlr.stats
-
-package antlr
-
-import (
- "fmt"
- "log"
- "os"
- "path/filepath"
- "sort"
- "strconv"
-)
-
-// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default
-// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
-//
-
-// Tells various components to collect statistics - because it is only true when this file is included, it will
-// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
-const collectStats = true
-
-// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
-// It is exported so that it can be used by others to look for things that are not already looked for in the
-// runtime statistics.
-type goRunStats struct {
-
- // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
- // during a run. It is exported so that it can be used by others to look for things that are not already looked for
- // within this package.
- //
- jStats []*JStatRec
- jStatsLock RWMutex
- topN int
- topNByMax []*JStatRec
- topNByUsed []*JStatRec
- unusedCollections map[CollectionSource]int
- counts map[CollectionSource]int
-}
-
-const (
- collectionsFile = "collections"
-)
-
-var (
- Statistics = &goRunStats{
- topN: 10,
- }
-)
-
-type statsOption func(*goRunStats) error
-
-// Configure allows the statistics system to be configured as the user wants and override the defaults
-func (s *goRunStats) Configure(options ...statsOption) error {
- for _, option := range options {
- err := option(s)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
-//
-// For example, if you want to see the top 20 collections by size, you can do:
-//
-// antlr.Statistics.Configure(antlr.WithTopN(20))
-func WithTopN(topN int) statsOption {
- return func(s *goRunStats) error {
- s.topN = topN
- return nil
- }
-}
-
-// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
-//
-// The function gathers and analyzes a number of statistics about any particular run of
-// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
-// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
-// especially useful in tracking down bugs or performance problems when an ANTLR user could
-// supply the output from this package, but cannot supply the grammar file(s) they are using, even
-// privately to the maintainers.
-//
-// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
-// must call this function their selves to analyze the statistics. This is because none of the infrastructure is
-// extant unless the calling program is built with the antlr.stats tag like so:
-//
-// go build -tags antlr.stats .
-//
-// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
-// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
-// [Statistics.Report] function to report the statistics.
-//
-// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to
-// me [Jim Idle] directly at jimi@idle.ws
-//
-// [Jim Idle]: https:://github.com/jim-idle
-func (s *goRunStats) Analyze() {
-
- // Look for anything that looks strange and record it in our local maps etc for the report to present it
- //
- s.CollectionAnomalies()
- s.TopNCollections()
-}
-
-// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
-func (s *goRunStats) TopNCollections() {
-
- // Let's sort the stat records by MaxSize
- //
- sort.Slice(s.jStats, func(i, j int) bool {
- return s.jStats[i].MaxSize > s.jStats[j].MaxSize
- })
-
- for i := 0; i < len(s.jStats) && i < s.topN; i++ {
- s.topNByMax = append(s.topNByMax, s.jStats[i])
- }
-
- // Sort by the number of times used
- //
- sort.Slice(s.jStats, func(i, j int) bool {
- return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
- })
- for i := 0; i < len(s.jStats) && i < s.topN; i++ {
- s.topNByUsed = append(s.topNByUsed, s.jStats[i])
- }
-}
-
-// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output
-// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
-// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix.
-func (s *goRunStats) Report(dir string, prefix string) error {
-
- isDir, err := isDirectory(dir)
- switch {
- case err != nil:
- return err
- case !isDir:
- return fmt.Errorf("output directory `%s` is not a directory", dir)
- }
- s.reportCollections(dir, prefix)
-
- // Clean out any old data in case the user forgets
- //
- s.Reset()
- return nil
-}
-
-func (s *goRunStats) Reset() {
- s.jStats = nil
- s.topNByUsed = nil
- s.topNByMax = nil
-}
-
-func (s *goRunStats) reportCollections(dir, prefix string) {
- cname := filepath.Join(dir, ".asciidoctor")
- // If the file doesn't exist, create it, or append to the file
- f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatal(err)
- }
- _, _ = f.WriteString(`// .asciidoctorconfig
-++++
-
-++++`)
- _ = f.Close()
-
- fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
- // If the file doesn't exist, create it, or append to the file
- f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatal(err)
- }
- defer func(f *os.File) {
- err := f.Close()
- if err != nil {
- log.Fatal(err)
- }
- }(f)
- _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
-
- _, _ = f.WriteString("== Summary\n")
-
- if s.unusedCollections != nil {
- _, _ = f.WriteString("=== Unused Collections\n")
- _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n")
- _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n")
- _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n")
- _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
- _, _ = f.WriteString(" actually needed.\n\n")
-
- _, _ = f.WriteString("\n.Unused collections\n")
- _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Type | Count\n")
-
- for k, v := range s.unusedCollections {
- _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
- }
- f.WriteString("|===\n\n")
- }
-
- _, _ = f.WriteString("\n.Summary of Collections\n")
- _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Type | Count\n")
- for k, v := range s.counts {
- _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
- }
- _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
- _, _ = f.WriteString("|===\n\n")
-
- _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
- _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
- for _, c := range s.topNByMax {
- _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
- _, _ = f.WriteString("| " + c.Description + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
- _, _ = f.WriteString("\n")
- }
- _, _ = f.WriteString("|===\n\n")
-
- _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
- _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
- _, _ = f.WriteString("|===\n")
- _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
- for _, c := range s.topNByUsed {
- _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
- _, _ = f.WriteString("| " + c.Description + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
- _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
- _, _ = f.WriteString("\n")
- }
- _, _ = f.WriteString("|===\n\n")
-}
-
-// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled.
-func (s *goRunStats) AddJStatRec(rec *JStatRec) {
- s.jStatsLock.Lock()
- defer s.jStatsLock.Unlock()
- s.jStats = append(s.jStats, rec)
-}
-
-// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
-func (s *goRunStats) CollectionAnomalies() {
- s.jStatsLock.RLock()
- defer s.jStatsLock.RUnlock()
- s.counts = make(map[CollectionSource]int, len(s.jStats))
- for _, c := range s.jStats {
-
- // Accumlate raw counts
- //
- s.counts[c.Source]++
-
- // Look for allocated but unused collections and count them
- if c.MaxSize == 0 && c.Puts == 0 {
- if s.unusedCollections == nil {
- s.unusedCollections = make(map[CollectionSource]int)
- }
- s.unusedCollections[c.Source]++
- }
- if c.MaxSize > 6000 {
- fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
- }
- }
-
-}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
deleted file mode 100644
index 4d9eb94e5f..0000000000
--- a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package antlr
-
-// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be
-// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies
-// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function
-// for ideas on what can be gleaned from these statistics about collections.
-type JStatRec struct {
- Source CollectionSource
- MaxSize int
- CurSize int
- Gets int
- GetHits int
- GetMisses int
- GetHashConflicts int
- GetNoEnt int
- Puts int
- PutHits int
- PutMisses int
- PutHashConflicts int
- MaxSlotSize int
- Description string
- CreateStack []byte
-}
diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go
new file mode 100644
index 0000000000..e9bb0efe77
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go
@@ -0,0 +1,1385 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v. If v is nil or not a pointer,
+// Unmarshal returns an InvalidUnmarshalError.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalJSON method, including
+// when the input is a JSON null.
+// Otherwise, if the value implements encoding.TextUnmarshaler
+// and the input is a JSON quoted string, Unmarshal calls that value's
+// UnmarshalText method with the unquoted form of the string.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match. By
+// default, object keys which don't have a corresponding struct field are
+// ignored (see Decoder.DisallowUnknownFields for an alternative).
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores
+// key-value pairs from the JSON object into the map. The map's key type must
+// either be any string type, an integer, implement json.Unmarshaler, or
+// implement encoding.TextUnmarshaler.
+//
+// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error. In any
+// case, it's not guaranteed that all the remaining fields following
+// the problematic one will be unmarshaled into the target object.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+func Unmarshal(data []byte, v any) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ d := ds.Get().(*decodeState)
+ defer ds.Put(d)
+ //var d decodeState
+ d.useNumber = true
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+var ds = sync.Pool{
+ New: func() any {
+ return new(decodeState)
+ },
+}
+
+func UnmarshalWithKeys(data []byte, v any) ([]string, error) {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+
+ d := ds.Get().(*decodeState)
+ defer ds.Put(d)
+ //var d decodeState
+ d.useNumber = true
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return nil, err
+ }
+
+ d.init(data)
+ err = d.unmarshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ return d.lastKeys, nil
+}
+
+func UnmarshalValid(data []byte, v any) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ d := ds.Get().(*decodeState)
+ defer ds.Put(d)
+ //var d decodeState
+ d.useNumber = true
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+
+ d := ds.Get().(*decodeState)
+ defer ds.Put(d)
+ //var d decodeState
+ d.useNumber = true
+
+ d.init(data)
+ err := d.unmarshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ return d.lastKeys, nil
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+//
+// By convention, to approximate the behavior of Unmarshal itself,
+// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+ Struct string // name of the struct type containing the field
+ Field string // the full path from root node to the field
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ if e.Struct != "" || e.Field != "" {
+ return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
+ }
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+//
+// Deprecated: No longer used; kept for compatibility.
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Pointer {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v any) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Pointer || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ d.scanWhile(scanSkipSpace)
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ err := d.value(rv)
+ if err != nil {
+ return d.addErrorContext(err)
+ }
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// An errorContext provides context for type errors during decoding.
+type errorContext struct {
+ Struct reflect.Type
+ FieldStack []string
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // next read offset in data
+ opcode int // last read result
+ scan scanner
+ errorContext *errorContext
+ savedError error
+ useNumber bool
+ disallowUnknownFields bool
+ lastKeys []string
+}
+
+// readIndex returns the position of the last byte read.
+func (d *decodeState) readIndex() int {
+ return d.off - 1
+}
+
+// phasePanicMsg is used as a panic message when we end up with something that
+// shouldn't happen. It can indicate a bug in the JSON decoder, or that
+// something is editing the data slice while the decoder executes.
+const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?"
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ if d.errorContext != nil {
+ d.errorContext.Struct = nil
+ // Reuse the allocated space for the FieldStack slice.
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
+ }
+ return d
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = d.addErrorContext(err)
+ }
+}
+
+// addErrorContext returns a new error enhanced with information from d.errorContext
+func (d *decodeState) addErrorContext(err error) error {
+ if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) {
+ switch err := err.(type) {
+ case *UnmarshalTypeError:
+ err.Struct = d.errorContext.Struct.Name()
+ err.Field = strings.Join(d.errorContext.FieldStack, ".")
+ }
+ }
+ return err
+}
+
+// skip scans to the end of what was started.
+func (d *decodeState) skip() {
+ s, data, i := &d.scan, d.data, d.off
+ depth := len(s.parseState)
+ for {
+ op := s.step(s, data[i])
+ i++
+ if len(s.parseState) < depth {
+ d.off = i
+ d.opcode = op
+ return
+ }
+ }
+}
+
+// scanNext processes the byte at d.data[d.off].
+func (d *decodeState) scanNext() {
+ if d.off < len(d.data) {
+ d.opcode = d.scan.step(&d.scan, d.data[d.off])
+ d.off++
+ } else {
+ d.opcode = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ }
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+func (d *decodeState) scanWhile(op int) {
+ s, data, i := &d.scan, d.data, d.off
+ for i < len(data) {
+ newOp := s.step(s, data[i])
+ i++
+ if newOp != op {
+ d.opcode = newOp
+ d.off = i
+ return
+ }
+ }
+
+ d.off = len(data) + 1 // mark processed EOF with len+1
+ d.opcode = d.scan.eof()
+}
+
+// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the
+// common case where we're decoding a literal. The decoder scans the input
+// twice, once for syntax errors and to check the length of the value, and the
+// second to perform the decoding.
+//
+// Only in the second step do we use decodeState to tokenize literals, so we
+// know there aren't any syntax errors. We can take advantage of that knowledge,
+// and scan a literal's bytes much more quickly.
+func (d *decodeState) rescanLiteral() {
+ data, i := d.data, d.off
+Switch:
+ switch data[i-1] {
+ case '"': // string
+ for ; i < len(data); i++ {
+ switch data[i] {
+ case '\\':
+ i++ // escaped char
+ case '"':
+ i++ // tokenize the closing quote too
+ break Switch
+ }
+ }
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
+ for ; i < len(data); i++ {
+ switch data[i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ '.', 'e', 'E', '+', '-':
+ default:
+ break Switch
+ }
+ }
+ case 't': // true
+ i += len("rue")
+ case 'f': // false
+ i += len("alse")
+ case 'n': // null
+ i += len("ull")
+ }
+ if i < len(data) {
+ d.opcode = stateEndValue(&d.scan, data[i])
+ } else {
+ d.opcode = scanEnd
+ }
+ d.off = i + 1
+}
+
+// value consumes a JSON value from d.data[d.off-1:], decoding into v, and
+// reads the following byte ahead. If v is invalid, the value is discarded.
+// The first byte of the value has been read already.
+func (d *decodeState) value(v reflect.Value) error {
+ switch d.opcode {
+ default:
+ panic(phasePanicMsg)
+
+ case scanBeginArray:
+ if v.IsValid() {
+ if err := d.array(v); err != nil {
+ return err
+ }
+ } else {
+ d.skip()
+ }
+ d.scanNext()
+
+ case scanBeginObject:
+ if v.IsValid() {
+ if err := d.object(v); err != nil {
+ return err
+ }
+ } else {
+ d.skip()
+ }
+ d.scanNext()
+
+ case scanBeginLiteral:
+ // All bytes inside literal return scanContinue op code.
+ start := d.readIndex()
+ d.rescanLiteral()
+
+ if v.IsValid() {
+ if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() any {
+ switch d.opcode {
+ default:
+ panic(phasePanicMsg)
+
+ case scanBeginArray, scanBeginObject:
+ d.skip()
+ d.scanNext()
+
+ case scanBeginLiteral:
+ v := d.literalInterface()
+ switch v.(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// If it encounters an Unmarshaler, indirect stops and returns that.
+// If decodingNull is true, indirect stops at the first settable pointer so it
+// can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // Issue #24153 indicates that it is generally not a guaranteed property
+ // that you may round-trip a reflect.Value by calling Value.Addr().Elem()
+ // and expect the value to still be settable for values derived from
+ // unexported embedded struct fields.
+ //
+ // The logic below effectively does this when it first addresses the value
+ // (to satisfy possible pointer methods) and continues to dereference
+ // subsequent pointers as necessary.
+ //
+ // After the first round-trip, we set v back to the original value to
+ // preserve the original RW flags contained in reflect.Value.
+ v0 := v
+ haveAddr := false
+
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() {
+ haveAddr = true
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) {
+ haveAddr = false
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Pointer {
+ break
+ }
+
+ if decodingNull && v.CanSet() {
+ break
+ }
+
+ // Prevent infinite loop if v is an interface pointing to its own address:
+ // var v interface{}
+ // v = &v
+ if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
+ v = v.Elem()
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 && v.CanInterface() {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if !decodingNull {
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ }
+
+ if haveAddr {
+ v = v0 // restore original value after round-trip Value.Addr().Elem()
+ haveAddr = false
+ } else {
+ v = v.Elem()
+ }
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into v.
+// The first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) error {
+ // Check for unmarshaler.
+ u, ut, pv := indirect(v, false)
+ if u != nil {
+ start := d.readIndex()
+ d.skip()
+ return u.UnmarshalJSON(d.data[start:d.off])
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ ai := d.arrayInterface()
+ v.Set(reflect.ValueOf(ai))
+ return nil
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
+ d.skip()
+ return nil
+ case reflect.Array, reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndArray {
+ break
+ }
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ if err := d.value(v.Index(i)); err != nil {
+ return err
+ }
+ } else {
+ // Ran out of fixed array: skip.
+ if err := d.value(reflect.Value{}); err != nil {
+ return err
+ }
+ }
+ i++
+
+ // Next token must be , or ].
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode == scanEndArray {
+ break
+ }
+ if d.opcode != scanArrayValue {
+ panic(phasePanicMsg)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+ return nil
+}
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into v.
+// The first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) error {
+ // Check for unmarshaler.
+ u, ut, pv := indirect(v, false)
+ if u != nil {
+ start := d.readIndex()
+ d.skip()
+ return u.UnmarshalJSON(d.data[start:d.off])
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+ v = pv
+ t := v.Type()
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ oi := d.objectInterface()
+ v.Set(reflect.ValueOf(oi))
+ return nil
+ }
+
+ var fields structFields
+
+ // Check type of target:
+ // struct or
+ // map[T1]T2 where T1 is string, an integer type,
+ // or an encoding.TextUnmarshaler
+ switch v.Kind() {
+ case reflect.Map:
+ // Map key must either have string kind, have an integer kind,
+ // or be an encoding.TextUnmarshaler.
+ switch t.Key().Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ default:
+ if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) {
+ d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+ fields = cachedTypeFields(t)
+ // ok
+ default:
+ d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+
+ var mapElem reflect.Value
+ var origErrorContext errorContext
+ if d.errorContext != nil {
+ origErrorContext = *d.errorContext
+ }
+
+ var keys []string
+
+ for {
+ // Read opening " of string key or closing }.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if d.opcode != scanBeginLiteral {
+ panic(phasePanicMsg)
+ }
+
+ // Read key.
+ start := d.readIndex()
+ d.rescanLiteral()
+ item := d.data[start:d.readIndex()]
+ key, ok := unquoteBytes(item)
+ if !ok {
+ panic(phasePanicMsg)
+ }
+
+ keys = append(keys, string(key))
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := t.Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ if i, ok := fields.nameIndex[string(key)]; ok {
+ // Found an exact name match.
+ f = &fields.list[i]
+ } else {
+ // Fall back to the expensive case-insensitive
+ // linear search.
+ for i := range fields.list {
+ ff := &fields.list[i]
+ if ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Pointer {
+ if subv.IsNil() {
+ // If a struct embeds a pointer to an unexported type,
+ // it is not possible to set a newly allocated value
+ // since the field is unexported.
+ //
+ // See https://golang.org/issue/21357
+ if !subv.CanSet() {
+ d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem()))
+ // Invalidate subv to ensure d.value(subv) skips over
+ // the JSON value without assigning it to subv.
+ subv = reflect.Value{}
+ destring = false
+ break
+ }
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ if d.errorContext == nil {
+ d.errorContext = new(errorContext)
+ }
+ d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
+ d.errorContext.Struct = t
+ } else if d.disallowUnknownFields {
+ d.saveError(fmt.Errorf("json: unknown field %q", key))
+ }
+ }
+
+ // Read : before value.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode != scanObjectKey {
+ panic(phasePanicMsg)
+ }
+ d.scanWhile(scanSkipSpace)
+
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ if err := d.literalStore(nullLiteral, subv, false); err != nil {
+ return err
+ }
+ case string:
+ if err := d.literalStore([]byte(qv), subv, true); err != nil {
+ return err
+ }
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ if err := d.value(subv); err != nil {
+ return err
+ }
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kt := t.Key()
+ var kv reflect.Value
+ switch {
+ case reflect.PointerTo(kt).Implements(textUnmarshalerType):
+ kv = reflect.New(kt)
+ if err := d.literalStore(item, kv, true); err != nil {
+ return err
+ }
+ kv = kv.Elem()
+ case kt.Kind() == reflect.String:
+ kv = reflect.ValueOf(key).Convert(kt)
+ default:
+ switch kt.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s := string(key)
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || reflect.Zero(kt).OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
+ break
+ }
+ kv = reflect.ValueOf(n).Convert(kt)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s := string(key)
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || reflect.Zero(kt).OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
+ break
+ }
+ kv = reflect.ValueOf(n).Convert(kt)
+ default:
+ panic("json: Unexpected key type") // should never occur
+ }
+ }
+ if kv.IsValid() {
+ v.SetMapIndex(kv, subv)
+ }
+ }
+
+ // Next token must be , or }.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.errorContext != nil {
+ // Reset errorContext to its original state.
+ // Keep the same underlying array for FieldStack, to reuse the
+ // space and avoid unnecessary allocs.
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
+ d.errorContext.Struct = origErrorContext.Struct
+ }
+ if d.opcode == scanEndObject {
+ break
+ }
+ if d.opcode != scanObjectValue {
+ panic(phasePanicMsg)
+ }
+ }
+
+ if v.Kind() == reflect.Map {
+ d.lastKeys = keys
+ }
+ return nil
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (any, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ //Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return nil
+ }
+ isNull := item[0] == 'n' // null
+ u, ut, pv := indirect(v, isNull)
+ if u != nil {
+ return u.UnmarshalJSON(item)
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return nil
+ }
+ val := "number"
+ switch item[0] {
+ case 'n':
+ val = "null"
+ case 't', 'f':
+ val = "bool"
+ }
+ d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
+ return nil
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ panic(phasePanicMsg)
+ }
+ return ut.UnmarshalText(s)
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ // The main parser checks that only true and false can reach here,
+ // but if this was a quoted string input, it could be anything.
+ if fromQuoted && string(item) != "null" {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ break
+ }
+ switch v.Kind() {
+ case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := item[0] == 't'
+ // The main parser checks that only true and false can reach here,
+ // but if this was a quoted string input, it could be anything.
+ if fromQuoted && string(item) != "true" && string(item) != "false" {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ break
+ }
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ panic(phasePanicMsg)
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ if v.Type() == numberType && !isValidNumber(string(s)) {
+ return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
+ }
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ panic(phasePanicMsg)
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ // s must be a valid number, because it's
+ // already been tokenized.
+ v.SetString(s)
+ break
+ }
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+ return nil
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() (val any) {
+ switch d.opcode {
+ default:
+ panic(phasePanicMsg)
+ case scanBeginArray:
+ val = d.arrayInterface()
+ d.scanNext()
+ case scanBeginObject:
+ val = d.objectInterface()
+ d.scanNext()
+ case scanBeginLiteral:
+ val = d.literalInterface()
+ }
+ return
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []any {
+ var v = make([]any, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndArray {
+ break
+ }
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode == scanEndArray {
+ break
+ }
+ if d.opcode != scanArrayValue {
+ panic(phasePanicMsg)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]any {
+ m := make(map[string]any)
+ for {
+ // Read opening " of string key or closing }.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if d.opcode != scanBeginLiteral {
+ panic(phasePanicMsg)
+ }
+
+ // Read string key.
+ start := d.readIndex()
+ d.rescanLiteral()
+ item := d.data[start:d.readIndex()]
+ key, ok := unquote(item)
+ if !ok {
+ panic(phasePanicMsg)
+ }
+
+ // Read : before value.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode != scanObjectKey {
+ panic(phasePanicMsg)
+ }
+ d.scanWhile(scanSkipSpace)
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode == scanEndObject {
+ break
+ }
+ if d.opcode != scanObjectValue {
+ panic(phasePanicMsg)
+ }
+ }
+ return m
+}
+
+// literalInterface consumes and returns a literal from d.data[d.off-1:] and
+// it reads the following byte ahead. The first byte of the literal has been
+// read already (that's how the caller knows it's a literal).
+func (d *decodeState) literalInterface() any {
+ // All bytes inside literal return scanContinue op code.
+ start := d.readIndex()
+ d.rescanLiteral()
+
+ item := d.data[start:d.readIndex()]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ panic(phasePanicMsg)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ panic(phasePanicMsg)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ var r rune
+ for _, c := range s[2:6] {
+ switch {
+ case '0' <= c && c <= '9':
+ c = c - '0'
+ case 'a' <= c && c <= 'f':
+ c = c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ c = c - 'A' + 10
+ default:
+ return -1
+ }
+ r = r*16 + rune(c)
+ }
+ return r
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
new file mode 100644
index 0000000000..2e6eca4487
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
@@ -0,0 +1,1486 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 7159. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method and encodes the result as a JSON string.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// So that the JSON will be safe to embed inside HTML