diff --git a/go.mod b/go.mod
index 18cc5c1d2c..779ed92957 100644
--- a/go.mod
+++ b/go.mod
@@ -21,14 +21,14 @@ require (
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
golang.org/x/net v0.26.0
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/api v0.30.2
- k8s.io/apimachinery v0.30.2
+ k8s.io/api v0.31.0-alpha.2
+ k8s.io/apimachinery v0.31.0-alpha.2 // replaced with v0.30.2 in replace() block below
k8s.io/code-generator v0.30.2
- k8s.io/component-base v0.30.2
+ k8s.io/component-base v0.31.0-alpha.2 // v0.30.2 uses an outdated, build-breaking type from prometheus; fixed in v0.31
k8s.io/klog/v2 v2.130.1
k8s.io/kube-proxy v0.30.2
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0
- sigs.k8s.io/controller-runtime v0.16.0
+ sigs.k8s.io/controller-runtime v0.18.4
)
require (
@@ -114,13 +114,13 @@ require (
github.com/openshift/library-go v0.0.0-20240621150525-4bb4238aef81
github.com/openshift/machine-config-operator v0.0.1-0.20231002195040-a2469941c0dc
k8s.io/apiextensions-apiserver v0.30.2
- k8s.io/client-go v0.30.2
+ k8s.io/client-go v0.31.0-alpha.2
)
require (
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.44.204 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -133,7 +133,7 @@ require (
github.com/coreos/ignition/v2 v2.15.0 // indirect
github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+ github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/cel-go v0.20.1 // indirect
@@ -141,7 +141,6 @@ require (
github.com/gorilla/websocket v1.5.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
- github.com/onsi/ginkgo/v2 v2.17.3 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
@@ -153,8 +152,14 @@ require (
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
k8s.io/apiserver v0.30.2 // indirect
k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313 // indirect
k8s.io/kms v0.30.2 // indirect
k8s.io/kube-aggregator v0.30.1 // indirect
)
+
+replace (
+ github.com/google/cel-go => github.com/google/cel-go v0.17.8
+ k8s.io/apimachinery => k8s.io/apimachinery v0.30.2
+)
diff --git a/go.sum b/go.sum
index e418e63482..e09774c461 100644
--- a/go.sum
+++ b/go.sum
@@ -31,8 +31,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+QlCWMWxRBepub4DresnOm4eI2ebFGc=
github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
-github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
-github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c h1:icme0QhxrgZOxTBnT6K8dfGLwbKWSOVwPB95XTbo8Ws=
@@ -94,8 +94,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
-github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
@@ -159,8 +159,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
-github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
+github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
+github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -208,7 +208,6 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
@@ -252,8 +251,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
-github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
+github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
@@ -269,7 +268,6 @@ github.com/openshift/library-go v0.0.0-20240621150525-4bb4238aef81/go.mod h1:PdA
github.com/openshift/machine-config-operator v0.0.1-0.20231002195040-a2469941c0dc h1:m8c26gPEv0p621926Kl26kYQvnkOrW7pOirvYzORn24=
github.com/openshift/machine-config-operator v0.0.1-0.20231002195040-a2469941c0dc/go.mod h1:ftCpVtU6Q31exB0DTBn9s2eu90RJESOSisNAruWkvcE=
github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.3.0 h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=
@@ -612,6 +610,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
@@ -635,20 +635,20 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/api v0.31.0-alpha.2 h1:azMbpAFERqtGmgDtg/f7efnxgPBW+8ieyHNKxT97EMI=
+k8s.io/api v0.31.0-alpha.2/go.mod h1:S1X5UjUV8NZmR1vmKIkUpruhr0AWAvocZVZ5zxKMvi4=
k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/apiserver v0.30.2 h1:ACouHiYl1yFI2VFI3YGM+lvxgy6ir4yK2oLOsLI1/tw=
k8s.io/apiserver v0.30.2/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/client-go v0.31.0-alpha.2 h1:13UCBphjOLcqQ1ROBA+y9sr9Bmc/Ss1ypHQEDb6uKas=
+k8s.io/client-go v0.31.0-alpha.2/go.mod h1:wF4N5QBYqOoXntvUsYd5eyfDLqskc/UNDyEF6WvaFIk=
k8s.io/code-generator v0.30.2 h1:ZY1+aGkqZVwKIyGsOzquaeZ5rSfE6wZHur8z3jQAaiw=
k8s.io/code-generator v0.30.2/go.mod h1:RQP5L67QxqgkVquk704CyvWFIq0e6RCMmLTXxjE8dVA=
-k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
-k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
+k8s.io/component-base v0.31.0-alpha.2 h1:bAYhaSt++Mf7x0042QkeKJpzOuMq3KP7WGiLIM2hBcA=
+k8s.io/component-base v0.31.0-alpha.2/go.mod h1:4RdlW5OL0oab6gWaGWjxIcgORwuiuO49gV2GSxJ/9io=
k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313 h1:bKcdZJOPICVmIIuaM9+MXmapE94dn5AYv5ODs1jA43o=
k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
@@ -668,8 +668,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/controller-runtime v0.16.0 h1:5koYaaRVBHDr0LZAJjO5dWzUjMsh6cwa7q1Mmusrdvk=
-sigs.k8s.io/controller-runtime v0.16.0/go.mod h1:77DnuwA8+J7AO0njzv3wbNlMOnGuLrwFr8JPNwx3J7g=
+sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
+sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek=
diff --git a/pkg/client/operatorclient.go b/pkg/client/operatorclient.go
index 9a6ef7ea4b..a4963563c8 100644
--- a/pkg/client/operatorclient.go
+++ b/pkg/client/operatorclient.go
@@ -78,3 +78,12 @@ func (c *OperatorHelperClient) UpdateOperatorStatus(ctx context.Context, resourc
return &ret.Status.OperatorStatus, nil
}
+
+func (c *OperatorHelperClient) GetOperatorStateWithQuorum(ctx context.Context) (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) {
+ instance, err := c.client.Get(ctx, names.OPERATOR_CONFIG, metav1.GetOptions{})
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ return &instance.Spec.OperatorSpec, &instance.Status.OperatorStatus, instance.ResourceVersion, nil
+}
diff --git a/pkg/controller/connectivitycheck/connectivity_check_controller.go b/pkg/controller/connectivitycheck/connectivity_check_controller.go
index ba8d60e7e9..a1a14a9a63 100644
--- a/pkg/controller/connectivitycheck/connectivity_check_controller.go
+++ b/pkg/controller/connectivitycheck/connectivity_check_controller.go
@@ -206,7 +206,14 @@ func (c *connectivityCheckTemplateProvider) generate(ctx context.Context, syncCo
if c.connectivityChecksStatus != currentStatus {
condition := currentStatus
condition.LastTransitionTime = metav1.NewTime(time.Now())
- netConfig := applyconfigv1.Network(names.CLUSTER_CONFIG).WithStatus(applyconfigv1.NetworkStatus().WithConditions(condition))
+ applyCondition := &applyconfigmetav1.ConditionApplyConfiguration{
+ Type: &condition.Type,
+ Status: &condition.Status,
+ LastTransitionTime: &condition.LastTransitionTime,
+ Reason: &condition.Reason,
+ Message: &condition.Message,
+ }
+ netConfig := applyconfigv1.Network(names.CLUSTER_CONFIG).WithStatus(applyconfigv1.NetworkStatus().WithConditions(applyCondition))
_, err := c.configClient.ConfigV1().Networks().Apply(context.TODO(), netConfig, metav1.ApplyOptions{
Force: true,
FieldManager: "cluster-network-operator/connectivity-check-controller",
diff --git a/pkg/controller/operconfig/cluster.go b/pkg/controller/operconfig/cluster.go
index 611b971f3f..9446abb2ba 100644
--- a/pkg/controller/operconfig/cluster.go
+++ b/pkg/controller/operconfig/cluster.go
@@ -3,6 +3,7 @@ package operconfig
import (
"context"
"fmt"
+ apifeatures "github.com/openshift/api/features"
"log"
"reflect"
@@ -24,7 +25,7 @@ import (
// MergeClusterConfig merges in the existing cluster config in to the
// operator config, overwriting any changes to the managed fields.
func (r *ReconcileOperConfig) MergeClusterConfig(ctx context.Context, operConfig *operv1.Network, clusterConfig *configv1.Network) error {
- if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(configv1.FeatureGateNetworkLiveMigration) {
+ if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(apifeatures.FeatureGateNetworkLiveMigration) {
// During network type live migration, all the update to network.operator shall only be handled by the clusterconfig controller
return nil
}
@@ -93,7 +94,7 @@ func (r *ReconcileOperConfig) ClusterNetworkStatus(ctx context.Context, operConf
// Sync status.conditions when live migration is processing
clusterConfigWithConditions := clusterConfig.DeepCopy()
nowTimestamp := metav1.Now()
- if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(configv1.FeatureGateNetworkLiveMigration) {
+ if _, ok := clusterConfig.Annotations[names.NetworkTypeMigrationAnnotation]; ok && r.featureGates.Enabled(apifeatures.FeatureGateNetworkLiveMigration) {
if meta.IsStatusConditionPresentAndEqual(clusterConfig.Status.Conditions, names.NetworkTypeMigrationInProgress, metav1.ConditionTrue) {
err = r.syncNetworkTypeMigrationConditions(ctx, operConfig, clusterConfigWithConditions)
if err != nil {
diff --git a/pkg/controller/operconfig/operconfig_controller.go b/pkg/controller/operconfig/operconfig_controller.go
index 8e00c5d49e..13f661e88d 100644
--- a/pkg/controller/operconfig/operconfig_controller.go
+++ b/pkg/controller/operconfig/operconfig_controller.go
@@ -220,7 +220,7 @@ func add(mgr manager.Manager, r *ReconcileOperConfig) error {
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &corev1.Node{}),
- handler.EnqueueRequestsFromMapFunc(reconcileOperConfig),
+ handler.TypedEnqueueRequestsFromMapFunc[*corev1.Node](reconcileOperConfig),
nodePredicate,
); err != nil {
return err
@@ -609,7 +609,7 @@ func updateIPsecMetric(newOperConfigSpec *operv1.NetworkSpec) {
}
}
-func reconcileOperConfig(ctx context.Context, obj crclient.Object) []reconcile.Request {
+func reconcileOperConfig(ctx context.Context, obj *corev1.Node) []reconcile.Request {
log.Printf("%s %s/%s changed, triggering operconf reconciliation", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetNamespace(), obj.GetName())
// Update reconcile.Request object to align with unnamespaced default network,
// to ensure we don't have multiple requeueing reconcilers running
diff --git a/pkg/controller/pki/pki_controller.go b/pkg/controller/pki/pki_controller.go
index 1b21996883..e7fb5c3e5d 100644
--- a/pkg/controller/pki/pki_controller.go
+++ b/pkg/controller/pki/pki_controller.go
@@ -56,7 +56,7 @@ func Add(mgr manager.Manager, status *statusmanager.StatusManager, _ cnoclient.C
}
// Watch for changes to primary resource PKI.network.operator.openshift.io/v1
- err = c.Watch(source.Kind(mgr.GetCache(), &netopv1.OperatorPKI{}), &handler.EnqueueRequestForObject{})
+ err = c.Watch(source.Kind(mgr.GetCache(), &netopv1.OperatorPKI{}, &handler.TypedEnqueueRequestForObject[*netopv1.OperatorPKI]{}))
if err != nil {
return err
}
@@ -193,9 +193,11 @@ func newPKI(config *netopv1.OperatorPKI, clientset *kubernetes.Clientset, mgr ma
cont := certrotation.NewCertRotationController(
fmt.Sprintf("%s/%s", config.Namespace, config.Name), // name, not really used
certrotation.RotatedSigningCASecret{
- Namespace: config.Namespace,
- Name: config.Name + "-ca",
- JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ Namespace: config.Namespace,
+ Name: config.Name + "-ca",
+ AdditionalAnnotations: certrotation.AdditionalAnnotations{
+ JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ },
Validity: 10 * OneYear,
Refresh: 9 * OneYear,
Informer: inf.Core().V1().Secrets(),
@@ -204,23 +206,26 @@ func newPKI(config *netopv1.OperatorPKI, clientset *kubernetes.Clientset, mgr ma
EventRecorder: &eventrecorder.LoggingRecorder{},
},
certrotation.CABundleConfigMap{
- Namespace: config.Namespace,
- Name: config.Name + "-ca",
- JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ Namespace: config.Namespace,
+ Name: config.Name + "-ca",
+ AdditionalAnnotations: certrotation.AdditionalAnnotations{
+ JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ },
Lister: inf.Core().V1().ConfigMaps().Lister(),
Informer: inf.Core().V1().ConfigMaps(),
Client: clientset.CoreV1(),
EventRecorder: &eventrecorder.LoggingRecorder{},
},
certrotation.RotatedSelfSignedCertKeySecret{
- Namespace: config.Namespace,
- Name: config.Name + "-cert",
- JiraComponent: names.ClusterNetworkOperatorJiraComponent,
- Validity: OneYear / 2,
- Refresh: OneYear / 4,
+ Namespace: config.Namespace,
+ Name: config.Name + "-cert",
+ AdditionalAnnotations: certrotation.AdditionalAnnotations{
+ JiraComponent: names.ClusterNetworkOperatorJiraComponent,
+ },
+ Validity: OneYear / 2,
+ Refresh: OneYear / 4,
CertCreator: &certrotation.ServingRotation{
Hostnames: func() []string { return []string{spec.TargetCert.CommonName} },
-
// Force the certificate to also be client
CertificateExtensionFn: []crypto.CertificateExtensionFunc{
toClientCert,
diff --git a/pkg/network/ovn_kubernetes.go b/pkg/network/ovn_kubernetes.go
index 2b7679d23c..a4d592c8b3 100644
--- a/pkg/network/ovn_kubernetes.go
+++ b/pkg/network/ovn_kubernetes.go
@@ -19,6 +19,7 @@ import (
yaml "github.com/ghodss/yaml"
configv1 "github.com/openshift/api/config/v1"
+ apifeatures "github.com/openshift/api/features"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/cluster-network-operator/pkg/bootstrap"
cnoclient "github.com/openshift/cluster-network-operator/pkg/client"
@@ -304,8 +305,8 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
}
// leverage feature gates
- data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(configv1.FeatureGateAdminNetworkPolicy)
- data.Data["DNS_NAME_RESOLVER_ENABLE"] = featureGates.Enabled(configv1.FeatureGateDNSNameResolver)
+ data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(apifeatures.FeatureGateAdminNetworkPolicy)
+ data.Data["DNS_NAME_RESOLVER_ENABLE"] = featureGates.Enabled(apifeatures.FeatureGateDNSNameResolver)
data.Data["ReachabilityTotalTimeoutSeconds"] = c.EgressIPConfig.ReachabilityTotalTimeoutSeconds
diff --git a/pkg/network/render.go b/pkg/network/render.go
index b73a582e52..fbc4761b28 100644
--- a/pkg/network/render.go
+++ b/pkg/network/render.go
@@ -1,6 +1,7 @@
package network
import (
+ apifeatures "github.com/openshift/api/features"
"log"
"net"
"os"
@@ -623,7 +624,7 @@ func renderCRDForMigration(conf *operv1.NetworkSpec, manifestDir string, feature
// the CRD installation can happen according to whether the feature gate is enabled or not
// in the cluster
data := render.MakeRenderData()
- data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(configv1.FeatureGateAdminNetworkPolicy)
+ data.Data["OVN_ADMIN_NETWORK_POLICY_ENABLE"] = featureGates.Enabled(apifeatures.FeatureGateAdminNetworkPolicy)
manifests, err := render.RenderTemplate(filepath.Join(manifestDir, "network/ovn-kubernetes/common/001-crd.yaml"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render OVNKubernetes CRDs")
diff --git a/pkg/util/validation/network.go b/pkg/util/validation/network.go
index 359d02879e..b721eedc22 100644
--- a/pkg/util/validation/network.go
+++ b/pkg/util/validation/network.go
@@ -3,6 +3,7 @@ package validation
import (
"errors"
"fmt"
+ "k8s.io/apimachinery/pkg/util/validation/field"
"net"
"net/url"
"strconv"
@@ -40,7 +41,7 @@ func Subdomain(v string) error {
// Host validates if host is a valid IP address or subdomain in DNS (RFC 1123).
func Host(host string) error {
errDomain := DomainName(host, false)
- errIP := validation.IsValidIP(host)
+ errIP := validation.IsValidIP(field.NewPath(""), host)
if errDomain != nil && errIP != nil {
return fmt.Errorf("invalid host: %s", host)
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
new file mode 100644
index 0000000000..52cf18e425
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
new file mode 100644
index 0000000000..ab51212676
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
@@ -0,0 +1,68 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system.
+
+Here is a general template for an ANTLR based recognizer in Go:
+
+ .
+ ├── myproject
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.0-complete.jar
+ │ ├── error_listeners.go
+ │ ├── generate.go
+ │ ├── generate.sh
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options.
+
+From the command line at the root of your package “myproject” you can then simply issue the command:
+
+ go generate ./...
+
+# Copyright Notice
+
+Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+*/
+package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
similarity index 94%
rename from vendor/github.com/antlr4-go/antlr/v4/atn.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
index e749ebd0cf..98010d2e6e 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
@@ -4,6 +4,8 @@
package antlr
+import "sync"
+
// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
var ATNInvalidAltNumber int
@@ -18,11 +20,10 @@ var ATNInvalidAltNumber int
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
-
- // DecisionToState is the decision points for all rules, sub-rules, optional
- // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
// can go back later and build DFA predictors for them. This includes
- // all the rules, sub-rules, optional blocks, ()+, ()* etc...
+ // all the rules, subrules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -50,13 +51,11 @@ type ATN struct {
// specified, and otherwise is nil.
ruleToTokenType []int
- // ATNStates is a list of all states in the ATN, ordered by state number.
- //
states []ATNState
- mu Mutex
- stateMu RWMutex
- edgeMu RWMutex
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
}
// NewATN returns a new ATN struct representing the given grammarType and is used
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
new file mode 100644
index 0000000000..7619fa172e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
@@ -0,0 +1,303 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ Equals(o Collectable[ATNConfig]) bool
+ Hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
+ if b == o {
+ return true
+ } else if o == nil {
+ return false
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.Equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.Equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for BaseATNConfig, when no specialist hash function
+// is required for a collection
+func (b *BaseATNConfig) Hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.GetStateNumber())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.Hash())
+ h = murmurUpdate(h, l.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
+ if l == other {
+ return true
+ }
+ var othert, ok = other.(*LexerATNConfig)
+
+ if l == other {
+ return true
+ } else if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.Equals(othert.BaseATNConfig)
+}
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
new file mode 100644
index 0000000000..43e9b33f3b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
@@ -0,0 +1,441 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type ATNConfigSet interface {
+ Hash() int
+ Equals(o Collectable[ATNConfig]) bool
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() *JStore[ATNState, Comparator[ATNState]]
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ Alts() *BitSet
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from a.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. It not, protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func (b *BaseATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare is a hack function just to verify that adding DFAstares to the known
+// set works, so long as comparison of ATNConfigSet s works. For that to work, we
+// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
+// know the order, so we do this inefficient hack. If this proves the point, then
+// we can change the config set to a better structure.
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+
+ for _, c := range b.configs {
+ found := false
+ for _, c2 := range bs.configs {
+ if c.Equals(c2) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return false
+ }
+
+ }
+ return true
+}
+
+func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *BaseATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func hashATNConfig(i interface{}) int {
+ o := i.(ATNConfig)
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
+ return false
+ }
+
+ if ai.GetAlt() != bi.GetAlt() {
+ return false
+ }
+
+ return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
similarity index 86%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
index bdb30b3622..3c975ec7bf 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
@@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.readOnly = readOnly
}
@@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.verifyATN = verifyATN
}
@@ -42,12 +42,11 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
if opts.readOnly {
- panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}
-//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
similarity index 97%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
index 2dcb9ae11b..3888856b4b 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
@@ -35,7 +35,6 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
return &ATNDeserializer{options: options}
}
-//goland:noinspection GoUnusedFunction
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
@@ -194,7 +193,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
}
}
-func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
m := a.readInt()
// Preallocate the needed capacity.
@@ -351,7 +350,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart.endState = bypassStop
- atn.defineDecisionState(&bypassStart.BaseDecisionState)
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
@@ -451,7 +450,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
continue
}
- // We analyze the [ATN] to determine if an ATN decision state is the
+ // We analyze the ATN to determine if a ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
@@ -554,7 +553,7 @@ func (a *ATNDeserializer) readInt() int {
return int(v) // data is 32 bits but int is at least that big
}
-func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
similarity index 66%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
index afe6c9f809..41529115fa 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
@@ -4,7 +4,7 @@
package antlr
-var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
type IATNSimulator interface {
SharedContextCache() *PredictionContextCache
@@ -18,13 +18,22 @@ type BaseATNSimulator struct {
decisionToDFA []*DFA
}
-func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
if b.sharedContextCache == nil {
return context
}
- //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
- visited := NewVisitRecord()
+ visited := make(map[PredictionContext]PredictionContext)
+
return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
similarity index 65%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_state.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
index 2ae5807cdb..1f2a56bc31 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
@@ -4,11 +4,7 @@
package antlr
-import (
- "fmt"
- "os"
- "strconv"
-)
+import "strconv"
// Constants for serialization.
const (
@@ -29,7 +25,6 @@ const (
ATNStateInvalidStateNumber = -1
)
-//goland:noinspection GoUnusedGlobalVariable
var ATNStateInitialNumTransitions = 4
type ATNState interface {
@@ -78,7 +73,7 @@ type BaseATNState struct {
transitions []Transition
}
-func NewATNState() *BaseATNState {
+func NewBaseATNState() *BaseATNState {
return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}
@@ -153,46 +148,27 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) {
if len(as.transitions) == 0 {
as.epsilonOnlyTransitions = trans.getIsEpsilon()
} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
- _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
as.epsilonOnlyTransitions = false
}
- // TODO: Check code for already present compared to the Java equivalent
- //alreadyPresent := false
- //for _, t := range as.transitions {
- // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
- // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
- // alreadyPresent = true
- // break
- // }
- // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
- // alreadyPresent = true
- // break
- // }
- //}
- //if !alreadyPresent {
if index == -1 {
as.transitions = append(as.transitions, trans)
} else {
as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
// TODO: as.transitions.splice(index, 1, trans)
}
- //} else {
- // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
- //}
}
type BasicState struct {
- BaseATNState
+ *BaseATNState
}
func NewBasicState() *BasicState {
- return &BasicState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
}
type DecisionState interface {
@@ -206,19 +182,13 @@ type DecisionState interface {
}
type BaseDecisionState struct {
- BaseATNState
+ *BaseATNState
decision int
nonGreedy bool
}
func NewBaseDecisionState() *BaseDecisionState {
- return &BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- decision: -1,
- }
+ return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}
func (s *BaseDecisionState) getDecision() int {
@@ -246,20 +216,12 @@ type BlockStartState interface {
// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
- BaseDecisionState
+ *BaseDecisionState
endState *BlockEndState
}
func NewBlockStartState() *BaseBlockStartState {
- return &BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBasic,
- },
- decision: -1,
- },
- }
+ return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}
func (s *BaseBlockStartState) getEndState() *BlockEndState {
@@ -271,38 +233,31 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
}
type BasicBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
}
func NewBasicBlockStartState() *BasicBlockStartState {
- return &BasicBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &BasicBlockStartState{}
// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
- BaseATNState
+ *BaseATNState
startState ATNState
}
func NewBlockEndState() *BlockEndState {
- return &BlockEndState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateBlockEnd,
- },
- startState: nil,
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
}
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
@@ -310,48 +265,43 @@ func NewBlockEndState() *BlockEndState {
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
- BaseATNState
+ *BaseATNState
}
func NewRuleStopState() *RuleStopState {
- return &RuleStopState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateRuleStop,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
}
type RuleStartState struct {
- BaseATNState
+ *BaseATNState
stopState ATNState
isPrecedenceRule bool
}
func NewRuleStartState() *RuleStartState {
- return &RuleStartState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateRuleStart,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
}
// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
- BaseDecisionState
+ *BaseDecisionState
}
func NewPlusLoopbackState() *PlusLoopbackState {
- return &PlusLoopbackState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStatePlusLoopBack,
- },
- },
- }
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
}
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
@@ -359,103 +309,85 @@ func NewPlusLoopbackState() *PlusLoopbackState {
// it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
loopBackState ATNState
}
func NewPlusBlockStartState() *PlusBlockStartState {
- return &PlusBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStatePlusBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &PlusBlockStartState{}
// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
- BaseBlockStartState
+ *BaseBlockStartState
}
func NewStarBlockStartState() *StarBlockStartState {
- return &StarBlockStartState{
- BaseBlockStartState: BaseBlockStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarBlockStart,
- },
- },
- },
- }
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &StarBlockStartState{}
type StarLoopbackState struct {
- BaseATNState
+ *BaseATNState
}
func NewStarLoopbackState() *StarLoopbackState {
- return &StarLoopbackState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarLoopBack,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
}
type StarLoopEntryState struct {
- BaseDecisionState
+ *BaseDecisionState
loopBackState ATNState
precedenceRuleDecision bool
}
func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
// False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
- return &StarLoopEntryState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateStarLoopEntry,
- },
- },
- }
+ return &StarLoopEntryState{BaseDecisionState: b}
}
// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
- BaseATNState
+ *BaseATNState
loopBackState ATNState
}
func NewLoopEndState() *LoopEndState {
- return &LoopEndState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateLoopEnd,
- },
- }
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
}
// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
- BaseDecisionState
+ *BaseDecisionState
}
func NewTokensStartState() *TokensStartState {
- return &TokensStartState{
- BaseDecisionState: BaseDecisionState{
- BaseATNState: BaseATNState{
- stateNumber: ATNStateInvalidStateNumber,
- stateType: ATNStateTokenStart,
- },
- },
- }
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/atn_type.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
similarity index 89%
rename from vendor/github.com/antlr4-go/antlr/v4/char_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
index bd8127b6b5..c33f0adb5e 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
@@ -8,5 +8,5 @@ type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
- GetTextFromInterval(Interval) string
+ GetTextFromInterval(*Interval) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
similarity index 100%
rename from vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
similarity index 88%
rename from vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
index b75da9df08..c6c9485a20 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
@@ -28,24 +28,22 @@ type CommonTokenStream struct {
// trivial with bt field.
fetchedEOF bool
- // index into [tokens] of the current token (next token to consume).
+ // index indexs into tokens of the current token (next token to consume).
// tokens[p] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
- // see the documentation of [IntStream] for a description of initializing methods.
+ // see the documentation of IntStream for a description of initializing methods.
index int
- // tokenSource is the [TokenSource] from which tokens for the bt stream are
+ // tokenSource is the TokenSource from which tokens for the bt stream are
// fetched.
tokenSource TokenSource
- // tokens contains all tokens fetched from the token source. The list is considered a
+ // tokens is all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
-// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
-// tokens and will pull tokens from the given lexer channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
@@ -55,7 +53,6 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
}
}
-// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
@@ -64,11 +61,9 @@ func (c *CommonTokenStream) Mark() int {
return 0
}
-func (c *CommonTokenStream) Release(_ int) {}
+func (c *CommonTokenStream) Release(marker int) {}
-func (c *CommonTokenStream) Reset() {
- c.fetchedEOF = false
- c.tokens = make([]Token, 0)
+func (c *CommonTokenStream) reset() {
c.Seek(0)
}
@@ -112,7 +107,7 @@ func (c *CommonTokenStream) Consume() {
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
- n := i - len(c.tokens) + 1 // How many more elements do we need?
+ n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
@@ -198,13 +193,12 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
- c.fetchedEOF = false
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
-// no tokens on channel between 'i' and [TokenEOF].
-func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
+// no tokens on channel between i and EOF.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
c.Sync(i)
if i >= len(c.tokens) {
@@ -250,7 +244,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
- // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
+ // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
var to int
if nextOnChannel == -1 {
@@ -320,8 +314,7 @@ func (c *CommonTokenStream) Index() int {
}
func (c *CommonTokenStream) GetAllText() string {
- c.Fill()
- return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
+ return c.GetTextFromInterval(nil)
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
@@ -336,9 +329,15 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
return c.GetTextFromInterval(interval.GetSourceInterval())
}
-func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
- c.Sync(interval.Stop)
+
+ if interval == nil {
+ c.Fill()
+ interval = NewInterval(0, len(c.tokens)-1)
+ } else {
+ c.Sync(interval.Stop)
+ }
start := interval.Start
stop := interval.Stop
diff --git a/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
similarity index 82%
rename from vendor/github.com/antlr4-go/antlr/v4/comparators.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
index 7467e9b43d..9ea3200536 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/comparators.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
@@ -18,20 +18,17 @@ package antlr
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
-// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}
var (
- aStateEqInst = &ObjEqComparator[ATNState]{}
- aConfEqInst = &ObjEqComparator[*ATNConfig]{}
-
- // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
- aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
- atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[ATNConfig]{}
+ aConfCompInst = &ATNConfigComparator[ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
- atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
- pContextEqInst = &ObjEqComparator[*PredictionContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
)
// Equals2 delegates to the Equals() method of type T
@@ -47,14 +44,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
type SemCComparator[T Collectable[T]] struct{}
-// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -75,8 +72,7 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
-
+func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
@@ -89,7 +85,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -109,21 +105,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
+func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}
-// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
-func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -145,6 +141,7 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
-func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
+
return o.Hash()
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
similarity index 76%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
index 6b63eb1589..bfd43e1f73 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
@@ -4,8 +4,6 @@
package antlr
-// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
-// reach and the transitions between them.
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -14,9 +12,10 @@ type DFA struct {
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
- // good, but the DFAState is an object and can't be used directly as the key as it can in say Java
+ // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
// amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
- // to see if they really are the same object. Hence, we have our own map storage.
+ // to see if they really are the same object.
+ //
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
@@ -33,11 +32,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
- states: nil, // Lazy initialize
+ states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
- dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
+ dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
@@ -96,11 +95,12 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
- d.states = nil // Lazy initialize
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
d.numstates = 0
if precedenceDfa {
- precedenceState := NewDFAState(-1, NewATNConfigSet(false))
+ precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
@@ -113,31 +113,6 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
}
}
-// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
-// instantiation of the states JMap.
-func (d *DFA) Len() int {
- if d.states == nil {
- return 0
- }
- return d.states.Len()
-}
-
-// Get returns a state that matches s if it is present in the DFA state set. We defer to this
-// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
-func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
- if d.states == nil {
- return nil, false
- }
- return d.states.Get(s)
-}
-
-func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
- if d.states == nil {
- d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
- }
- return d.states.Put(s)
-}
-
func (d *DFA) getS0() *DFAState {
return d.s0
}
@@ -146,11 +121,9 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
-// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
+// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
- if d.states == nil {
- return []*DFAState{}
- }
+
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
similarity index 97%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
index 0e11009899..84d0a31e53 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
@@ -10,7 +10,7 @@ import (
"strings"
)
-// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
dfa *DFA
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
similarity index 81%
rename from vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
index 6541430745..c90dec55c8 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
@@ -22,31 +22,30 @@ func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
-// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
-// after reading input a1, a2,..an, the DFA is in a state that represents the
+// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
-// state along some path labeled a1a2..an."
-//
-// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
-// states the [ATN] could be in. We need to track the alt predicted by each state
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
-// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
-// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
-// A [DFAState] may have multiple references to a particular state, but with
-// different [ATN] contexts (with same or different alts) meaning that state was
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
- configs *ATNConfigSet
+ configs ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
@@ -54,7 +53,7 @@ type DFAState struct {
isAcceptState bool
- // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+ // prediction is the ttype we match or alt we predict if the state is accept.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
@@ -82,9 +81,9 @@ type DFAState struct {
predicates []*PredPrediction
}
-func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
if configs == nil {
- configs = NewATNConfigSet(false)
+ configs = NewBaseATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
@@ -95,7 +94,7 @@ func (d *DFAState) GetAltSet() []int {
var alts []int
if d.configs != nil {
- for _, c := range d.configs.configs {
+ for _, c := range d.configs.GetItems() {
alts = append(alts, c.GetAlt())
}
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
similarity index 92%
rename from vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
index bd2cd8bc3a..c55bcc19b2 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
@@ -33,7 +33,6 @@ type DiagnosticErrorListener struct {
exactOnly bool
}
-//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
@@ -43,7 +42,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
return n
}
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
if d.exactOnly && !exact {
return
}
@@ -56,7 +55,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -65,7 +64,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
@@ -97,12 +96,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
- for _, c := range set.configs {
+ for _, c := range set.GetItems() {
result.add(c.GetAlt())
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
similarity index 62%
rename from vendor/github.com/antlr4-go/antlr/v4/error_listener.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
index 21a0216434..f679f0dcd5 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
@@ -16,29 +16,28 @@ import (
type ErrorListener interface {
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
- ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
- ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
- ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}
type DefaultErrorListener struct {
}
-//goland:noinspection GoUnusedExportedFunction
func NewDefaultErrorListener() *DefaultErrorListener {
return new(DefaultErrorListener)
}
-func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}
-func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}
-func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}
-func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}
type ConsoleErrorListener struct {
@@ -49,16 +48,21 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}
-// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
+// Provides a default instance of {@link ConsoleErrorListener}.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-// SyntaxError prints messages to System.err containing the
-// values of line, charPositionInLine, and msg using
-// the following format:
+// {@inheritDoc}
//
-// line
+// This implementation prints messages to {@link System//err} containing the
+// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
+// the following format. The default implementation simply calls {@link //endErrorCondition}. The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table. The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule. Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g., If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule. During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible. ORIGINS Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule:
+// out of the entire rules surrounding the loop. So, for rule This functionality cost a little bit of effort because the parser has to
+// compare token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off d
+// functionality by simply overriding d method as a blank { }. This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
+// input error. The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
-// [NotifyErrorListeners]
+// {@link Parser//NotifyErrorListeners}. This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
+// input error. The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}. The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method panics with [InputMisMatchException}.
-// TODO: Not sure that panic() is the right thing to do here - JI
+// recovery attempt fails, d method panics an
+// {@link InputMisMatchException}. EXTRA TOKEN (single token deletion) {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
-// the LA(2) token) as the successful result of the Match operation.
+// the {@code LA(2)} token) as the successful result of the Match operation. This recovery strategy is implemented by {@link
+// //singleTokenDeletion}. MISSING TOKEN (single token insertion) If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation. This recovery strategy is implemented by {@link
+// //singleTokenInsertion}. EXAMPLE For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain: This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If d method returns
// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce this behavior.
+// line line:charPositionInLine msg
+//
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
type ProxyErrorListener struct {
@@ -81,19 +85,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol
}
}
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
-func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
similarity index 58%
rename from vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
index 9db2be1c74..5c0a637ba4 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
@@ -21,8 +21,8 @@ type ErrorStrategy interface {
ReportMatch(Parser)
}
-// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
-// error reporting and recovery in ANTLR parsers.
+// This is the default implementation of {@link ANTLRErrorStrategy} used for
+// error Reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
@@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy {
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
- // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // ad nauseum. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
@@ -62,37 +62,50 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
-func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+//
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
-func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
-func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+//
+// @param recognizer
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}
-// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+// {@inheritDoc}
+//
+//
+//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
@@ -115,10 +128,12 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
}
}
-// Recover is the default recovery implementation.
-// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
-// loosely the set of tokens that can follow the current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+// {@inheritDoc}
+//
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
//
-// At the start of a sub-rule upon error, Sync performs single
+// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
-// If the sub-rule is optional
-//
-// ({@code (...)?}, {@code (...)*},
+//
+// classfunc : 'class' ID '{' member* '}'
+//
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
-// This functionality cost a bit of effort because the parser has to
-// compare the token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding this method as empty:
-//
-// { }
-//
-// [Jim Idle]: https://github.com/jimidle
+//
+// stat &rarr expr &rarr atom
+//
//
-// and it will be trying to Match the ')' at this point in the
+// and it will be trying to Match the {@code ')'} at d point in the
// derivation:
//
-// : ID '=' '(' INT ')' ('+' atom)* ';'
-// ^
+//
+// => ID '=' '(' INT ')' ('+' atom)* ”
+// ^
+//
//
-// The attempt to [Match] ')' will fail when it sees ';' and
-// call [RecoverInline]. To recover, it sees that LA(1)==';'
-// is in the set of tokens that can follow the ')' token reference
-// in rule atom. It can assume that you forgot the ')'.
+// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
@@ -362,24 +396,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
return d.GetMissingSymbol(recognizer)
}
// even that didn't work must panic the exception
- recognizer.SetError(NewInputMisMatchException(recognizer))
- return nil
+ panic(NewInputMisMatchException(recognizer))
}
-// SingleTokenInsertion implements the single-token insertion inline error recovery
-// strategy. It is called by [RecoverInline] if the single-token
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
-// This method determines whether single-token insertion is viable by
-// checking if the LA(1) input symbol could be successfully Matched
-// if it were instead the LA(2) symbol. If this method returns
+//
If the single-token deletion is successful, d method calls +// {@link //ReportUnwantedToken} to Report the error, followed by +// {@link Parser//consume} to actually "delete" the extraneous token. Then, +// before returning {@link //ReportMatch} is called to signal a successful +// Match.
// -// The func returns the successfully Matched [Token] instance if single-token -// deletion successfully recovers from the mismatched input, otherwise nil. +// @param recognizer the parser instance +// @return the successfully Matched {@link Token} instance if single-token +// deletion successfully recovers from the mismatched input, otherwise +// {@code nil} func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { NextTokenType := recognizer.GetTokenStream().LA(2) expecting := d.GetExpectedTokens(recognizer) @@ -431,28 +467,24 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { return nil } -// GetMissingSymbol conjures up a missing token during error recovery. +// Conjure up a missing token during error recovery. // // The recognizer attempts to recover from single missing // symbols. But, actions might refer to that missing symbol. -// For example: -// -// x=ID {f($x)}. -// -// The action clearly assumes +// For example, x=ID {f($x)}. The action clearly assumes // that there has been an identifier Matched previously and that // $x points at that token. If that token is missing, but // the next token in the stream is what we want we assume that -// this token is missing, and we keep going. Because we +// d token is missing and we keep going. Because we // have to return some token to replace the missing token, // we have to conjure one up. This method gives the user control // over the tokens returned for missing tokens. Mostly, // you will want to create something special for identifier // tokens. For literals such as '{' and ',', the default // action in the parser or tree parser works. It simply creates -// a [CommonToken] of the appropriate type. The text will be the token name. -// If you need to change which tokens must be created by the lexer, -// override this method to create the appropriate tokens. +// a CommonToken of the appropriate type. The text will be the token. 
+// If you change what tokens must be created by the lexer, +// override d method to create the appropriate tokens. func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { currentSymbol := recognizer.GetCurrentToken() expecting := d.GetExpectedTokens(recognizer) @@ -466,7 +498,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { if expectedTokenType > 0 && expectedTokenType < len(ln) { tokenText = "+// This error strategy is useful in the following scenarios.
// -// - Silent validation: When syntax errors are not being -// Reported or logged, and the parse result is simply ignored if errors occur, -// the [BailErrorStrategy] avoids wasting work on recovering from errors -// when the result will be ignored either way. +//+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
// -// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)] +// @see Parser//setErrorHandler(ANTLRErrorStrategy) + type BailErrorStrategy struct { *DefaultErrorStrategy } var _ ErrorStrategy = &BailErrorStrategy{} -//goland:noinspection GoUnusedExportedFunction func NewBailErrorStrategy() *BailErrorStrategy { b := new(BailErrorStrategy) @@ -672,10 +703,10 @@ func NewBailErrorStrategy() *BailErrorStrategy { return b } -// Recover Instead of recovering from exception e, re-panic it wrapped -// in a [ParseCancellationException] so it is not caught by the -// rule func catches. Use Exception.GetCause() to get the -// original [RecognitionException]. +// Instead of recovering from exception {@code e}, re-panic it wrapped +// in a {@link ParseCancellationException} so it is not caught by the +// rule func catches. Use {@link Exception//getCause()} to get the +// original {@link RecognitionException}. func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context := recognizer.GetParserRuleContext() for context != nil { @@ -686,10 +717,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context = nil } } - recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly + panic(NewParseCancellationException()) // TODO we don't emit e properly } -// RecoverInline makes sure we don't attempt to recover inline if the parser +// Make sure we don't attempt to recover inline if the parser // successfully recovers, it won't panic an exception. func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { b.Recover(recognizer, NewInputMisMatchException(recognizer)) @@ -697,6 +728,7 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { return nil } -// Sync makes sure we don't attempt to recover from problems in sub-rules. 
-func (b *BailErrorStrategy) Sync(_ Parser) { +// Make sure we don't attempt to recover from problems in subrules.// +func (b *BailErrorStrategy) Sync(recognizer Parser) { + // pass } diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go similarity index 73% rename from vendor/github.com/antlr4-go/antlr/v4/errors.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go index 8f0f2f601f..3954c13782 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/errors.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go @@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In // } else { // stack := NewError().stack // } - // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int + // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int t := new(BaseRecognitionException) @@ -43,17 +43,15 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In t.recognizer = recognizer t.input = input t.ctx = ctx - - // The current Token when an error occurred. Since not all streams + // The current {@link Token} when an error occurred. Since not all streams // support accessing symbols by index, we have to track the {@link Token} // instance itself. - // t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the - // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. - // + // occurred. For {@link NoViableAltException} and + // {@link LexerNoViableAltException} exceptions, this is the + // {@link DecisionState} number. For others, it is the state whose outgoing + // edge we couldn't Match. 
t.offendingState = -1 if t.recognizer != nil { t.offendingState = t.recognizer.GetState() @@ -76,15 +74,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream { //If the state number is not known, b method returns -1.
-// getExpectedTokens gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time this exception was raised. +// Gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time b exception was panicn. // -// If the set of expected tokens is not known and could not be computed, -// this method returns nil. +//If the set of expected tokens is not known and could not be computed, +// b method returns {@code nil}.
// -// The func returns the set of token types that could potentially follow the current -// state in the {ATN}, or nil if the information is not available. - +// @return The set of token types that could potentially follow the current +// state in the ATN, or {@code nil} if the information is not available. +// / func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { if b.recognizer != nil { return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) @@ -101,10 +99,10 @@ type LexerNoViableAltException struct { *BaseRecognitionException startIndex int - deadEndConfigs *ATNConfigSet + deadEndConfigs ATNConfigSet } -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { l := new(LexerNoViableAltException) @@ -130,16 +128,14 @@ type NoViableAltException struct { startToken Token offendingToken Token ctx ParserRuleContext - deadEndConfigs *ATNConfigSet + deadEndConfigs ATNConfigSet } -// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths +// Indicates that the parser could not decide which of two or more paths // to take based upon the remaining input. It tracks the starting token // of the offending input and also knows where the parser was -// in the various paths when the error. -// -// Reported by [ReportNoViableAlternative] -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { +// in the various paths when the error. 
Reported by ReportNoViableAlternative() +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { if ctx == nil { ctx = recognizer.GetParserRuleContext() @@ -161,14 +157,12 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1) + // input.LT(1)?// n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. - // - // At the time the error occurred, of course the stream needs to keep a - // buffer of all the tokens, but later we might not have access to those. + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) n.startToken = startToken n.offendingToken = offendingToken @@ -179,7 +173,7 @@ type InputMisMatchException struct { *BaseRecognitionException } -// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as +// This signifies any kind of mismatched input exceptions such as // when the current input does not Match the expected token. func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { @@ -192,10 +186,11 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { } -// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates +// A semantic predicate failed during validation. Validation of predicates // occurs when normally parsing the alternative just like Matching a token. 
// Disambiguating predicate evaluation occurs when we test a predicate during // prediction. + type FailedPredicateException struct { *BaseRecognitionException @@ -204,7 +199,6 @@ type FailedPredicateException struct { predicate string } -//goland:noinspection GoUnusedExportedFunction func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { f := new(FailedPredicateException) @@ -237,21 +231,6 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri type ParseCancellationException struct { } -func (p ParseCancellationException) GetOffendingToken() Token { - //TODO implement me - panic("implement me") -} - -func (p ParseCancellationException) GetMessage() string { - //TODO implement me - panic("implement me") -} - -func (p ParseCancellationException) GetInputStream() IntStream { - //TODO implement me - panic("implement me") -} - func NewParseCancellationException() *ParseCancellationException { // Error.call(this) // Error.captureStackTrace(this, ParseCancellationException) diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go similarity index 52% rename from vendor/github.com/antlr4-go/antlr/v4/file_stream.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go index 5f65f809be..bd6ad5efe3 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go @@ -5,7 +5,8 @@ package antlr import ( - "bufio" + "bytes" + "io" "os" ) @@ -13,53 +14,34 @@ import ( // when you construct the object. 
type FileStream struct { - InputStream + *InputStream + filename string } -//goland:noinspection GoUnusedExportedFunction func NewFileStream(fileName string) (*FileStream, error) { + buf := bytes.NewBuffer(nil) + f, err := os.Open(fileName) if err != nil { return nil, err } - - defer func(f *os.File) { - errF := f.Close() - if errF != nil { - } - }(f) - - reader := bufio.NewReader(f) - fInfo, err := f.Stat() + defer f.Close() + _, err = io.Copy(buf, f) if err != nil { return nil, err } - fs := &FileStream{ - InputStream: InputStream{ - index: 0, - name: fileName, - }, - filename: fileName, - } + fs := new(FileStream) - // Pre-build the buffer and read runes efficiently - // - fs.data = make([]rune, 0, fInfo.Size()) - for { - r, _, err := reader.ReadRune() - if err != nil { - break - } - fs.data = append(fs.data, r) - } - fs.size = len(fs.data) // Size in runes + fs.filename = fileName + s := string(buf.Bytes()) + + fs.InputStream = NewInputStream(s) - // All done. - // return fs, nil + } func (f *FileStream) GetSourceName() string { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go new file mode 100644 index 0000000000..a8b889cedb --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go @@ -0,0 +1,113 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type InputStream struct { + name string + index int + data []rune + size int +} + +func NewInputStream(data string) *InputStream { + + is := new(InputStream) + + is.name = "The {@code Skip} command does not have any parameters, so l action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerSkipAction struct { *BaseLexerAction } @@ -90,22 +73,17 @@ func NewLexerSkipAction() *LexerSkipAction { return la } -// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. +// Provides a singleton instance of l parameterless lexer action. var LexerSkipActionINSTANCE = NewLexerSkipAction() func (l *LexerSkipAction) execute(lexer Lexer) { lexer.Skip() } -// String returns a string representation of the current [LexerSkipAction]. func (l *LexerSkipAction) String() string { return "skip" } -func (b *LexerSkipAction) Equals(other LexerAction) bool { - return other.getActionType() == LexerActionTypeSkip -} - // Implements the {@code type} lexer action by calling {@link Lexer//setType} // // with the assigned type. @@ -147,10 +125,11 @@ func (l *LexerTypeAction) String() string { return "actionType(" + strconv.Itoa(l.thetype) + ")" } -// LexerPushModeAction implements the pushMode lexer action by calling -// [Lexer.pushMode] with the assigned mode. +// Implements the {@code pushMode} lexer action by calling +// {@link Lexer//pushMode} with the assigned mode. type LexerPushModeAction struct { *BaseLexerAction + mode int } @@ -190,10 +169,10 @@ func (l *LexerPushModeAction) String() string { return "pushMode(" + strconv.Itoa(l.mode) + ")" } -// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. +// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. // -// The popMode command does not have any parameters, so this action is -// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] +//The {@code popMode} command does not have any parameters, so l action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
type LexerPopModeAction struct { *BaseLexerAction } @@ -245,10 +224,11 @@ func (l *LexerMoreAction) String() string { return "more" } -// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with +// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with // the assigned mode. type LexerModeAction struct { *BaseLexerAction + mode int } @@ -342,19 +322,16 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool { } } -// LexerChannelAction implements the channel lexer action by calling -// [Lexer.setChannel] with the assigned channel. -// -// Constructs a new channel action with the specified channel value. +// Implements the {@code channel} lexer action by calling +// {@link Lexer//setChannel} with the assigned channel. +// Constructs a New{@code channel} action with the specified channel value. +// @param channel The channel value to pass to {@link Lexer//setChannel}. type LexerChannelAction struct { *BaseLexerAction + channel int } -// NewLexerChannelAction creates a channel lexer action by calling -// [Lexer.setChannel] with the assigned channel. -// -// Constructs a new channel action with the specified channel value. func NewLexerChannelAction(channel int) *LexerChannelAction { l := new(LexerChannelAction) l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) @@ -398,22 +375,25 @@ func (l *LexerChannelAction) String() string { // lexer actions, see {@link LexerActionExecutor//append} and // {@link LexerActionExecutor//fixOffsetBeforeMatch}. +// Constructs a Newindexed custom action by associating a character offset +// with a {@link LexerAction}. +// +//Note: This class is only required for lexer actions for which +// {@link LexerAction//isPositionDependent} returns {@code true}.
+// +// @param offset The offset into the input {@link CharStream}, relative to +// the token start index, at which the specified lexer action should be +// executed. +// @param action The lexer action to execute at a particular offset in the +// input {@link CharStream}. type LexerIndexedCustomAction struct { *BaseLexerAction + offset int lexerAction LexerAction isPositionDependent bool } -// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset -// with a [LexerAction]. -// -// Note: This class is only required for lexer actions for which -// [LexerAction.isPositionDependent] returns true. -// -// The offset points into the input [CharStream], relative to -// the token start index, at which the specified lexerAction should be -// executed. func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { l := new(LexerIndexedCustomAction) diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go similarity index 70% rename from vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go index dfc28c32b3..be1ba7a7e3 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go @@ -29,20 +29,28 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { l.lexerActions = lexerActions // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link ATNConfig//hashCode} operation. - l.cachedHash = murmurInit(0) + // of the performance-critical {@link LexerATNConfig//hashCode} operation. 
+ l.cachedHash = murmurInit(57) for _, a := range lexerActions { l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) } - l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) return l } -// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for -// the input [LexerActionExecutor] followed by a specified -// [LexerAction]. -// TODO: This does not match the Java code +// Creates a {@link LexerActionExecutor} which executes the actions for +// the input {@code lexerActionExecutor} followed by a specified +// {@code lexerAction}. +// +// @param lexerActionExecutor The executor for actions already traversed by +// the lexer while Matching a token within a particular +// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as +// though it were an empty executor. +// @param lexerAction The lexer action to execute after the actions +// specified in {@code lexerActionExecutor}. +// +// @return A {@link LexerActionExecutor} for executing the combine actions +// of {@code lexerActionExecutor} and {@code lexerAction}. func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { if lexerActionExecutor == nil { return NewLexerActionExecutor([]LexerAction{lexerAction}) @@ -51,42 +59,47 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) } -// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset +// Creates a {@link LexerActionExecutor} which encodes the current offset // for position-dependent lexer actions. // -// Normally, when the executor encounters lexer actions where -// [LexerAction.isPositionDependent] returns true, it calls -// [IntStream.Seek] on the input [CharStream] to set the input -// position to the end of the current token. 
This behavior provides -// for efficient [DFA] representation of lexer actions which appear at the end +//Normally, when the executor encounters lexer actions where +// {@link LexerAction//isPositionDependent} returns {@code true}, it calls +// {@link IntStream//seek} on the input {@link CharStream} to set the input +// position to the end of the current token. This behavior provides +// for efficient DFA representation of lexer actions which appear at the end // of a lexer rule, even when the lexer rule Matches a variable number of -// characters. +// characters.
// -// Prior to traversing a Match transition in the [ATN], the current offset +//Prior to traversing a Match transition in the ATN, the current offset // from the token start index is assigned to all position-dependent lexer // actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the [DFA] representation of +// the offsets relative to the token start index, the DFA representation of // lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same Length, regardless of their absolute -// position in the input stream. +// to sharing among tokens of the same length, regardless of their absolute +// position in the input stream.
// -// If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns this instance. +//If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns {@code this}.
// -// The offset is assigned to all position-dependent +// @param offset The current offset to assign to all position-dependent // lexer actions which do not already have offsets assigned. // -// The func returns a [LexerActionExecutor] that stores input stream offsets +// @return A {@link LexerActionExecutor} which stores input stream offsets // for all position-dependent lexer actions. +// / func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { var updatedLexerActions []LexerAction for i := 0; i < len(l.lexerActions); i++ { _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) if l.lexerActions[i].getIsPositionDependent() && !ok { if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) - updatedLexerActions = append(updatedLexerActions, l.lexerActions...) + updatedLexerActions = make([]LexerAction, 0) + + for _, a := range l.lexerActions { + updatedLexerActions = append(updatedLexerActions, a) + } } + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) } } diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go similarity index 80% rename from vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go index fe938b0259..c573b75210 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go @@ -10,8 +10,10 @@ import ( "strings" ) -//goland:noinspection GoUnusedGlobalVariable var ( + LexerATNSimulatorDebug = false + LexerATNSimulatorDFADebug = false + LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN @@ -30,11 +32,11 @@ type ILexerATNSimulator interface { } type LexerATNSimulator struct { - BaseATNSimulator + *BaseATNSimulator recog Lexer 
predictionMode int - mergeCache *JPCMap2 + mergeCache DoubleDict startIndex int Line int CharPositionInLine int @@ -44,35 +46,27 @@ type LexerATNSimulator struct { } func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := &LexerATNSimulator{ - BaseATNSimulator: BaseATNSimulator{ - atn: atn, - sharedContextCache: sharedContextCache, - }, - } + l := new(LexerATNSimulator) + + l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) l.decisionToDFA = decisionToDFA l.recog = recog - // The current token's starting index into the character stream. // Shared across DFA to ATN simulation in case the ATN fails and the // DFA did not have a previous accept state. In l case, we use the // ATN-generated exception object. l.startIndex = -1 - - // line number 1..n within the input + // line number 1..n within the input/// l.Line = 1 - // The index of the character relative to the beginning of the line - // 0..n-1 + // 0..n-1/// l.CharPositionInLine = 0 - l.mode = LexerDefaultMode - // Used during DFA/ATN exec to record the most recent accept configuration // info l.prevAccept = NewSimState() - + // done return l } @@ -120,7 +114,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) } oldMode := l.mode @@ -132,7 +126,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { predict := l.execATN(input, next) - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) } return predict @@ -140,18 +134,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) 
int { - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) } if ds0.isAcceptState { - // allow zero-Length tokens + // allow zero-length tokens l.captureSimState(l.prevAccept, input, ds0) } t := input.LA(1) s := ds0 // s is current/from DFA state for { // while more work - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { fmt.Println("execATN loop starting closure: " + s.configs.String()) } @@ -194,7 +188,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { } } t = input.LA(1) - s = target // flip current DFA target becomes new src/from state + s = target // flip current DFA target becomes Newsrc/from state } return l.failOrAccept(l.prevAccept, input, s.configs, t) @@ -220,39 +214,43 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState return nil } target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if runtimeConfig.lexerATNSimulatorDebug && target != nil { + if LexerATNSimulatorDebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target } -// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the -// computed state and corresponding edge to the [DFA]. +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. // -// The func returns the computed target [DFA] state for the given input symbol t. -// If this does not lead to a valid [DFA] state, this method -// returns ATNSimulatorError. +// @param input The input stream +// @param s The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, l method +// returns {@link //ERROR}. 
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { reach := NewOrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach, t) + l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) if len(reach.configs) == 0 { // we got nowhere on t from s if !reach.hasSemanticContext { // we got nowhere on t, don't panic out l knowledge it'd - // cause a fail-over from DFA later. + // cause a failover from DFA later. l.addDFAEdge(s, t, ATNSimulatorError, nil) } // stop when we can't Match any more char return ATNSimulatorError } // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach) + return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) } -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { if l.prevAccept.dfaState != nil { lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) @@ -267,35 +265,34 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) } -// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations -// we can reach upon input t. -// -// Parameter reach is a return parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { +// Given a starting configuration set, figure out all ATN configurations +// we can reach upon input {@code t}. Parameter {@code reach} is a return +// parameter. 
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { // l is used to Skip processing for configs which have a lower priority - // than a runtimeConfig that already reached an accept state for the same rule + // than a config that already reached an accept state for the same rule SkipAlt := ATNInvalidAltNumber - for _, cfg := range closure.configs { - currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt - if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { + for _, cfg := range closure.GetItems() { + currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) + if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { continue } - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) } for _, trans := range cfg.GetState().GetTransitions() { target := l.getReachableTarget(trans, t) if target != nil { - lexerActionExecutor := cfg.lexerActionExecutor + lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) } - treatEOFAsEpsilon := t == TokenEOF - config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) + treatEOFAsEpsilon := (t == TokenEOF) + config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) if l.closure(input, config, reach, currentAltReachedAcceptState, true, treatEOFAsEpsilon) { // any remaining configs for l alt have a lower priority @@ -308,7 +305,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATN } func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if runtimeConfig.lexerATNSimulatorDebug 
{ + if LexerATNSimulatorDebug { fmt.Printf("ACTION %v\n", lexerActionExecutor) } // seek to after last char in token @@ -328,7 +325,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState return nil } -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { configs := NewOrderedATNConfigSet() for i := 0; i < len(p.GetTransitions()); i++ { target := p.GetTransitions()[i].getTarget() @@ -339,24 +336,25 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATN return configs } -// closure since the alternatives within any lexer decision are ordered by -// preference, this method stops pursuing the closure as soon as an accept +// Since the alternatives within any lexer decision are ordered by +// preference, l method stops pursuing the closure as soon as an accept // state is reached. After the first accept state is reached by depth-first -// search from runtimeConfig, all other (potentially reachable) states for -// this rule would have a lower priority. +// search from {@code config}, all other (potentially reachable) states for +// l rule would have a lower priority. // -// The func returns true if an accept state is reached. -func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, +// @return {@code true} if an accept state is reached, otherwise +// {@code false}. 
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - if runtimeConfig.lexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") + if LexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") } _, ok := config.state.(*RuleStopState) if ok { - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { if l.recog != nil { fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) } else { @@ -403,10 +401,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs } // side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, - configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, + configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { - var cfg *ATNConfig + var cfg *LexerATNConfig if trans.getSerializationType() == TransitionRULE { @@ -437,10 +435,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig pt := trans.(*PredicateTransition) - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } - configs.hasSemanticContext = true + configs.SetHasSemanticContext(true) if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } @@ -451,7 +449,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive 
call. The // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In this case, the config needs to be + // isEmpty() is false. In l case, the config needs to be // split into two contexts - one with just the empty path // and another with everything but the empty path. // Unfortunately, the current algorithm does not allow @@ -478,18 +476,26 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig return cfg } -// evaluatePredicate eEvaluates a predicate specified in the lexer. +// Evaluate a predicate specified in the lexer. // -// If speculative is true, this method was called before -// [consume] for the Matched character. This method should call -// [consume] before evaluating the predicate to ensure position -// sensitive values, including [GetText], [GetLine], -// and [GetColumn], properly reflect the current -// lexer state. This method should restore input and the simulator -// to the original state before returning, i.e. undo the actions made by the -// call to [Consume]. +//If {@code speculative} is {@code true}, l method was called before +// {@link //consume} for the Matched character. This method should call +// {@link //consume} before evaluating the predicate to ensure position +// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, +// and {@link Lexer//getcolumn}, properly reflect the current +// lexer state. This method should restore {@code input} and the simulator +// to the original state before returning (i.e. undo the actions made by the +// call to {@link //consume}.
// -// The func returns true if the specified predicate evaluates to true. +// @param input The input stream. +// @param ruleIndex The rule containing the predicate. +// @param predIndex The index of the predicate within the rule. +// @param speculative {@code true} if the current index in {@code input} is +// one character before the predicate's location. +// +// @return {@code true} if the specified predicate evaluates to +// {@code true}. +// / func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided if l.recog == nil { @@ -521,7 +527,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream settings.dfaState = dfaState } -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { if to == nil && cfgs != nil { // leading to l call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes l edge @@ -533,9 +539,10 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // TJP notes: next time through the DFA, we see a pred again and eval. // If that gets us to a previously created (but dangling) DFA // state, we can continue in pure DFA mode from there. 
- // - suppressEdge := cfgs.hasSemanticContext - cfgs.hasSemanticContext = false + // / + suppressEdge := cfgs.HasSemanticContext() + cfgs.SetHasSemanticContext(false) + to = l.addDFAState(cfgs, true) if suppressEdge { @@ -547,7 +554,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // Only track edges within the DFA bounds return to } - if runtimeConfig.lexerATNSimulatorDebug { + if LexerATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) } l.atn.edgeMu.Lock() @@ -565,12 +572,13 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // configurations already. This method also detects the first // configuration containing an ATN rule stop state. Later, when // traversing the DFA, we will know which rule to accept. -func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { +func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState *ATNConfig + var firstConfigWithRuleStopState ATNConfig + + for _, cfg := range configs.GetItems() { - for _, cfg := range configs.configs { _, ok := cfg.GetState().(*RuleStopState) if ok { @@ -580,14 +588,14 @@ func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool } if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor + proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, present := dfa.Get(proposed) + existing, present := dfa.states.Get(proposed) if present { // This state was already present, so just return it. 
@@ -597,11 +605,10 @@ func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool // We need to add the new state // - proposed.stateNumber = dfa.Len() - configs.readOnly = true - configs.configLookup = nil // Not needed now + proposed.stateNumber = dfa.states.Len() + configs.SetReadOnly(true) proposed.configs = configs - dfa.Put(proposed) + dfa.states.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) @@ -613,7 +620,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA { return l.decisionToDFA[mode] } -// GetText returns the text [Match]ed so far for the current token. +// Get the text Matched so far for the current token. func (l *LexerATNSimulator) GetText(input CharStream) string { // index is first lookahead char, don't include. return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go similarity index 72% rename from vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go index dfdff000bc..76689615a6 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go @@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer { return la } +// - Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. 
+// +// / const ( - // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit - // a predicate during analysis if - // - // seeThruPreds==false LL1AnalyzerHitPred = TokenInvalidType ) @@ -38,13 +38,11 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { count := len(s.GetTransitions()) look := make([]*IntervalSet, count) for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - // TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim! - lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) - - // Wipe out lookahead for la alternative if we found nothing, + lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) + seeThruPreds := false // fail to get lookahead upon pred + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) + // Wipe out lookahead for la alternative if we found nothing // or we had a predicate when we !seeThruPreds if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { look[alt] = nil @@ -53,31 +51,32 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { return look } -// Look computes the set of tokens that can follow s in the [ATN] in the -// specified ctx. -// -// If ctx is nil and the end of the rule containing -// s is reached, [EPSILON] is added to the result set. -// -// If ctx is not nil and the end of the outermost rule is -// reached, [EOF] is added to the result set. +// * +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. // -// Parameter s the ATN state, and stopState is the ATN state to stop at. 
This can be a -// [BlockEndState] to detect epsilon paths through a closure. +//If {@code ctx} is {@code nil} and the end of the rule containing +// {@code s} is reached, {@link Token//EPSILON} is added to the result set. +// If {@code ctx} is not {@code nil} and the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.
// -// Parameter ctx is the complete parser context, or nil if the context +// @param s the ATN state +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx the complete parser context, or {@code nil} if the context // should be ignored // -// The func returns the set of tokens that can follow s in the [ATN] in the -// specified ctx. +// @return The set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// / func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { r := NewIntervalSet() - var lookContext *PredictionContext + seeThruPreds := true // ignore preds get all lookahead + var lookContext PredictionContext if ctx != nil { lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) } - la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), - NewBitSet(), true, true) + la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) return r } @@ -111,17 +110,16 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. 
-func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], - calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { returnState := la.atn.states[ctx.getReturnState(i)] la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) } -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - c := NewATNConfig6(s, 0, ctx) + c := NewBaseATNConfig6(s, 0, ctx) if lookBusy.Contains(c) { return @@ -153,7 +151,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look return } - if ctx.pcType != PredictionContextEmpty { + if ctx != BasePredictionContextEMPTY { removed := calledRuleStack.contains(s.GetRuleIndex()) defer func() { if removed { @@ -204,8 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look } } -func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], - calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) 
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go similarity index 80% rename from vendor/github.com/antlr4-go/antlr/v4/parser.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go index fb57ac15db..d26bf06392 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/parser.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go @@ -48,10 +48,8 @@ type BaseParser struct { _SyntaxErrors int } -// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error -// recovery stuff. -// -//goland:noinspection GoUnusedExportedFunction +// p.is all the parsing support code essentially most of it is error +// recovery stuff.// func NewBaseParser(input TokenStream) *BaseParser { p := new(BaseParser) @@ -60,46 +58,39 @@ func NewBaseParser(input TokenStream) *BaseParser { // The input stream. p.input = nil - // The error handling strategy for the parser. The default value is a new // instance of {@link DefaultErrorStrategy}. p.errHandler = NewDefaultErrorStrategy() p.precedenceStack = make([]int, 0) p.precedenceStack.Push(0) - - // The ParserRuleContext object for the currently executing rule. + // The {@link ParserRuleContext} object for the currently executing rule. // p.is always non-nil during the parsing process. p.ctx = nil - - // Specifies whether the parser should construct a parse tree during + // Specifies whether or not the parser should construct a parse tree during // the parsing process. The default value is {@code true}. p.BuildParseTrees = true - - // When setTrace(true) is called, a reference to the - // TraceListener is stored here, so it can be easily removed in a - // later call to setTrace(false). The listener itself is + // When {@link //setTrace}{@code (true)} is called, a reference to the + // {@link TraceListener} is stored here so it can be easily removed in a + // later call to {@link //setTrace}{@code (false)}. 
The listener itself is // implemented as a parser listener so p.field is not directly used by // other parser methods. p.tracer = nil - - // The list of ParseTreeListener listeners registered to receive + // The list of {@link ParseTreeListener} listeners registered to receive // events during the parse. p.parseListeners = nil - // The number of syntax errors Reported during parsing. p.value is - // incremented each time NotifyErrorListeners is called. + // incremented each time {@link //NotifyErrorListeners} is called. p._SyntaxErrors = 0 p.SetInputStream(input) return p } -// This field maps from the serialized ATN string to the deserialized [ATN] with +// p.field maps from the serialized ATN string to the deserialized {@link +// ATN} with // bypass alternatives. // -// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] -// -//goland:noinspection GoUnusedGlobalVariable +// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() var bypassAltsAtnCache = make(map[string]int) // reset the parser's state// @@ -152,13 +143,10 @@ func (p *BaseParser) Match(ttype int) Token { p.Consume() } else { t = p.errHandler.RecoverInline(p) - if p.HasError() { - return nil - } if p.BuildParseTrees && t.GetTokenIndex() == -1 { - - // we must have conjured up a new token during single token - // insertion if it's not the current symbol + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -190,8 +178,9 @@ func (p *BaseParser) MatchWildcard() Token { } else { t = p.errHandler.RecoverInline(p) if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a new token during single token - // insertion if it's not the current symbol + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -213,27 +202,33 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener { return 
p.parseListeners } -// AddParseListener registers listener to receive events during the parsing process. +// Registers {@code listener} to receive events during the parsing process. // -// To support output-preserving grammar transformations (including but not +//To support output-preserving grammar transformations (including but not // limited to left-recursion removal, automated left-factoring, and // optimized code generation), calls to listener methods during the parse // may differ substantially from calls made by -// [ParseTreeWalker.DEFAULT] used after the parse is complete. In +// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In // particular, rule entry and exit events may occur in a different order // during the parse than after the parser. In addition, calls to certain -// rule entry methods may be omitted. +// rule entry methods may be omitted.
+// +//With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same.
+// +//If {@code listener} is {@code nil} or has not been added as a parse +// listener, p.method does nothing.
+// @param listener the listener to remove func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { if p.parseListeners != nil { @@ -278,7 +274,7 @@ func (p *BaseParser) removeParseListeners() { p.parseListeners = nil } -// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. +// Notify any parse listeners of an enter rule event. func (p *BaseParser) TriggerEnterRuleEvent() { if p.parseListeners != nil { ctx := p.ctx @@ -289,7 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() { } } -// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. +// Notify any parse listeners of an exit rule event. +// +// @see //addParseListener func (p *BaseParser) TriggerExitRuleEvent() { if p.parseListeners != nil { // reverse order walk of listeners @@ -316,16 +314,19 @@ func (p *BaseParser) GetTokenFactory() TokenFactory { return p.input.GetTokenSource().GetTokenFactory() } -// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. +// Tell our token source and error strategy about a Newway to create tokens.// func (p *BaseParser) setTokenFactory(factory TokenFactory) { p.input.GetTokenSource().setTokenFactory(factory) } -// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it +// The ATN with bypass alternatives is expensive to create so we create it // lazily. +// +// @panics UnsupportedOperationException if the current parser does not +// implement the {@link //getSerializedATN()} method. func (p *BaseParser) GetATNWithBypassAlts() { - // TODO - Implement this? 
+ // TODO panic("Not implemented!") // serializedAtn := p.getSerializedATN() @@ -353,7 +354,6 @@ func (p *BaseParser) GetATNWithBypassAlts() { // String id = m.Get("ID") // -//goland:noinspection GoUnusedParameter func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { panic("NewParseTreePatternMatcher not implemented!") @@ -386,16 +386,14 @@ func (p *BaseParser) GetTokenStream() TokenStream { return p.input } -// SetTokenStream installs input as the token stream and resets the parser. +// Set the token stream and reset the parser.// func (p *BaseParser) SetTokenStream(input TokenStream) { p.input = nil p.reset() p.input = input } -// GetCurrentToken returns the current token at LT(1). -// -// [Match] needs to return the current input symbol, which gets put +// Match needs to return the current input symbol, which gets put // into the label for the associated token ref e.g., x=ID. func (p *BaseParser) GetCurrentToken() Token { return p.input.LT(1) @@ -448,7 +446,7 @@ func (p *BaseParser) addContextToParseTree() { } } -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { p.SetState(state) p.ctx = localctx p.ctx.SetStart(p.input.LT(1)) @@ -476,7 +474,7 @@ func (p *BaseParser) ExitRule() { func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { localctx.SetAltNumber(altNum) - // if we have a new localctx, make sure we replace existing ctx + // if we have Newlocalctx, make sure we replace existing ctx // that is previous child of parse tree if p.BuildParseTrees && p.ctx != localctx { if p.ctx.GetParent() != nil { @@ -500,7 +498,7 @@ func (p *BaseParser) GetPrecedence() int { return p.precedenceStack[len(p.precedenceStack)-1] } -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, 
precedence int) { p.SetState(state) p.precedenceStack.Push(precedence) p.ctx = localctx @@ -514,7 +512,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, pr // // Like {@link //EnterRule} but for recursive rules. -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { previous := p.ctx previous.SetParent(localctx) previous.SetInvokingState(state) @@ -532,7 +530,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, } func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - _, _ = p.precedenceStack.Pop() + p.precedenceStack.Pop() p.ctx.SetStop(p.input.LT(-1)) retCtx := p.ctx // save current ctx (return value) // unroll so ctx is as it was before call to recursive method @@ -563,22 +561,29 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { return nil } -func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { +func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { return precedence >= p.precedenceStack[len(p.precedenceStack)-1] } -//goland:noinspection GoUnusedParameter func (p *BaseParser) inContext(context ParserRuleContext) bool { // TODO: useful in parser? return false } -// IsExpectedToken checks whether symbol can follow the current state in the -// {ATN}. The behavior of p.method is equivalent to the following, but is +// +// Checks whether or not {@code symbol} can follow the current state in the +// ATN. The behavior of p.method is equivalent to the following, but is // implemented such that the complete context-sensitive follow set does not // need to be explicitly constructed. 
// -// return getExpectedTokens().contains(symbol) +//+// return getExpectedTokens().contains(symbol) +//+// +// @param symbol the symbol type to check +// @return {@code true} if {@code symbol} can follow the current state in +// the ATN, otherwise {@code false}. + func (p *BaseParser) IsExpectedToken(symbol int) bool { atn := p.Interpreter.atn ctx := p.ctx @@ -606,9 +611,11 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool { return false } -// GetExpectedTokens and returns the set of input symbols which could follow the current parser -// state and context, as given by [GetState] and [GetContext], +// Computes the set of input symbols which could follow the current parser +// state and context, as given by {@link //GetState} and {@link //GetContext}, // respectively. +// +// @see ATN//getExpectedTokens(int, RuleContext) func (p *BaseParser) GetExpectedTokens() *IntervalSet { return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) } @@ -619,7 +626,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { return atn.NextTokens(s, nil) } -// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. +// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// func (p *BaseParser) GetRuleIndex(ruleName string) int { var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] if ok { @@ -629,10 +636,13 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int { return -1 } -// GetRuleInvocationStack returns a list of the rule names in your parser instance +// Return List<String> of the rule names in your parser instance // leading up to a call to the current rule. You could override if // you want more details such as the file/line info of where // in the ATN a rule is invoked. +// +// this very useful for error messages. 
+ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { if c == nil { c = p.ctx @@ -658,16 +668,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { return stack } -// GetDFAStrings returns a list of all DFA states used for debugging purposes +// For debugging and other purposes.// func (p *BaseParser) GetDFAStrings() string { return fmt.Sprint(p.Interpreter.decisionToDFA) } -// DumpDFA prints the whole of the DFA for debugging +// For debugging and other purposes.// func (p *BaseParser) DumpDFA() { seenOne := false for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.Len() > 0 { + if dfa.states.Len() > 0 { if seenOne { fmt.Println() } @@ -682,10 +692,8 @@ func (p *BaseParser) GetSourceName() string { return p.GrammarFileName } -// SetTrace installs a trace listener for the parse. -// -// During a parse it is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. This is for quick and dirty debugging. +// During a parse is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. p.is for quick and dirty debugging. 
func (p *BaseParser) SetTrace(trace *TraceListener) { if trace == nil { p.RemoveParseListener(p.tracer) diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go similarity index 64% rename from vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go index 724fa17a19..8bcc46a0d9 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go @@ -10,49 +10,31 @@ import ( "strings" ) -// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over -// a standard JStore so that we can use Lazy instantiation of the JStore, mostly -// to avoid polluting the stats module with a ton of JStore instances with nothing in them. -type ClosureBusy struct { - bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] - desc string -} - -// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules -func NewClosureBusy(desc string) *ClosureBusy { - return &ClosureBusy{ - desc: desc, - } -} - -func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { - if c.bMap == nil { - c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) - } - return c.bMap.Put(config) -} +var ( + ParserATNSimulatorDebug = false + ParserATNSimulatorTraceATNSim = false + ParserATNSimulatorDFADebug = false + ParserATNSimulatorRetryDebug = false + TurnOffLRLoopEntryBranchOpt = false +) type ParserATNSimulator struct { - BaseATNSimulator + *BaseATNSimulator parser Parser predictionMode int input TokenStream startIndex int dfa *DFA - mergeCache *JPCMap + mergeCache *DoubleDict outerContext ParserRuleContext } -//goland:noinspection GoUnusedExportedFunction func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, 
sharedContextCache *PredictionContextCache) *ParserATNSimulator { - p := &ParserATNSimulator{ - BaseATNSimulator: BaseATNSimulator{ - atn: atn, - sharedContextCache: sharedContextCache, - }, - } + p := new(ParserATNSimulator) + + p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) p.parser = parser p.decisionToDFA = decisionToDFA @@ -64,12 +46,12 @@ func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, shared p.outerContext = nil p.dfa = nil // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. [JPCMap] - // isn't Synchronized, but we're ok since two threads shouldn't reuse same - // parser/atn-simulator object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid - // the merge if we ever see a and b again. Note that (b,a) -> c should - // also be examined during cache lookup. + // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + // isn't Synchronized but we're ok since two threads shouldn't reuse same + // parser/atnsim object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid + // the merge if we ever see a and b again. Note that (b,a)&rarrc should + // also be examined during cache lookup. 
// p.mergeCache = nil @@ -87,14 +69,14 @@ func (p *ParserATNSimulator) SetPredictionMode(v int) { func (p *ParserATNSimulator) reset() { } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { +func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + " exec LA(1)==" + p.getLookaheadName(input) + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) } + p.input = input p.startIndex = input.Index() p.outerContext = outerContext @@ -106,15 +88,7 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre defer func() { p.dfa = nil - p.mergeCache = nil // whack cache after each prediction - // Do not attempt to run a GC now that we're done with the cache as makes the - // GC overhead terrible for badly formed grammars and has little effect on well formed - // grammars. - // I have made some extra effort to try and reduce memory pressure by reusing allocations when - // possible. However, it can only have a limited effect. The real solution is to encourage grammar - // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect - // what is happening at runtime, along with using the error listener to report ambiguities. 
- + p.mergeCache = nil // wack cache after each prediction input.Seek(index) input.Release(m) }() @@ -139,7 +113,7 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre if outerContext == nil { outerContext = ParserRuleContextEmpty } - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + " exec LA(1)==" + p.getLookaheadName(input) + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) @@ -168,52 +142,47 @@ func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStre p.atn.stateMu.Unlock() } - alt, re := p.execATN(dfa, s0, input, index, outerContext) - parser.SetError(re) - if runtimeConfig.parserATNSimulatorDebug { + alt := p.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulatorDebug { fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) } return alt } -// execATN performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. -// +// Performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. + // There are some key conditions we're looking for after computing a new // set of ATN configs (proposed DFA state): -// -// - If the set is empty, there is no viable alternative for current symbol -// - Does the state uniquely predict an alternative? -// - Does the state have a conflict that would prevent us from -// putting it on the work list? -// +// if the set is empty, there is no viable alternative for current symbol +// does the state uniquely predict an alternative? +// does the state have a conflict that would prevent us from +// putting it on the work list? 
+ // We also have some key operations to do: +// add an edge from previous DFA state to potentially NewDFA state, D, +// upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// collecting predicates and adding semantic context to DFA accept states +// adding rule context to context-sensitive DFA accept states +// consuming an input symbol +// Reporting a conflict +// Reporting an ambiguity +// Reporting a context sensitivity +// Reporting insufficient predicates + +// cover these cases: // -// - Add an edge from previous DFA state to potentially NewDFA state, D, -// - Upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) -// - Collecting predicates and adding semantic context to DFA accept states -// - adding rule context to context-sensitive DFA accept states -// - Consuming an input symbol -// - Reporting a conflict -// - Reporting an ambiguity -// - Reporting a context sensitivity -// - Reporting insufficient predicates -// -// Cover these cases: -// -// - dead end -// - single alt -// - single alt + predicates -// - conflict -// - conflict + predicates -// -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { - - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + ", DFA state " + s0.String() + ", LA(1)==" + p.getLookaheadName(input) + @@ -222,7 +191,7 
@@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, previousD := s0 - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("s0 = " + s0.String()) } t := input.LA(1) @@ -245,17 +214,17 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, input.Seek(startIndex) alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) if alt != ATNInvalidAltNumber { - return alt, nil + return alt } - p.parser.SetError(e) - return ATNInvalidAltNumber, e + + panic(e) } if D.requiresFullContext && p.predictionMode != PredictionModeSLL { // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.conflictingAlts + conflictingAlts := D.configs.GetConflictingAlts() if D.predicates != nil { - if runtimeConfig.parserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL fail-over") + if ParserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL failover") } conflictIndex := input.Index() if conflictIndex != startIndex { @@ -263,10 +232,10 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, } conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) if conflictingAlts.length() == 1 { - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("Full LL avoided") } - return conflictingAlts.minValue(), nil + return conflictingAlts.minValue() } if conflictIndex != startIndex { // restore the index so Reporting the fallback to full @@ -274,18 +243,18 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, input.Seek(conflictIndex) } } - if runtimeConfig.parserATNSimulatorDFADebug { + if ParserATNSimulatorDFADebug { fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) } fullCtx := true s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) 
p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt, re + alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt } if D.isAcceptState { if D.predicates == nil { - return D.prediction, nil + return D.prediction } stopIndex := input.Index() input.Seek(startIndex) @@ -293,13 +262,13 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, switch alts.length() { case 0: - return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) + panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) case 1: - return alts.minValue(), nil + return alts.minValue() default: // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue(), nil + return alts.minValue() } } previousD = D @@ -345,8 +314,7 @@ func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) // @return The computed target DFA state for the given input symbol // {@code t}. If {@code t} does not lead to a valid DFA state, p method // returns {@link //ERROR}. 
-// -//goland:noinspection GoBoolExpressions + func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { reach := p.computeReachSet(previousD.configs, t, false) @@ -354,12 +322,12 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) return ATNSimulatorError } - // create new target state we'll add to DFA after it's complete + // create Newtarget state we'll add to DFA after it's complete D := NewDFAState(-1, reach) predictedAlt := p.getUniqueAlt(reach) - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { altSubSets := PredictionModegetConflictingAltSubsets(reach) fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + ", previous=" + previousD.configs.String() + @@ -372,17 +340,17 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t if predictedAlt != ATNInvalidAltNumber { // NO CONFLICT, UNIQUELY PREDICTED ALT D.isAcceptState = true - D.configs.uniqueAlt = predictedAlt + D.configs.SetUniqueAlt(predictedAlt) D.setPrediction(predictedAlt) } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.conflictingAlts = p.getConflictingAlts(reach) + D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) D.requiresFullContext = true // in SLL-only mode, we will stop at p state and return the minimum alt D.isAcceptState = true - D.setPrediction(D.configs.conflictingAlts.minValue()) + D.setPrediction(D.configs.GetConflictingAlts().minValue()) } - if D.isAcceptState && D.configs.hasSemanticContext { + if D.isAcceptState && D.configs.HasSemanticContext() { p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) if D.predicates != nil { D.setPrediction(ATNInvalidAltNumber) @@ -413,17 +381,15 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState } // comes back with reach.uniqueAlt set to a 
valid alt -// -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { fmt.Println("execATNWithFullContext " + s0.String()) } fullCtx := true foundExactAmbig := false - var reach *ATNConfigSet + var reach ATNConfigSet previous := s0 input.Seek(startIndex) t := input.LA(1) @@ -441,23 +407,25 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A // ATN states in SLL implies LL will also get nowhere. // If conflict in states that dip out, choose min since we // will get error no matter what. + e := p.noViableAlt(input, outerContext, previous, startIndex) input.Seek(startIndex) alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) if alt != ATNInvalidAltNumber { - return alt, nil + return alt } - return alt, p.noViableAlt(input, outerContext, previous, startIndex) + + panic(e) } altSubSets := PredictionModegetConflictingAltSubsets(reach) - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) } - reach.uniqueAlt = p.getUniqueAlt(reach) + reach.SetUniqueAlt(p.getUniqueAlt(reach)) // unique prediction? 
- if reach.uniqueAlt != ATNInvalidAltNumber { - predictedAlt = reach.uniqueAlt + if reach.GetUniqueAlt() != ATNInvalidAltNumber { + predictedAlt = reach.GetUniqueAlt() break } if p.predictionMode != PredictionModeLLExactAmbigDetection { @@ -486,9 +454,9 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A // If the configuration set uniquely predicts an alternative, // without conflict, then we know that it's a full LL decision // not SLL. - if reach.uniqueAlt != ATNInvalidAltNumber { + if reach.GetUniqueAlt() != ATNInvalidAltNumber { p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt, nil + return predictedAlt } // We do not check predicates here because we have checked them // on-the-fly when doing full context prediction. @@ -501,10 +469,10 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A // // For example, we might know that we have conflicting configurations. // But, that does not mean that there is no way forward without a - // conflict. It's possible to have non-conflicting alt subsets as in: - // + // conflict. 
It's possible to have nonconflicting alt subsets as in: + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - // + // from // // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), @@ -519,15 +487,14 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *A p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - return predictedAlt, nil + return predictedAlt } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { +func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { if p.mergeCache == nil { - p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") + p.mergeCache = NewDoubleDict() } - intermediate := NewATNConfigSet(fullCtx) + intermediate := NewBaseATNConfigSet(fullCtx) // Configurations already in a rule stop state indicate reaching the end // of the decision rule (local context) or end of the start rule (full @@ -539,18 +506,18 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC // ensure that the alternative Matching the longest overall sequence is // chosen when multiple such configurations can Match the input. 
- var skippedStopStates []*ATNConfig + var skippedStopStates []*BaseATNConfig // First figure out where we can reach on input t - for _, c := range closure.configs { - if runtimeConfig.parserATNSimulatorDebug { + for _, c := range closure.GetItems() { + if ParserATNSimulatorDebug { fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) } if _, ok := c.GetState().(*RuleStopState); ok { if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c) - if runtimeConfig.parserATNSimulatorDebug { + skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) + if ParserATNSimulatorDebug { fmt.Println("added " + c.String() + " to SkippedStopStates") } } @@ -560,9 +527,9 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC for _, trans := range c.GetState().GetTransitions() { target := p.getReachableTarget(trans, t) if target != nil { - cfg := NewATNConfig4(c, target) + cfg := NewBaseATNConfig4(c, target) intermediate.Add(cfg, p.mergeCache) - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("added " + cfg.String() + " to intermediate") } } @@ -570,7 +537,7 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC } // Now figure out where the reach operation can take us... - var reach *ATNConfigSet + var reach ATNConfigSet // This block optimizes the reach operation for intermediate sets which // trivially indicate a termination state for the overall @@ -598,8 +565,8 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC // operation on the intermediate set to compute its initial value. 
// if reach == nil { - reach = NewATNConfigSet(fullCtx) - closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") + reach = NewBaseATNConfigSet(fullCtx) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) treatEOFAsEpsilon := t == TokenEOF amount := len(intermediate.configs) for k := 0; k < amount; k++ { @@ -621,10 +588,10 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC // // This is handled before the configurations in SkippedStopStates, // because any configurations potentially added from that list are - // already guaranteed to meet this condition whether it's + // already guaranteed to meet p condition whether or not it's // required. // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) + reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) } // If SkippedStopStates!=nil, then it contains at least one // configuration. For full-context reach operations, these @@ -640,40 +607,41 @@ func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullC } } - if runtimeConfig.parserATNSimulatorTraceATNSim { + if ParserATNSimulatorTraceATNSim { fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) } - if len(reach.configs) == 0 { + if len(reach.GetItems()) == 0 { return nil } return reach } -// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from -// configs which are in a [RuleStopState]. If all -// configurations in configs are already in a rule stop state, this -// method simply returns configs. +// Return a configuration set containing only the configurations from +// {@code configs} which are in a {@link RuleStopState}. If all +// configurations in {@code configs} are already in a rule stop state, p +// method simply returns {@code configs}. 
// -// When lookToEndOfRule is true, this method uses -// [ATN].[NextTokens] for each configuration in configs which is +//
When {@code lookToEndOfRule} is true, p method uses +// {@link ATN//NextTokens} for each configuration in {@code configs} which is // not already in a rule stop state to see if a rule stop state is reachable -// from the configuration via epsilon-only transitions. +// from the configuration via epsilon-only transitions.
// -// When lookToEndOfRule is true, this method checks for rule stop states +// @param configs the configuration set to update +// @param lookToEndOfRule when true, p method checks for rule stop states // reachable by epsilon-only transitions from each configuration in -// configs. +// {@code configs}. // -// The func returns configs if all configurations in configs are in a -// rule stop state, otherwise it returns a new configuration set containing only -// the configurations from configs which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { +// @return {@code configs} if all configurations in {@code configs} are in a +// rule stop state, otherwise return a Newconfiguration set containing only +// the configurations from {@code configs} which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { if PredictionModeallConfigsInRuleStopStates(configs) { return configs } - result := NewATNConfigSet(configs.fullCtx) - for _, config := range configs.configs { + result := NewBaseATNConfigSet(configs.FullContext()) + for _, config := range configs.GetItems() { if _, ok := config.GetState().(*RuleStopState); ok { result.Add(config, p.mergeCache) continue @@ -682,81 +650,91 @@ func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConf NextTokens := p.atn.NextTokens(config.GetState(), nil) if NextTokens.contains(TokenEpsilon) { endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) + result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) } } } return result } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { +func (p *ParserATNSimulator) computeStartState(a 
ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { // always at least the implicit call to start rule initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewATNConfigSet(fullCtx) - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + configs := NewBaseATNConfigSet(fullCtx) + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { fmt.Println("computeStartState from ATN state " + a.String() + " initialContext=" + initialContext.String()) } for i := 0; i < len(a.GetTransitions()); i++ { target := a.GetTransitions()[i].getTarget() - c := NewATNConfig6(target, i+1, initialContext) - closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") + c := NewBaseATNConfig6(target, i+1, initialContext) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) p.closure(c, configs, closureBusy, true, fullCtx, false) } return configs } -// applyPrecedenceFilter transforms the start state computed by -// [computeStartState] to the special start state used by a -// precedence [DFA] for a particular precedence value. The transformation +// This method transforms the start state computed by +// {@link //computeStartState} to the special start state used by a +// precedence DFA for a particular precedence value. The transformation // process applies the following changes to the start state's configuration // set. // -// 1. Evaluate the precedence predicates for each configuration using -// [SemanticContext].evalPrecedence. -// 2. Remove all configurations which predict an alternative greater than -// 1, for which another configuration that predicts alternative 1 is in the -// same ATN state with the same prediction context. 
-// -// Transformation 2 is valid for the following reasons: -// -// - The closure block cannot contain any epsilon transitions which bypass -// the body of the closure, so all states reachable via alternative 1 are -// part of the precedence alternatives of the transformed left-recursive -// rule. -// - The "primary" portion of a left recursive rule cannot contain an -// epsilon transition, so the only way an alternative other than 1 can exist -// in a state that is also reachable via alternative 1 is by nesting calls -// to the left-recursive rule, with the outer calls not being at the -// preferred precedence level. -// -// The prediction context must be considered by this filter to address -// situations like the following: -// -// grammar TA -// prog: statement* EOF -// statement: letterA | statement letterA 'b' -// letterA: 'a' +//+// The prediction context must be considered by p filter to address +// situations like the following. +//
+//
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+//
+// +// If the above grammar, the ATN state immediately before the token +// reference {@code 'a'} in {@code letterA} is reachable from the left edge // of both the primary and closure blocks of the left-recursive rule -// statement. The prediction context associated with each of these +// {@code statement}. The prediction context associated with each of these // configurations distinguishes between them, and prevents the alternative -// which stepped out to prog, and then back in to statement +// which stepped out to {@code prog} (and then back in to {@code statement} // from being eliminated by the filter. +//
// -// The func returns the transformed configuration set representing the start state -// for a precedence [DFA] at a particular precedence level (determined by -// calling [Parser].getPrecedence). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { +// @param configs The configuration set computed by +// {@link //computeStartState} as the start state for the DFA. +// @return The transformed configuration set representing the start state +// for a precedence DFA at a particular precedence level (determined by +// calling {@link Parser//getPrecedence}). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - statesFromAlt1 := make(map[int]*PredictionContext) - configSet := NewATNConfigSet(configs.fullCtx) + statesFromAlt1 := make(map[int]PredictionContext) + configSet := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.configs { + for _, config := range configs.GetItems() { // handle alt 1 first if config.GetAlt() != 1 { continue @@ -768,12 +746,12 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNCo } statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() if updatedContext != config.GetSemanticContext() { - configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) + configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) } else { configSet.Add(config, p.mergeCache) } } - for _, config := range configs.configs { + for _, config := range configs.GetItems() { if config.GetAlt() == 1 { // already handled @@ -802,11 +780,10 @@ func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATN return nil } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) 
[]SemanticContext { altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.configs { + for _, c := range configs.GetItems() { if ambigAlts.contains(c.GetAlt()) { altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) } @@ -820,11 +797,11 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *AT nPredAlts++ } } - // unambiguous alts are nil in altToPred + // nonambig alts are nil in altToPred if nPredAlts == 0 { altToPred = nil } - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) } return altToPred @@ -835,7 +812,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre containsPredicate := false for i := 1; i < len(altToPred); i++ { pred := altToPred[i] - // un-predicated is indicated by SemanticContextNONE + // unpredicated is indicated by SemanticContextNONE if ambigAlts != nil && ambigAlts.contains(i) { pairs = append(pairs, NewPredPrediction(pred, i)) } @@ -849,42 +826,51 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre return pairs } -// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by -// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the -// Error state was reached during [ATN] simulation. +// This method is used to improve the localization of error messages by +// choosing an alternative rather than panicing a +// {@link NoViableAltException} in particular prediction scenarios where the +// {@link //ERROR} state was reached during ATN simulation. 
// -// The default implementation of this method uses the following -// algorithm to identify an [ATN] configuration which successfully parsed the +//+// The default implementation of p method uses the following +// algorithm to identify an ATN configuration which successfully parsed the // decision entry rule. Choosing such an alternative ensures that the -// [ParserRuleContext] returned by the calling rule will be complete +// {@link ParserRuleContext} returned by the calling rule will be complete // and valid, and the syntax error will be Reported later at a more -// localized location. +// localized location.
// -// - If a syntactically valid path or paths reach the end of the decision rule, and -// they are semantically valid if predicated, return the min associated alt. -// - Else, if a semantically invalid but syntactically valid path exist -// or paths exist, return the minimum associated alt. -// - Otherwise, return [ATNInvalidAltNumber]. +//// In some scenarios, the algorithm described above could predict an -// alternative which will result in a [FailedPredicateException] in -// the parser. Specifically, this could occur if the only configuration +// alternative which will result in a {@link FailedPredicateException} in +// the parser. Specifically, p could occur if the only configuration // capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing this alternative within -// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting -// [FailedPredicateException] in the parser will identify the specific +// blocked by a semantic predicate. By choosing p alternative within +// {@link //AdaptivePredict} instead of panicing a +// {@link NoViableAltException}, the resulting +// {@link FailedPredicateException} in the parser will identify the specific // predicate which is preventing the parser from successfully parsing the // decision rule, which helps developers identify and correct logic errors // in semantic predicates. +//
// -// pass in the configs holding ATN configurations which were valid immediately before -// the ERROR state was reached, outerContext as the initial parser context from the paper +// @param configs The ATN configurations which were valid immediately before +// the {@link //ERROR} state was reached +// @param outerContext The is the \gamma_0 initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// The func returns the value to return from [AdaptivePredict], or -// [ATNInvalidAltNumber] if a suitable alternative was not -// identified and [AdaptivePredict] should report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { +// @return The value to return from {@link //AdaptivePredict}, or +// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not +// identified and {@link //AdaptivePredict} should Report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) semValidConfigs := cfgs[0] semInvalidConfigs := cfgs[1] @@ -893,7 +879,7 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return alt } // Is there a syntactically valid path with a failed pred? 
- if len(semInvalidConfigs.configs) > 0 { + if len(semInvalidConfigs.GetItems()) > 0 { alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) if alt != ATNInvalidAltNumber { // syntactically viable path exists return alt @@ -902,10 +888,10 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return ATNInvalidAltNumber } -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { alts := NewIntervalSet() - for _, c := range configs.configs { + for _, c := range configs.GetItems() { _, ok := c.GetState().(*RuleStopState) if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { @@ -929,14 +915,14 @@ func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNCon // prediction, which is where predicates need to evaluate. type ATNConfigSetPair struct { - item0, item1 *ATNConfigSet + item0, item1 ATNConfigSet } -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { - succeeded := NewATNConfigSet(configs.fullCtx) - failed := NewATNConfigSet(configs.fullCtx) +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { + succeeded := NewBaseATNConfigSet(configs.FullContext()) + failed := NewBaseATNConfigSet(configs.FullContext()) - for _, c := range configs.configs { + for _, c := range configs.GetItems() { if c.GetSemanticContext() != SemanticContextNone { predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) if predicateEvaluationResult { @@ -948,16 +934,15 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfig succeeded.Add(c, nil) } } - return []*ATNConfigSet{succeeded, failed} + return []ATNConfigSet{succeeded, failed} } -// evalSemanticContext looks 
through a list of predicate/alt pairs, returning alts for the -// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an -// un-predicated runtimeConfig which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. +// Look through a list of predicate/alt pairs, returning alts for the // -//goland:noinspection GoBoolExpressions +// pairs that win. A {@code NONE} predicate indicates an alt containing an +// unpredicated config which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { predictions := NewBitSet() for i := 0; i < len(predPredictions); i++ { @@ -971,11 +956,11 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti } predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) } if predicateEvaluationResult { - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) } predictions.add(pair.alt) @@ -987,82 +972,19 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti return predictions } -func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, 
Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { initialDepth := 0 p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEOFAsEpsilon) } -func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if runtimeConfig.parserATNSimulatorTraceATNSim { +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if ParserATNSimulatorTraceATNSim { fmt.Println("closure(" + config.String() + ")") - } - - var stack []*ATNConfig - visited := make(map[*ATNConfig]bool) - - stack = append(stack, config) - - for len(stack) > 0 { - currConfig := stack[len(stack)-1] - stack = stack[:len(stack)-1] - - if _, ok := visited[currConfig]; ok { - continue - } - visited[currConfig] = true - - if _, ok := currConfig.GetState().(*RuleStopState); ok { - // We hit rule end. 
If we have context info, use it - // run thru all possible stack tops in ctx - if !currConfig.GetContext().isEmpty() { - for i := 0; i < currConfig.GetContext().length(); i++ { - if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) - configs.Add(nb, p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if runtimeConfig.parserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) - } - p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] - newContext := currConfig.GetContext().GetParent(i) // "pop" return state - - c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
- c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) - - stack = append(stack, c) - } - continue - } else if fullCtx { - // reached end of start rule - configs.Add(currConfig, p.mergeCache) - continue - } else { - // else if we have no context info, just chase follow links (if greedy) - if runtimeConfig.parserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) - } - } + //fmt.Println("configs(" + configs.String() + ")") + if config.GetReachesIntoOuterContext() > 50 { + panic("problem") } - - p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } -} - -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if runtimeConfig.parserATNSimulatorTraceATNSim { - fmt.Println("closure(" + config.String() + ")") } if _, ok := config.GetState().(*RuleStopState); ok { @@ -1072,12 +994,11 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig for i := 0; i < config.GetContext().length(); i++ { if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { if fullCtx { - nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) - configs.Add(nb, p.mergeCache) + configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) continue } else { // we have no context info, just chase follow links (if greedy) - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) } p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) @@ -1087,7 +1008,7 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig 
returnState := p.atn.states[config.GetContext().getReturnState(i)] newContext := config.GetContext().GetParent(i) // "pop" return state - c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) // While we have context to pop back from, we may have // gotten that context AFTER having falling off a rule. // Make sure we track that we are now out of context. @@ -1101,7 +1022,7 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig return } else { // else if we have no context info, just chase follow links (if greedy) - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) } } @@ -1109,10 +1030,8 @@ func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) } -// Do the actual work of walking epsilon edges -// -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +// Do the actual work of walking epsilon edges// +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { state := config.GetState() // optimization if !state.GetEpsilonOnlyTransitions() { @@ -1129,7 +1048,7 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe _, ok := t.(*ActionTransition) continueCollecting := collectPredicates && !ok c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if c != nil { + if ci, ok := c.(*BaseATNConfig); ok && ci != 
nil { newDepth := depth if _, ok := config.GetState().(*RuleStopState); ok { @@ -1137,7 +1056,7 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe // We can't get here if incoming config was rule stop and we had context // track how far we dip into outer context. Might // come in handy and we avoid evaluating context dependent - // preds if this is > 0. + // preds if p is > 0. if p.dfa != nil && p.dfa.getPrecedenceDfa() { if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { @@ -1153,9 +1072,9 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe continue } - configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method + configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method newDepth-- - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("dips into outer ctx: " + c.String()) } } else { @@ -1179,9 +1098,8 @@ func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSe } } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { - if !runtimeConfig.lRLoopEntryBranchOpt { +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { + if TurnOffLRLoopEntryBranchOpt { return false } @@ -1278,7 +1196,7 @@ func (p *ParserATNSimulator) getRuleName(index int) string { return sb.String() } -func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig { +func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { switch t.getSerializationType() { case TransitionRULE: @@ -1290,13 +1208,13 @@ func (p *ParserATNSimulator) 
getEpsilonTarget(config *ATNConfig, t Transition, c case TransitionACTION: return p.actionTransition(config, t.(*ActionTransition)) case TransitionEPSILON: - return NewATNConfig4(config, t.getTarget()) + return NewBaseATNConfig4(config, t.getTarget()) case TransitionATOM, TransitionRANGE, TransitionSET: // EOF transitions act like epsilon transitions after the first EOF // transition is traversed if treatEOFAsEpsilon { if t.Matches(TokenEOF, 0, 1) { - return NewATNConfig4(config, t.getTarget()) + return NewBaseATNConfig4(config, t.getTarget()) } } return nil @@ -1305,63 +1223,60 @@ func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, c } } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig { - if runtimeConfig.parserATNSimulatorDebug { +func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) } - return NewATNConfig4(config, t.getTarget()) + return NewBaseATNConfig4(config, t.getTarget()) } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { +func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") if p.parser != nil { fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) } } - var c *ATNConfig + var c *BaseATNConfig if collectPredicates && inContext { if fullCtx { // In full 
context mode, we can evaluate predicates on-the-fly // during closure, which dramatically reduces the size of - // the runtimeConfig sets. It also obviates the need to test predicates + // the config sets. It also obviates the need to test predicates // later during conflict resolution. currentPosition := p.input.Index() p.input.Seek(p.startIndex) predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewATNConfig4(config, pt.getTarget()) // no pred context + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) } } else { - c = NewATNConfig4(config, pt.getTarget()) + c = NewBaseATNConfig4(config, pt.getTarget()) } - if runtimeConfig.parserATNSimulatorDebug { - fmt.Println("runtimeConfig from pred transition=" + c.String()) + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) } return c } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { +func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) if p.parser != nil { fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) } } - var c *ATNConfig + var c *BaseATNConfig if collectPredicates && (!pt.isCtxDependent || inContext) { if fullCtx { // In full context mode, we 
can evaluate predicates on-the-fly @@ -1373,92 +1288,78 @@ func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTran predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewATNConfig4(config, pt.getTarget()) // no pred context + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) } } else { - c = NewATNConfig4(config, pt.getTarget()) + c = NewBaseATNConfig4(config, pt.getTarget()) } - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("config from pred transition=" + c.String()) } return c } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig { - if runtimeConfig.parserATNSimulatorDebug { +func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) } returnState := t.followState newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewATNConfig1(config, t.getTarget(), newContext) + return NewBaseATNConfig1(config, t.getTarget(), newContext) } -func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { +func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { altsets := PredictionModegetConflictingAltSubsets(configs) return PredictionModeGetAlts(altsets) } -// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of +// Sam pointed out a problem with the previous definition, v3, of // ambiguous states. 
If we have another state associated with conflicting // alternatives, we should keep going. For example, the following grammar // -// s : (ID | ID ID?) ; -// -// When the [ATN] simulation reaches the state before ;, it has a [DFA] -// state that looks like: -// -// [12|1|[], 6|2|[], 12|2|[]]. -// -// Naturally -// -// 12|1|[] and 12|2|[] -// -// conflict, but we cannot stop processing this node -// because alternative to has another way to continue, via -// -// [6|2|[]]. +// s : (ID | ID ID?) '' // +// When the ATN simulation reaches the state before '', it has a DFA +// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally +// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node +// because alternative to has another way to continue, via [6|2|[]]. // The key is that we have a single state that has config's only associated // with a single alternative, 2, and crucially the state transitions // among the configurations are all non-epsilon transitions. That means // we don't consider any conflicts that include alternative 2. So, we // ignore the conflict between alts 1 and 2. We ignore a set of // conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state config-list map. +// associated with a single alt state in the state&rarrconfig-list map. // // It's also the case that we might have two conflicting configurations but -// also a 3rd non-conflicting configuration for a different alternative: -// -// [1|1|[], 1|2|[], 8|3|[]]. -// -// This can come about from grammar: +// also a 3rd nonconflicting configuration for a different alternative: +// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: // -// a : A | A | A B +// a : A | A | A B // // After Matching input A, we reach the stop state for rule A, state 1. // State 8 is the state right before B. Clearly alternatives 1 and 2 // conflict and no amount of further lookahead will separate the two. 
-// However, alternative 3 will be able to continue, so we do not -// stop working on this state. -// -// In the previous example, we're concerned +// However, alternative 3 will be able to continue and so we do not +// stop working on p state. In the previous example, we're concerned // with states associated with the conflicting alternatives. Here alt // 3 is not associated with the conflicting configs, but since we can continue // looking for input reasonably, I don't declare the state done. We // ignore a set of conflicting alts when we have an alternative // that we still need to pursue. -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet { +// + +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { var conflictingAlts *BitSet - if configs.uniqueAlt != ATNInvalidAltNumber { + if configs.GetUniqueAlt() != ATNInvalidAltNumber { conflictingAlts = NewBitSet() - conflictingAlts.add(configs.uniqueAlt) + conflictingAlts.add(configs.GetUniqueAlt()) } else { - conflictingAlts = configs.conflictingAlts + conflictingAlts = configs.GetConflictingAlts() } return conflictingAlts } @@ -1483,10 +1384,11 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { return p.GetTokenName(input.LA(1)) } -// Used for debugging in [AdaptivePredict] around [execATN], but I cut -// it out for clarity now that alg. works well. We can leave this -// "dead" code for a bit. -func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) { +// Used for debugging in AdaptivePredict around execATN but I cut +// +// it out for clarity now that alg. works well. We can leave p +// "dead" code for a bit. 
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { panic("Not implemented") @@ -1516,13 +1418,13 @@ func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) { // } } -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException { +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) } -func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { +func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { alt := ATNInvalidAltNumber - for _, c := range configs.configs { + for _, c := range configs.GetItems() { if alt == ATNInvalidAltNumber { alt = c.GetAlt() // found first alt } else if c.GetAlt() != alt { @@ -1550,10 +1452,8 @@ func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { // @return If {@code to} is {@code nil}, p method returns {@code nil} // otherwise p method returns the result of calling {@link //addDFAState} // on {@code to} -// -//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) } if to == nil { @@ -1572,7 +1472,7 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA from.setIthEdge(t+1, to) // connect p.atn.edgeMu.Unlock() - if runtimeConfig.parserATNSimulatorDebug { + if ParserATNSimulatorDebug { var names []string if p.parser != nil { names = p.parser.GetLiteralNames() @@ -1583,49 +1483,48 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA return to } -// 
addDFAState adds state D to the [DFA] if it is not already present, and returns -// the actual instance stored in the [DFA]. If a state equivalent to D -// is already in the [DFA], the existing state is returned. Otherwise, this -// method returns D after adding it to the [DFA]. +// Add state {@code D} to the DFA if it is not already present, and return +// the actual instance stored in the DFA. If a state equivalent to {@code D} +// is already in the DFA, the existing state is returned. Otherwise p +// method returns {@code D} after adding it to the DFA. // -// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and -// does not change the DFA. +//If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and +// does not change the DFA.
// -//goland:noinspection GoBoolExpressions +// @param dfa The dfa +// @param D The DFA state to add +// @return The state stored in the DFA. This will be either the existing +// state if {@code D} is already in the DFA, or {@code D} itself if the +// state was not already present. func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { if d == ATNSimulatorError { return d } - - existing, present := dfa.Get(d) + existing, present := dfa.states.Get(d) if present { - if runtimeConfig.parserATNSimulatorTraceATNSim { + if ParserATNSimulatorTraceATNSim { fmt.Print("addDFAState " + d.String() + " exists") } return existing } - // The state will be added if not already there or we will be given back the existing state struct - // if it is present. + // The state was not present, so update it with configs // - d.stateNumber = dfa.Len() - if !d.configs.readOnly { - d.configs.OptimizeConfigs(&p.BaseATNSimulator) - d.configs.readOnly = true - d.configs.configLookup = nil + d.stateNumber = dfa.states.Len() + if !d.configs.ReadOnly() { + d.configs.OptimizeConfigs(p.BaseATNSimulator) + d.configs.SetReadOnly(true) } - dfa.Put(d) - - if runtimeConfig.parserATNSimulatorTraceATNSim { + dfa.states.Put(d) + if ParserATNSimulatorTraceATNSim { fmt.Println("addDFAState new " + d.String()) } return d } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + ", input=" + 
p.parser.GetTokenStream().GetTextFromInterval(interval)) @@ -1635,9 +1534,8 @@ func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAl } } -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) @@ -1647,15 +1545,10 @@ func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, } } -// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route. -// -// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer -// so that they can see that this is happening and can take action if they want to. 
-// -//goland:noinspection GoBoolExpressions -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { - if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { +// If context sensitive parsing, we know it's ambiguity not conflict// +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go similarity index 77% rename from vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go index c249bc1385..1c8cee7479 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go @@ -31,9 +31,7 @@ type ParserRuleContext interface { } type BaseParserRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int + *BaseRuleContext start, stop Token exception RecognitionException @@ -42,22 +40,8 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) - InitBaseParserRuleContext(prc, parent, invokingStateNumber) - return prc -} - -func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { - // What context invoked b rule? 
- prc.parentCtx = parent - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. - if parent == nil { - prc.invokingState = -1 - } else { - prc.invokingState = invokingStateNumber - } + prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -72,6 +56,8 @@ func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleCont // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. prc.exception = nil + + return prc } func (prc *BaseParserRuleContext) SetException(e RecognitionException) { @@ -104,15 +90,14 @@ func (prc *BaseParserRuleContext) GetText() string { return s } -// EnterRule is called when any rule is entered. -func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { +// Double dispatch methods for listeners +func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { } -// ExitRule is called when any rule is exited. -func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { +func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { } -// * Does not set parent link other add methods do that +// * Does not set parent link other add methods do that/// func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { if prc.children == nil { prc.children = make([]Tree, 0) @@ -135,9 +120,10 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as -// we entered a rule. If we have a label, we will need to remove -// the generic ruleContext object. +// * Used by EnterOuterAlt to toss out a RuleContext previously added as +// we entered a rule. 
If we have // label, we will need to remove +// generic ruleContext object. +// / func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] @@ -307,7 +293,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } @@ -354,50 +340,6 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } -func (prc *BaseParserRuleContext) SetParent(v Tree) { - if v == nil { - prc.parentCtx = nil - } else { - prc.parentCtx = v.(RuleContext) - } -} - -func (prc *BaseParserRuleContext) GetInvokingState() int { - return prc.invokingState -} - -func (prc *BaseParserRuleContext) SetInvokingState(t int) { - prc.invokingState = t -} - -func (prc *BaseParserRuleContext) GetRuleIndex() int { - return prc.RuleIndex -} - -func (prc *BaseParserRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} - -// IsEmpty returns true if the context of b is empty. -// -// A context is empty if there is no invoking state, meaning nobody calls -// current context. -func (prc *BaseParserRuleContext) IsEmpty() bool { - return prc.invokingState == -1 -} - -// GetParent returns the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -// -// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of this -// method. 
-func (prc *BaseParserRuleContext) GetParent() Tree { - return prc.parentCtx -} - var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { @@ -408,7 +350,6 @@ type BaseInterpreterRuleContext struct { *BaseParserRuleContext } -//goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { prc := new(BaseInterpreterRuleContext) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go new file mode 100644 index 0000000000..ba62af3610 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go @@ -0,0 +1,806 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "golang.org/x/exp/slices" + "strconv" +) + +// Represents {@code $} in local context prediction, which means wildcard. +// {@code//+x =//}. +// / +const ( + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// Represents {@code $} in an array in full context mode, when {@code $} +// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, +// {@code $} = {@link //EmptyReturnState}. 
+// / + +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +type PredictionContext interface { + Hash() int + Equals(interface{}) bool + GetParent(int) PredictionContext + getReturnState(int) int + length() int + isEmpty() bool + hasEmptyPath() bool + String() string +} + +type BasePredictionContext struct { + cachedHash int +} + +func NewBasePredictionContext(cachedHash int) *BasePredictionContext { + pc := new(BasePredictionContext) + pc.cachedHash = cachedHash + + return pc +} + +func (b *BasePredictionContext) isEmpty() bool { + return false +} + +func calculateHash(parent PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +// Used to cache {@link BasePredictionContext} objects. Its used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. + +type PredictionContextCache struct { + cache map[PredictionContext]PredictionContext +} + +func NewPredictionContextCache() *PredictionContextCache { + t := new(PredictionContextCache) + t.cache = make(map[PredictionContext]PredictionContext) + return t +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a Newcontext to the cache. +// Protect shared cache from unsafe thread access. 
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { + if ctx == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY + } + existing := p.cache[ctx] + if existing != nil { + return existing + } + p.cache[ctx] = ctx + return ctx +} + +func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { + return p.cache[ctx] +} + +func (p *PredictionContextCache) length() int { + return len(p.cache) +} + +type SingletonPredictionContext interface { + PredictionContext +} + +type BaseSingletonPredictionContext struct { + *BasePredictionContext + + parentCtx PredictionContext + returnState int +} + +func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { + var cachedHash int + if parent != nil { + cachedHash = calculateHash(parent, returnState) + } else { + cachedHash = calculateEmptyHash() + } + + s := new(BaseSingletonPredictionContext) + s.BasePredictionContext = NewBasePredictionContext(cachedHash) + + s.parentCtx = parent + s.returnState = returnState + + return s +} + +func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func (b *BaseSingletonPredictionContext) length() int { + return 1 +} + +func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { + return b.parentCtx +} + +func (b *BaseSingletonPredictionContext) getReturnState(index int) int { + return b.returnState +} + +func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { + return b.returnState == BasePredictionContextEmptyReturnState +} + +func (b *BaseSingletonPredictionContext) Hash() int { + return b.cachedHash +} + +func (b *BaseSingletonPredictionContext) 
Equals(other interface{}) bool { + if b == other { + return true + } + if _, ok := other.(*BaseSingletonPredictionContext); !ok { + return false + } + + otherP := other.(*BaseSingletonPredictionContext) + + if b.returnState != otherP.getReturnState(0) { + return false + } + if b.parentCtx == nil { + return otherP.parentCtx == nil + } + + return b.parentCtx.Equals(otherP.parentCtx) +} + +func (b *BaseSingletonPredictionContext) String() string { + var up string + + if b.parentCtx == nil { + up = "" + } else { + up = b.parentCtx.String() + } + + if len(up) == 0 { + if b.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(b.returnState) + } + + return strconv.Itoa(b.returnState) + " " + up +} + +var BasePredictionContextEMPTY = NewEmptyPredictionContext() + +type EmptyPredictionContext struct { + *BaseSingletonPredictionContext +} + +func NewEmptyPredictionContext() *EmptyPredictionContext { + + p := new(EmptyPredictionContext) + + p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) + p.cachedHash = calculateEmptyHash() + return p +} + +func (e *EmptyPredictionContext) isEmpty() bool { + return true +} + +func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { + return nil +} + +func (e *EmptyPredictionContext) getReturnState(index int) int { + return e.returnState +} + +func (e *EmptyPredictionContext) Hash() int { + return e.cachedHash +} + +func (e *EmptyPredictionContext) Equals(other interface{}) bool { + return e == other +} + +func (e *EmptyPredictionContext) String() string { + return "$" +} + +type ArrayPredictionContext struct { + *BasePredictionContext + + parents []PredictionContext + returnStates []int +} + +func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. 
We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. + hash := murmurInit(1) + + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + + hash = murmurFinish(hash, len(parents)<<1) + + c := new(ArrayPredictionContext) + c.BasePredictionContext = NewBasePredictionContext(hash) + + c.parents = parents + c.returnStates = returnStates + + return c +} + +func (a *ArrayPredictionContext) GetReturnStates() []int { + return a.returnStates +} + +func (a *ArrayPredictionContext) hasEmptyPath() bool { + return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) isEmpty() bool { + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return a.returnStates[0] == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) length() int { + return len(a.returnStates) +} + +func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { + return a.parents[index] +} + +func (a *ArrayPredictionContext) getReturnState(index int) int { + return a.returnStates[index] +} + +// Equals is the default comparison function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Equals(o interface{}) bool { + if a == o { + return true + } + other, ok := o.(*ArrayPredictionContext) + if !ok { + return false + } + if a.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(a.returnStates, other.returnStates) && + slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { + return x.Equals(y) + }) +} + +// Hash is the default hash function for ArrayPredictionContext when 
no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Hash() int { + return a.BasePredictionContext.cachedHash +} + +func (a *ArrayPredictionContext) String() string { + if a.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(a.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if a.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(a.returnStates[i]) + if a.parents[i] != nil { + s = s + " " + a.parents[i].String() + } else { + s = s + "nil" + } + } + + return s + "]" +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test + // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created + // from it. 
+ // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion + // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from + // either of them. + + ac, ok1 := a.(*BaseSingletonPredictionContext) + bc, ok2 := b.(*BaseSingletonPredictionContext) + + if ok1 && ok2 { + return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as// wildcard + if rootIsWildcard { + if _, ok := a.(*EmptyPredictionContext); ok { + return a + } + if _, ok := b.(*EmptyPredictionContext); ok { + return b + } + } + + // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters + // here. + // + // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here + + var arp, arb *ArrayPredictionContext + var ok bool + if arp, ok = a.(*ArrayPredictionContext); ok { + } else if _, ok = a.(*BaseSingletonPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } else if _, ok = a.(*EmptyPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) + } + + if arb, ok = b.(*ArrayPredictionContext); ok { + } else if _, ok = b.(*BaseSingletonPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } else if _, ok = b.(*EmptyPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) + } + + // Both arp and arb + return mergeArrays(arp, arb, rootIsWildcard, mergeCache) +} + +// Merge two {@link SingletonBasePredictionContext} instances. +// +//Stack tops equal, parents merge is same return left graph.
+//
Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A Newroot node is created to point to the
+// merged parents.
+//
Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
These local-context merge operations are used when {@code rootIsWildcard} +// is true.
+// +//{@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
Special case of last merge if local context.
+//
These full-context merge operations are used when {@code rootIsWildcard} +// is false.
+// +// +// +//Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
Different tops, different parents.
+//
Shared top, same parents.
+//
Shared top, different parents.
+//
Shared top, all shared parents.
+//
Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+ // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // {@link //LL} prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the {@link //SLL} prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful {@link //LL} prediction abilities to complete successfully.
+ // + //+ // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.
+ // + PredictionModeSLL = 0 + // + // The LL(*) prediction mode. This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + //+ // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // Report a precise answer for exactly which alternatives are + // ambiguous.
+ // + //+ // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.
+ // + PredictionModeLL = 1 + // + // The LL(*) prediction mode with exact ambiguity detection. In addition to + // the correctness guarantees provided by the {@link //LL} prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + //+ // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary.
+ // + //+ // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.
+ // + PredictionModeLLExactAmbigDetection = 2 +) + +// Computes the SLL prediction termination condition. +// +//+// This method computes the SLL prediction termination condition for both of +// the following cases.
+// +//COMBINED SLL+LL PARSING
+// +//When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction.
+// +//Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative (e.g. +// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations.
+// +//Here's the prediction termination rule them: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty.
+// +//HEURISTIC
+// +//As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon):
+// +//{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
+// +//When the ATN simulation reaches the state before {@code ”}, it has a +// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally +// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop +// processing this node because alternative to has another way to continue, +// via {@code [6|2|[]]}.
+// +//It also let's us continue for this rule:
+// +//{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+// +//After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done.
+// +//PURE SLL PARSING
+// +//To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic.
+// +//PREDICATES IN SLL+LL PARSING
+// +//SLL decisions don't evaluate predicates until after they reach DFA stop +// states because they need to create the DFA cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates.
+// +//Implementation-wise, {@link ATNConfigSet} combines stack contexts but not +// semantic predicate contexts so we might see two configurations like the +// following.
+// +//{@code (s, 1, x, {}), (s, 1, x', {p})}
+// +//Before testing these configurations against others, we have to merge +// {@code x} and {@code x'} (without modifying the existing configurations). +// For example, we test {@code (x+x')==x”} when looking for conflicts in +// the following configurations.
+// +//{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
+// +//If the configuration set has predicates (as indicated by +// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of +// the configurations to strip out all of the predicates so that a standard +// {@link ATNConfigSet} will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.HasSemanticContext() { + // dup configs, tossing out semantic predicates + dup := NewBaseATNConfigSet(false) + for _, c := range configs.GetItems() { + + // NewBaseATNConfig({semanticContext:}, c) + c = NewBaseATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar preds + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// Checks if any configuration in {@code configs} is in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if any configuration in {@code configs} is in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// Checks if all configurations in {@code configs} are in a +// {@link RuleStopState}. 
Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if all configurations in {@code configs} are in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { + + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// Full LL prediction termination. +// +//Can we stop looking ahead during ATN simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict.
+// +//The basic idea is to split the set of configurations {@code C}, into +// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical {@link ATNConfig//state} and {@link ATNConfig//context} values +// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} +// and {@code (s, j, ctx, _)} for {@code i!=j}.
+// +//Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows:
+// +//{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in +// {@code C} holding {@code s} and {@code ctx} fixed.
+// +//Or in pseudo-code, for each configuration {@code c} in {@code C}:
+// +//+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not +// alt and not pred +//+// +//
The values in {@code map} are the set of {@code A_s,ctx} sets.
+// +//If {@code |A_s,ctx|=1} then there is no conflict associated with +// {@code s} and {@code ctx}.
+// +//Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// more lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round.
+// +//The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal you will still have the conflict. It's just inefficient. It +// might even look until the end of file.
+// +//No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check.
+// +//CONFLICTING CONFIGS
+// +//Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict +// when {@code i!=j} but {@code x=x'}. Because we merge all +// {@code (s, i, _)} configurations together, that means that there are at +// most {@code n} configurations associated with state {@code s} for +// {@code n} possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts {@code x} and +// {@code x'}. Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either {@code x} or +// {@code x'}. If the {@code x} associated with lowest alternative {@code i} +// is the superset, then {@code i} is the only possible prediction since the +// others resolve to {@code min(i)} as well. However, if {@code x} is +// associated with {@code j>i} then at least one stack configuration for +// {@code j} is not in conflict with alternative {@code i}. The algorithm +// should keep going, looking for more lookahead due to the uncertainty.
+// +//For simplicity, I'm doing a equality check between {@code x} and +// {@code x'} that lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict.
+// +//CONTINUE/STOP RULE
+// +//Continue if union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict.
+// +//The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which +// alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set.
+// +//CASES
+// +//EXACT AMBIGUITY DETECTION
+// +//If all states Report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set.
+// +//|A_i|>1
and
+// A_i = A_j
for all i, j.
In other words, we continue examining lookahead until all {@code A_i} +// have more than one alternative and all {@code A_i} are the same. If +// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate +// because the resolved set is {@code {1}}. To determine what the real +// ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like +// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// Determines if every alternative subset in {@code altsets} contains more +// than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every {@link BitSet} in {@code altsets} has +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// Determines if any single alternative subset in {@code altsets} contains +// exactly one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// Determines if any single alternative subset in {@code altsets} contains +// more than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// Determines if every alternative subset in {@code altsets} is equivalent. 
+// +// @param altsets a collection of alternative subsets +// @return {@code true} if every member of {@code altsets} is equal to the +// others, otherwise {@code false} +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// Returns the unique alternative predicted by all alternative subsets in +// {@code altsets}. If no such alternative exists, this method returns +// {@link ATN//INVALID_ALT_NUMBER}. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// Gets the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each {@link BitSet} +// in {@code altsets}. +// +// @param altsets a collection of alternative subsets +// @return the set of represented alternatives in {@code altsets} +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. 
+// For each configuration {@code c} in {@code configs}: +// +//+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not +// alt and not pred +//+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { + configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) + + for _, c := range configs.GetItems() { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each +// configuration {@code c} in {@code configs}: +// +//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} +//+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.GetItems() { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go similarity index 70% rename from vendor/github.com/antlr4-go/antlr/v4/recognizer.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go index dcb8548cd1..bfe542d091 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go @@ -26,9 +26,6 @@ type Recognizer interface { RemoveErrorListeners() GetATN() *ATN GetErrorListenerDispatch() ErrorListener - HasError() bool - GetError() RecognitionException - SetError(RecognitionException) } type BaseRecognizer struct { @@ -39,7 +36,6 @@ type BaseRecognizer struct { LiteralNames []string SymbolicNames []string GrammarFileName string - SynErr RecognitionException } func NewBaseRecognizer() *BaseRecognizer { @@ -49,32 +45,17 @@ func NewBaseRecognizer() *BaseRecognizer { return rec } -//goland:noinspection 
GoUnusedGlobalVariable var tokenTypeMapCache = make(map[string]int) - -//goland:noinspection GoUnusedGlobalVariable var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.13.1" + runtimeVersion := "4.12.0" if runtimeVersion != toolVersion { fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) } } -func (b *BaseRecognizer) SetError(err RecognitionException) { - b.SynErr = err -} - -func (b *BaseRecognizer) HasError() bool { - return b.SynErr != nil -} - -func (b *BaseRecognizer) GetError() RecognitionException { - return b.SynErr -} - -func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { +func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { panic("action not implemented on Recognizer!") } @@ -124,11 +105,9 @@ func (b *BaseRecognizer) SetState(v int) { // return result //} -// GetRuleIndexMap Get a map from rule names to rule indexes. +// Get a map from rule names to rule indexes. // -// Used for XPath and tree pattern compilation. -// -// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. +//
Used for XPath and tree pattern compilation.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") @@ -145,8 +124,7 @@ func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { // return result } -// GetTokenType get the token type based upon its name -func (b *BaseRecognizer) GetTokenType(_ string) int { +func (b *BaseRecognizer) GetTokenType(tokenName string) int { panic("Method not defined!") // var ttype = b.GetTokenTypeMap()[tokenName] // if (ttype !=nil) { @@ -184,27 +162,26 @@ func (b *BaseRecognizer) GetTokenType(_ string) int { // } //} -// GetErrorHeader returns the error header, normally line/character position information. -// -// Can be overridden in sub structs embedding BaseRecognizer. +// What is the error header, normally line/character position information?// func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { line := e.GetOffendingToken().GetLine() column := e.GetOffendingToken().GetColumn() return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) } -// GetTokenErrorDisplay shows how a token should be displayed in an error message. +// How should a token be displayed in an error message? The default // -// The default is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. 
// -// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific -// implementations of [ANTLRErrorStrategy] may provide a similar -// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() +// @deprecated This method is not called by the ANTLR 4 Runtime. Specific +// implementations of {@link ANTLRErrorStrategy} may provide a similar +// feature when necessary. For example, see +// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { if t == nil { return "+// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of b +// method. +// + +func (b *BaseRuleContext) GetParent() Tree { + return b.parentCtx +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go similarity index 92% rename from vendor/github.com/antlr4-go/antlr/v4/semantic_context.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go index 68cb9061eb..a702e99def 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go @@ -9,13 +9,14 @@ import ( "strconv" ) -// SemanticContext is a tree structure used to record the semantic context in which +// A tree structure used to record the semantic context in which +// an ATN configuration is valid. It's either a single predicate, +// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. // -// an ATN configuration is valid. It's either a single predicate, -// a conjunction p1 && p2, or a sum of products p1 || p2. +//
I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of +// {@link SemanticContext} within the scope of this outer class.
// -// I have scoped the AND, OR, and Predicate subclasses of -// [SemanticContext] within the scope of this outer ``class'' + type SemanticContext interface { Equals(other Collectable[SemanticContext]) bool Hash() int @@ -79,7 +80,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { var SemanticContextNone = NewPredicate(-1, -1, false) -func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { +func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { return p } @@ -197,7 +198,7 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.Put(o) @@ -229,7 +230,9 @@ func NewAND(a, b SemanticContext) *AND { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - copy(opnds, vs) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } and := new(AND) and.opnds = opnds @@ -313,12 +316,12 @@ func (a *AND) Hash() int { return murmurFinish(h, len(a.opnds)) } -func (o *OR) Hash() int { - h := murmurInit(41) // Init with o value different from AND - for _, op := range o.opnds { +func (a *OR) Hash() int { + h := murmurInit(41) // Init with a value different from AND + for _, op := range a.opnds { h = murmurUpdate(h, op.Hash()) } - return murmurFinish(h, len(o.opnds)) + return murmurFinish(h, len(a.opnds)) } func (a *AND) String() string { @@ -346,7 +349,7 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { 
operands.Put(o) @@ -379,7 +382,9 @@ func NewOR(a, b SemanticContext) *OR { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - copy(opnds, vs) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } o := new(OR) o.opnds = opnds diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go similarity index 74% rename from vendor/github.com/antlr4-go/antlr/v4/token.go rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go index f5bc34229d..f73b06bc6a 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/token.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go @@ -35,8 +35,6 @@ type Token interface { GetTokenSource() TokenSource GetInputStream() CharStream - - String() string } type BaseToken struct { @@ -55,7 +53,7 @@ type BaseToken struct { const ( TokenInvalidType = 0 - // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state + // During lookahead operations, this "token" signifies we hit rule end ATN state // and did not follow it despite needing to. TokenEpsilon = -2 @@ -63,16 +61,15 @@ const ( TokenEOF = -1 - // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. - // - // All tokens go to the parser (unless [Skip] is called in the lexer rule) + // All tokens go to the parser (unless Skip() is called in that rule) // on a particular "channel". The parser tunes to a particular channel // so that whitespace etc... can go to the parser on a "hidden" channel. + TokenDefaultChannel = 0 - // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. - // - // Anything on a different channel than TokenDefaultChannel is not parsed by parser. + // Anything on different channel than DEFAULT_CHANNEL is not parsed + // by parser. 
+ TokenHiddenChannel = 1 ) @@ -104,25 +101,6 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { return b.source } -func (b *BaseToken) GetText() string { - if b.text != "" { - return b.text - } - input := b.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if b.GetStart() < n && b.GetStop() < n { - return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop())) - } - return "Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.
-//
Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-//
These local-context merge operations are used when {@code rootIsWildcard} -// is true.
-// -//{@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
Special case of last merge if local context.
-//
These full-context merge operations are used when {@code rootIsWildcard} -// is false.
-// -// -// -//Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
-//
Different tops, different parents.
-//
Shared top, same parents.
-//
Shared top, different parents.
-//
Shared top, all shared parents.
-//
Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//