From ed53b15d06990a7c90d4bbd2ecaad4670933553d Mon Sep 17 00:00:00 2001
From: Jem Davies <131159520+jem-davies@users.noreply.github.com>
Date: Sun, 8 Sep 2024 18:54:21 +0100
Subject: [PATCH] remove references to deprecated function meta() in docs
 (#97)

* remove references to deprecated function meta() in docs

Signed-off-by: Jem Davies

* add caution box to website/docs/configuration/metadata.md regarding meta() + metadata() functions

Signed-off-by: Jem Davies

* remove reference to meta() on index.tsx

Signed-off-by: Jem Davies

---------

Signed-off-by: Jem Davies
---
 internal/config/test/docs.md                          |  2 +-
 internal/impl/amqp09/output.go                        |  2 +-
 internal/impl/azure/output_table_storage.go           |  6 +++---
 .../confluent/processor_schema_registry_encode.go     |  2 +-
 internal/impl/kafka/output_kafka_franz.go             |  2 +-
 internal/impl/nats/output_jetstream.go                |  2 +-
 internal/impl/nats/processor_request_reply.go         |  2 +-
 internal/impl/pure/processor_cached.go                |  4 ++--
 internal/impl/redis/processor.go                      |  2 +-
 website/cookbooks/joining_streams.md                  |  4 ++--
 website/cookbooks/kafka_topic_mirroring.md            | 10 +++++-----
 website/docs/components/about.md                      |  2 +-
 website/docs/components/metrics/about.md              |  2 +-
 website/docs/components/outputs/about.md              |  4 ++--
 website/docs/components/outputs/amqp_0_9.md           |  2 +-
 .../docs/components/outputs/azure_table_storage.md    |  8 ++++----
 website/docs/components/outputs/kafka_franz.md        |  6 +++---
 website/docs/components/outputs/nats_jetstream.md     |  2 +-
 website/docs/components/processors/cached.md          |  4 ++--
 .../docs/components/processors/nats_request_reply.md  |  2 +-
 website/docs/components/processors/redis.md           |  2 +-
 .../components/processors/schema_registry_encode.md   |  2 +-
 website/docs/configuration/about.md                   |  4 ++--
 website/docs/configuration/interpolation.md           |  4 ++--
 website/docs/configuration/metadata.md                | 12 +++++++++---
 website/docs/configuration/unit_testing.md            |  2 +-
 website/docs/guides/streams_mode/about.md             |  2 +-
 website/src/pages/index.tsx                           |  2 +-
 28 files changed, 53 insertions(+), 47 deletions(-)

diff --git a/internal/config/test/docs.md b/internal/config/test/docs.md
index f48cdca9f..31ca0565e 100644
--- a/internal/config/test/docs.md
+++ b/internal/config/test/docs.md
@@ -37,7 +37,7 @@ pipeline:
 output:
   aws_s3:
     bucket: TODO
-    path: '${! meta("kafka_topic") }/${! json("message.id") }.json'
+    path: '${! metadata("kafka_topic") }/${! json("message.id") }.json'
 ```
 
 One way to write our unit tests for this config is to accompany it with a file of the same name and extension but suffixed with `_bento_test`, which in this case would be `foo_bento_test.yaml`.
diff --git a/internal/impl/amqp09/output.go b/internal/impl/amqp09/output.go
index 504e45a66..0ae410349 100644
--- a/internal/impl/amqp09/output.go
+++ b/internal/impl/amqp09/output.go
@@ -95,7 +95,7 @@ The fields 'key', 'exchange' and 'type' can be dynamically set using function in
 			Description("Set the priority of each message with a dynamic interpolated expression.").
 			Advanced().
 			Example("0").
-			Example(`${! meta("amqp_priority") }`).
+			Example(`${! metadata("amqp_priority") }`).
 			Example(`${! json("doc.priority") }`).
 			Default(""),
 		service.NewOutputMaxInFlightField(),
diff --git a/internal/impl/azure/output_table_storage.go b/internal/impl/azure/output_table_storage.go
index f91667ae6..acf15ed6d 100644
--- a/internal/impl/azure/output_table_storage.go
+++ b/internal/impl/azure/output_table_storage.go
@@ -110,7 +110,7 @@ properties:
 		Fields(
 			service.NewInterpolatedStringField(tsoFieldTableName).
Description("The table to store messages into."). - Example(`${! meta("kafka_topic") }`).Example(`${! json("table") }`), + Example(`${! metadata("kafka_topic") }`).Example(`${! json("table") }`), service.NewInterpolatedStringField(tsoFieldPartitionKey). Description("The partition key."). Example(`${! json("date") }`). @@ -124,12 +124,12 @@ properties: Default(map[string]any{}), service.NewInterpolatedStringEnumField(tsoFieldInsertType, `INSERT`, `INSERT_MERGE`, `INSERT_REPLACE`). Description("Type of insert operation. Valid options are `INSERT`, `INSERT_MERGE` and `INSERT_REPLACE`"). - Example(`${! json("operation") }`).Example(`${! meta("operation") }`).Example(`INSERT`). + Example(`${! json("operation") }`).Example(`${! metadata("operation") }`).Example(`INSERT`). Advanced().Deprecated(). Default(""), service.NewInterpolatedStringEnumField(tsoFieldTransactionType, `INSERT`, `INSERT_MERGE`, `INSERT_REPLACE`, `UPDATE_MERGE`, `UPDATE_REPLACE`, `DELETE`). Description("Type of transaction operation."). - Example(`${! json("operation") }`).Example(`${! meta("operation") }`).Example(`INSERT`). + Example(`${! json("operation") }`).Example(`${! metadata("operation") }`).Example(`INSERT`). Advanced(). Default("INSERT"), service.NewOutputMaxInFlightField(). diff --git a/internal/impl/confluent/processor_schema_registry_encode.go b/internal/impl/confluent/processor_schema_registry_encode.go index 0dcb71a07..465b60c5c 100644 --- a/internal/impl/confluent/processor_schema_registry_encode.go +++ b/internal/impl/confluent/processor_schema_registry_encode.go @@ -62,7 +62,7 @@ We will be considering alternative approaches in future so please [get in touch] Field(service.NewURLField("url").Description("The base URL of the schema registry service.")). Field(service.NewInterpolatedStringField("subject").Description("The schema subject to derive schemas from."). Example("foo"). - Example(`${! meta("kafka_topic") }`)). + Example(`${! metadata("kafka_topic") }`)). Field(service.NewStringField("refresh_period"). Description("The period after which a schema is refreshed for each subject, this is done by polling the schema registry service."). Default("10m"). diff --git a/internal/impl/kafka/output_kafka_franz.go b/internal/impl/kafka/output_kafka_franz.go index 2b540e2dc..55a15ea73 100644 --- a/internal/impl/kafka/output_kafka_franz.go +++ b/internal/impl/kafka/output_kafka_franz.go @@ -46,7 +46,7 @@ This output often out-performs the traditional ` + "`kafka`" + ` output as well Advanced().Optional()). Field(service.NewInterpolatedStringField("partition"). Description("An optional explicit partition to set for each message. This field is only relevant when the `partitioner` is set to `manual`. The provided interpolation string must be a valid integer."). - Example(`${! meta("partition") }`). + Example(`${! metadata("partition") }`). Optional()). Field(service.NewStringField("client_id"). Description("An identifier for the client connection."). diff --git a/internal/impl/nats/output_jetstream.go b/internal/impl/nats/output_jetstream.go index e8f5ecc57..f76b1aab3 100644 --- a/internal/impl/nats/output_jetstream.go +++ b/internal/impl/nats/output_jetstream.go @@ -23,7 +23,7 @@ func natsJetStreamOutputConfig() *service.ConfigSpec { Field(service.NewInterpolatedStringField("subject"). Description("A subject to write to."). Example("foo.bar.baz"). - Example(`${! meta("kafka_topic") }`). + Example(`${! metadata("kafka_topic") }`). Example(`foo.${! json("meta.type") }`)). 
 		Field(service.NewInterpolatedStringMapField("headers").
 			Description("Explicit message headers to add to messages.").
diff --git a/internal/impl/nats/processor_request_reply.go b/internal/impl/nats/processor_request_reply.go
index 88cad5469..4de952452 100644
--- a/internal/impl/nats/processor_request_reply.go
+++ b/internal/impl/nats/processor_request_reply.go
@@ -38,7 +38,7 @@ You can access these metadata fields using [function interpolation](/docs/config
 		Field(service.NewInterpolatedStringField("subject").
 			Description("A subject to write to.").
 			Example("foo.bar.baz").
-			Example(`${! meta("kafka_topic") }`).
+			Example(`${! metadata("kafka_topic") }`).
 			Example(`foo.${! json("meta.type") }`)).
 		Field(service.NewStringField("inbox_prefix").
 			Description("Set an explicit inbox prefix for the response subject").
diff --git a/internal/impl/pure/processor_cached.go b/internal/impl/pure/processor_cached.go
index 111d6a4f0..d11a7c1ad 100644
--- a/internal/impl/pure/processor_cached.go
+++ b/internal/impl/pure/processor_cached.go
@@ -29,8 +29,8 @@ func newCachedProcessorConfigSpec() *service.ConfigSpec {
 			Description("A key to be resolved for each message, if the key already exists in the cache then the cached result is used, otherwise the processors are applied and the result is cached under this key. The key could be static and therefore apply generally to all messages or it could be an interpolated expression that is potentially unique for each message.").
 			Example("my_foo_result").
 			Example(`${! this.document.id }`).
-			Example(`${! meta("kafka_key") }`).
-			Example(`${! meta("kafka_topic") }`)).
+			Example(`${! metadata("kafka_key") }`).
+			Example(`${! metadata("kafka_topic") }`)).
 		Field(service.NewInterpolatedStringField("ttl").
 			Description("An optional expiry period to set for each cache entry. Some caches only have a general TTL and will therefore ignore this setting.").
 			Optional()).
diff --git a/internal/impl/redis/processor.go b/internal/impl/redis/processor.go
index cc0278f06..1fe300966 100644
--- a/internal/impl/redis/processor.go
+++ b/internal/impl/redis/processor.go
@@ -31,7 +31,7 @@ performed for each message and the message contents are replaced with the result
 			Version("1.0.0").
 			Example("scard").
 			Example("incrby").
-			Example(`${! meta("command") }`).
+			Example(`${! metadata("command") }`).
 			Optional()).
 		Field(service.NewBloblangField("args_mapping").
 			Description("A [Bloblang mapping](/docs/guides/bloblang/about) which should evaluate to an array of values matching in size to the number of arguments required for the specified Redis command.").
diff --git a/website/cookbooks/joining_streams.md b/website/cookbooks/joining_streams.md
index ed174c96b..9c793b685 100644
--- a/website/cookbooks/joining_streams.md
+++ b/website/cookbooks/joining_streams.md
@@ -189,7 +189,7 @@ input:
       # because both topics are consumed independently and these processors
       # only apply to the 'comments_retry' input.
       - sleep:
-          duration: '${! 3600 - ( timestamp_unix() - meta("last_attempted").number() ) }s'
+          duration: '${! 3600 - ( timestamp_unix() - metadata("last_attempted") ) }s'
 
 pipeline:
   processors:
@@ -216,7 +216,7 @@ pipeline:
 output:
   kafka:
     addresses: [ TODO ]
-    topic: '${!meta("output_topic")}'
+    topic: '${!metadata("output_topic")}'
 
 cache_resources:
   - label: hydration_cache
diff --git a/website/cookbooks/kafka_topic_mirroring.md b/website/cookbooks/kafka_topic_mirroring.md
index 8aa089167..fefffc1d0 100644
--- a/website/cookbooks/kafka_topic_mirroring.md
+++ b/website/cookbooks/kafka_topic_mirroring.md
@@ -52,7 +52,7 @@ Using [string interpolation][bloblang.interpolation], we can then extract the or
 output:
   kafka_franz:
     seed_brokers: [ TODO ]
-    topic: 'output-${! meta("kafka_topic") }'
+    topic: 'output-${! metadata("kafka_topic") }'
 ```
 
 Recall from earlier that we also wanted to preserve our partition mapping when writing to new topics. Again, we can use metadata to retrieve the original partition of each message in the source topic. We'll use the `kafka_partition` metadata field in conjunction with setting `partitioner` to `manual` -- overriding any other fancy partitioning algorithm in favour of preserving our initial mapping. Combining again with [string interpolation][bloblang.interpolation], we get the following:
@@ -61,8 +61,8 @@ Recall from earlier that we also wanted to preserve our partition mapping when w
 output:
   kafka_franz:
     seed_brokers: [ TODO ]
-    topic: 'output-${! meta("kafka_topic") }'
-    partition: ${! meta("kafka_partition") }
+    topic: 'output-${! metadata("kafka_topic") }'
+    partition: ${! metadata("kafka_partition") }
     partitioner: manual
 ```
 
@@ -77,8 +77,8 @@ For completeness, we can also route all consumed events back to their original s
 output:
   kafka_franz:
     seed_brokers: [ TODO ]
-    topic: ${! meta("kafka_topic") }
-    partition: ${! meta("kafka_partition") }
+    topic: ${! metadata("kafka_topic") }
+    partition: ${! metadata("kafka_partition") }
     partitioner: manual
 ```
 
diff --git a/website/docs/components/about.md b/website/docs/components/about.md
index 00976d592..7fc4a12bd 100644
--- a/website/docs/components/about.md
+++ b/website/docs/components/about.md
@@ -31,7 +31,7 @@ pipeline:
 output:
   aws_s3:
     bucket: TODO
-    path: '${! meta("kafka_topic") }/${! json("message.id") }.json'
+    path: '${! metadata("kafka_topic") }/${! json("message.id") }.json'
 ```
 
 These are the main components within Bento and they provide the majority of useful behaviour.
diff --git a/website/docs/components/metrics/about.md b/website/docs/components/metrics/about.md
index fc58cc53f..da37f2a4f 100644
--- a/website/docs/components/metrics/about.md
+++ b/website/docs/components/metrics/about.md
@@ -166,7 +166,7 @@ metrics:
     meta = deleted()
 
     # Re-add the `label` label with meows replaced with woofs
-    meta label = meta("label").replace("meow", "woof")
+    meta label = metadata("label").replace("meow", "woof")
 
     # Delete all metric series that aren't in our list
     root = if ![
diff --git a/website/docs/components/outputs/about.md b/website/docs/components/outputs/about.md
index f0eefb84b..5f8085991 100644
--- a/website/docs/components/outputs/about.md
+++ b/website/docs/components/outputs/about.md
@@ -13,7 +13,7 @@ output:
 
   aws_s3:
     bucket: TODO
-    path: '${! meta("kafka_topic") }/${! json("message.id") }.json'
+    path: '${! metadata("kafka_topic") }/${! json("message.id") }.json'
 
   # Optional list of processing steps
   processors:
@@ -60,7 +60,7 @@ For example, multiplexing against Kafka topics is a common pattern:
 output:
   kafka:
     addresses: [ TODO:6379 ]
-    topic: ${! meta("target_topic") }
metadata("target_topic") } ``` Refer to the field documentation for a given output to see if it support interpolation. diff --git a/website/docs/components/outputs/amqp_0_9.md b/website/docs/components/outputs/amqp_0_9.md index 6671cb6dd..b384b2263 100644 --- a/website/docs/components/outputs/amqp_0_9.md +++ b/website/docs/components/outputs/amqp_0_9.md @@ -274,7 +274,7 @@ Default: `""` priority: "0" -priority: ${! meta("amqp_priority") } +priority: ${! metadata("amqp_priority") } priority: ${! json("doc.priority") } ``` diff --git a/website/docs/components/outputs/azure_table_storage.md b/website/docs/components/outputs/azure_table_storage.md index ad056bc93..812083994 100644 --- a/website/docs/components/outputs/azure_table_storage.md +++ b/website/docs/components/outputs/azure_table_storage.md @@ -39,7 +39,7 @@ output: storage_access_key: "" storage_connection_string: "" storage_sas_token: "" - table_name: ${! meta("kafka_topic") } # No default (required) + table_name: ${! metadata("kafka_topic") } # No default (required) partition_key: "" row_key: "" properties: {} @@ -63,7 +63,7 @@ output: storage_access_key: "" storage_connection_string: "" storage_sas_token: "" - table_name: ${! meta("kafka_topic") } # No default (required) + table_name: ${! metadata("kafka_topic") } # No default (required) partition_key: "" row_key: "" properties: {} @@ -169,7 +169,7 @@ Type: `string` ```yml # Examples -table_name: ${! meta("kafka_topic") } +table_name: ${! metadata("kafka_topic") } table_name: ${! json("table") } ``` @@ -228,7 +228,7 @@ Options: `INSERT`, `INSERT_MERGE`, `INSERT_REPLACE`, `UPDATE_MERGE`, `UPDATE_REP transaction_type: ${! json("operation") } -transaction_type: ${! meta("operation") } +transaction_type: ${! metadata("operation") } transaction_type: INSERT ``` diff --git a/website/docs/components/outputs/kafka_franz.md b/website/docs/components/outputs/kafka_franz.md index 57d819853..0a9cc63d3 100644 --- a/website/docs/components/outputs/kafka_franz.md +++ b/website/docs/components/outputs/kafka_franz.md @@ -38,7 +38,7 @@ output: seed_brokers: [] # No default (required) topic: "" # No default (required) key: "" # No default (optional) - partition: ${! meta("partition") } # No default (optional) + partition: ${! metadata("partition") } # No default (optional) metadata: include_prefixes: [] include_patterns: [] @@ -62,7 +62,7 @@ output: topic: "" # No default (required) key: "" # No default (optional) partitioner: "" # No default (optional) - partition: ${! meta("partition") } # No default (optional) + partition: ${! metadata("partition") } # No default (optional) client_id: bento rack_id: "" idempotent_write: true @@ -162,7 +162,7 @@ Type: `string` ```yml # Examples -partition: ${! meta("partition") } +partition: ${! metadata("partition") } ``` ### `client_id` diff --git a/website/docs/components/outputs/nats_jetstream.md b/website/docs/components/outputs/nats_jetstream.md index f259e1346..0d84a5ea6 100644 --- a/website/docs/components/outputs/nats_jetstream.md +++ b/website/docs/components/outputs/nats_jetstream.md @@ -144,7 +144,7 @@ Type: `string` subject: foo.bar.baz -subject: ${! meta("kafka_topic") } +subject: ${! metadata("kafka_topic") } subject: foo.${! json("meta.type") } ``` diff --git a/website/docs/components/processors/cached.md b/website/docs/components/processors/cached.md index 399cdb55b..98c710fb4 100644 --- a/website/docs/components/processors/cached.md +++ b/website/docs/components/processors/cached.md @@ -136,9 +136,9 @@ key: my_foo_result key: ${! 
 key: ${! this.document.id }
 
-key: ${! meta("kafka_key") }
+key: ${! metadata("kafka_key") }
 
-key: ${! meta("kafka_topic") }
+key: ${! metadata("kafka_topic") }
 ```
 
 ### `ttl`
diff --git a/website/docs/components/processors/nats_request_reply.md b/website/docs/components/processors/nats_request_reply.md
index ee8d6e878..87e13c808 100644
--- a/website/docs/components/processors/nats_request_reply.md
+++ b/website/docs/components/processors/nats_request_reply.md
@@ -161,7 +161,7 @@ Type: `string`
 
 subject: foo.bar.baz
 
-subject: ${! meta("kafka_topic") }
+subject: ${! metadata("kafka_topic") }
 
 subject: foo.${! json("meta.type") }
 ```
diff --git a/website/docs/components/processors/redis.md b/website/docs/components/processors/redis.md
index a56c8bced..ee8673838 100644
--- a/website/docs/components/processors/redis.md
+++ b/website/docs/components/processors/redis.md
@@ -330,7 +330,7 @@ command: scard
 
 command: incrby
 
-command: ${! meta("command") }
+command: ${! metadata("command") }
 ```
 
 ### `args_mapping`
diff --git a/website/docs/components/processors/schema_registry_encode.md b/website/docs/components/processors/schema_registry_encode.md
index b01da1b20..19c2f35ac 100644
--- a/website/docs/components/processors/schema_registry_encode.md
+++ b/website/docs/components/processors/schema_registry_encode.md
@@ -136,7 +136,7 @@ Type: `string`
 
 subject: foo
 
-subject: ${! meta("kafka_topic") }
+subject: ${! metadata("kafka_topic") }
 ```
 
 ### `refresh_period`
diff --git a/website/docs/configuration/about.md b/website/docs/configuration/about.md
index 587ceec7d..ef979cf20 100644
--- a/website/docs/configuration/about.md
+++ b/website/docs/configuration/about.md
@@ -33,7 +33,7 @@ pipeline:
 output:
   aws_s3:
     bucket: TODO
-    path: '${! meta("kafka_topic") }/${! json("message.id") }.json'
+    path: '${! metadata("kafka_topic") }/${! json("message.id") }.json'
 ```
 
 
@@ -62,7 +62,7 @@ pipeline:
 output:
   aws_s3:
     bucket: TODO
-    path: '${! meta("kafka_topic") }/${! json("message.id") }.json'
+    path: '${! metadata("kafka_topic") }/${! json("message.id") }.json'
 
 input_resources: []
 cache_resources: []
diff --git a/website/docs/configuration/interpolation.md b/website/docs/configuration/interpolation.md
index 7b564b6cc..7993b4096 100644
--- a/website/docs/configuration/interpolation.md
+++ b/website/docs/configuration/interpolation.md
@@ -49,8 +49,8 @@ A common usecase for interpolated functions is dynamic routing at the output lev
 output:
   kafka:
     addresses: [ TODO ]
-    topic: ${! meta("output_topic") }
-    key: ${! meta("key") }
+    topic: ${! metadata("output_topic") }
+    key: ${! metadata("key") }
 ```
 
 ### Coalesce and Mapping
diff --git a/website/docs/configuration/metadata.md b/website/docs/configuration/metadata.md
index 55464b7e0..ec795c0f6 100644
--- a/website/docs/configuration/metadata.md
+++ b/website/docs/configuration/metadata.md
@@ -36,13 +36,17 @@ meta = @.filter(kv -> !kv.key.has_prefix("kafka_"))
 
 ## Using Metadata
 
+:::caution
+There are two functions to reference metadata: [`meta()`][meta] and [`metadata()`][metadata]. [`meta()`][meta] has been deprecated in favour of [`metadata()`][metadata].
+:::
+
 Metadata values can be referenced in any field that supports [interpolation functions][interpolation]. For example, you can route messages to Kafka topics using interpolation of metadata keys:
 
 ```yaml
 output:
   kafka:
     addresses: [ TODO ]
-    topic: ${! meta("target_topic") }
metadata("target_topic") } ``` Bento also allows you to conditionally process messages based on their metadata with the [`switch` processor][processors.switch]: @@ -76,7 +80,7 @@ For example, if we were sending messages to kafka using a metadata key `target_t output: kafka: addresses: [ TODO ] - topic: ${! meta("target_topic") } + topic: ${! metadata("target_topic") } metadata: exclude_prefixes: - target_topic @@ -102,7 +106,7 @@ pipeline: output: kafka: addresses: [ TODO ] - topic: ${! meta("_target_topic") } + topic: ${! metadata("_target_topic") } metadata: exclude_prefixes: [ "_" ] ``` @@ -111,3 +115,5 @@ output: [processors.switch]: /docs/components/processors/switch [processors.mapping]: /docs/components/processors/mapping [guides.bloblang]: /docs/guides/bloblang/about +[meta]: /docs/guides/bloblang/functions#meta +[metadata]: /docs/guides/bloblang/functions#metadata diff --git a/website/docs/configuration/unit_testing.md b/website/docs/configuration/unit_testing.md index 535201fc3..ad3fc68ab 100644 --- a/website/docs/configuration/unit_testing.md +++ b/website/docs/configuration/unit_testing.md @@ -37,7 +37,7 @@ pipeline: output: aws_s3: bucket: TODO - path: '${! meta("kafka_topic") }/${! json("message.id") }.json' + path: '${! metadata("kafka_topic") }/${! json("message.id") }.json' ``` One way to write our unit tests for this config is to accompany it with a file of the same name and extension but suffixed with `_bento_test`, which in this case would be `foo_bento_test.yaml`. diff --git a/website/docs/guides/streams_mode/about.md b/website/docs/guides/streams_mode/about.md index 59e1455b6..42d85cdfd 100644 --- a/website/docs/guides/streams_mode/about.md +++ b/website/docs/guides/streams_mode/about.md @@ -54,7 +54,7 @@ This can cause problems if your streams are short lived and uniquely named as th ```yaml # Only register metrics for the stream `foo`. Others will be ignored. metrics: - mapping: if meta("stream") != "foo" { deleted() } + mapping: if metadata("stream") != "foo" { deleted() } prometheus: {} ``` diff --git a/website/src/pages/index.tsx b/website/src/pages/index.tsx index 86709417b..20061319c 100755 --- a/website/src/pages/index.tsx +++ b/website/src/pages/index.tsx @@ -170,7 +170,7 @@ pipeline: output: aws_s3: bucket: TODO - path: '\${! meta("partition") }/\${! timestamp_unix_nano() }.tar.gz' + path: '\${! metadata("partition") }/\${! timestamp_unix_nano() }.tar.gz' batching: count: 100 period: 10s