From 49fbf2a7c90941fe74833a229fb3060cdb56413f Mon Sep 17 00:00:00 2001 From: Jast <745925668@qq.com> Date: Mon, 6 May 2024 21:38:07 +0800 Subject: [PATCH] [featrue] add apache hdfs monitor (#1920) Co-authored-by: zhangshenghang Co-authored-by: zhangshenghang Co-authored-by: crossoverJie Co-authored-by: yqxxgh <42080876+yqxxgh@users.noreply.github.com> Co-authored-by: Ceilzcx <48920254+Ceilzcx@users.noreply.github.com> Co-authored-by: aias00 Co-authored-by: tomsun28 --- home/docs/help/hdfs_datanode.md | 56 ++ home/docs/help/hdfs_namenode.md | 92 ++++ .../current/help/hdfs_datanode.md | 56 ++ .../current/help/hdfs_namenode.md | 93 ++++ home/sidebars.json | 2 + .../resources/define/app-hdfs_datanode.yml | 248 +++++++++ .../resources/define/app-hdfs_namenode.yml | 512 ++++++++++++++++++ 7 files changed, 1059 insertions(+) create mode 100644 home/docs/help/hdfs_datanode.md create mode 100644 home/docs/help/hdfs_namenode.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md create mode 100644 manager/src/main/resources/define/app-hdfs_datanode.yml create mode 100644 manager/src/main/resources/define/app-hdfs_namenode.yml diff --git a/home/docs/help/hdfs_datanode.md b/home/docs/help/hdfs_datanode.md new file mode 100644 index 00000000000..2e09fb9fba1 --- /dev/null +++ b/home/docs/help/hdfs_datanode.md @@ -0,0 +1,56 @@ +--- +id: hdfs_datanode +title: Monitoring Apache HDFS DataNode Monitoring +sidebar_label: Apache HDFS DataNode +keywords: [big data monitoring system, distributed file system monitoring, Apache HDFS DataNode monitoring] +--- + +> Hertzbeat monitors metrics for Apache HDFS DataNode nodes. + +**Protocol Used: HTTP** + +## Pre-monitoring Operations + +Retrieve the HTTP monitoring port for the Apache HDFS DataNode. 
Value: `dfs.datanode.http.address` + +## Configuration Parameters + +| Parameter Name | Parameter Description | +| ----------------- |-------------------------------------------------------| +| Target Host | IP(v4 or v6) or domain name of the target to be monitored. Exclude protocol. | +| Port | Monitoring port number for Apache HDFS DataNode, default is 50075. | +| Query Timeout | Timeout for querying Apache HDFS DataNode, in milliseconds, default is 6000 milliseconds. | +| Metrics Collection Interval | Time interval for monitoring data collection, in seconds, minimum interval is 30 seconds. | +| Probe Before Monitoring | Whether to probe and check monitoring availability before adding. | +| Description/Remarks | Additional description and remarks for this monitoring. | + +### Metrics Collected + +#### Metric Set: FSDatasetState + +| Metric Name | Metric Unit | Metric Description | +| ------------ | ----------- | ------------------------------ | +| DfsUsed | GB | DataNode HDFS usage | +| Remaining | GB | Remaining space on DataNode HDFS | +| Capacity | GB | Total capacity of DataNode HDFS | + +#### Metric Set: JvmMetrics + +| Metric Name | Metric Unit | Metric Description | +| ---------------------- | ----------- | ----------------------------------------- | +| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | +| MemNonHeapCommittedM | MB | Committed size of NonHeapMemory configured in JVM | +| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | +| MemHeapCommittedM | MB | Committed size of HeapMemory by JVM | +| MemHeapMaxM | MB | Maximum size of HeapMemory configured in JVM | +| MemMaxM | MB | Maximum memory available for JVM at runtime | +| ThreadsRunnable | Count | Number of threads in RUNNABLE state | +| ThreadsBlocked | Count | Number of threads in BLOCKED state | +| ThreadsWaiting | Count | Number of threads in WAITING state | +| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state | + +#### Metric Set: runtime + +| 
Metric Name | Metric Unit | Metric Description | +| ------------ | ----------- | ------------------ | +| StartTime | | Startup time | diff --git a/home/docs/help/hdfs_namenode.md b/home/docs/help/hdfs_namenode.md new file mode 100644 index 00000000000..1afd6d4b1ae --- /dev/null +++ b/home/docs/help/hdfs_namenode.md @@ -0,0 +1,92 @@ +--- +id: hdfs_namenode +title: Monitoring HDFS NameNode Monitoring +sidebar_label: Apache HDFS NameNode +keywords: [big data monitoring system, distributed file system monitoring, HDFS NameNode monitoring] +--- + +> Hertzbeat monitors metrics for HDFS NameNode nodes. + +**Protocol Used: HTTP** + +## Pre-Monitoring Actions + +Ensure that you have obtained the JMX monitoring port for the HDFS NameNode. + +## Configuration Parameters + +| Parameter Name | Parameter Description | +| ------------------ |--------------------------------------------------------| +| Target Host | The IPv4, IPv6, or domain name of the target being monitored. Exclude protocol headers. | +| Port | The monitoring port number of the HDFS NameNode, default is 50070. | +| Query Timeout | Timeout for querying the HDFS NameNode, in milliseconds, default is 6000 milliseconds. | +| Metrics Collection Interval | Time interval for collecting monitoring data, in seconds, minimum interval is 30 seconds. | +| Probe Before Monitoring | Whether to probe and check the availability of monitoring before adding it. | +| Description/Remarks | Additional description and remarks for this monitoring. 
| + +### Collected Metrics + +#### Metric Set: FSNamesystem + +| Metric Name | Metric Unit | Metric Description | +| --------------------------- | ----------- | ------------------------------------- | +| CapacityTotal | | Total cluster storage capacity | +| CapacityTotalGB | GB | Total cluster storage capacity | +| CapacityUsed | | Used cluster storage capacity | +| CapacityUsedGB | GB | Used cluster storage capacity | +| CapacityRemaining | | Remaining cluster storage capacity | +| CapacityRemainingGB | GB | Remaining cluster storage capacity | +| CapacityUsedNonDFS | | Non-HDFS usage of cluster capacity | +| TotalLoad | | Total client connections in the cluster | +| FilesTotal | | Total number of files in the cluster | +| BlocksTotal | | Total number of BLOCKs | +| PendingReplicationBlocks | | Number of blocks awaiting replication | +| UnderReplicatedBlocks | | Number of blocks with insufficient replicas | +| CorruptBlocks | | Number of corrupt blocks | +| ScheduledReplicationBlocks | | Number of blocks scheduled for replication | +| PendingDeletionBlocks | | Number of blocks awaiting deletion | +| ExcessBlocks | | Number of excess blocks | +| PostponedMisreplicatedBlocks| | Number of misreplicated blocks postponed for processing | +| NumLiveDataNodes | | Number of live data nodes in the cluster | +| NumDeadDataNodes | | Number of data nodes marked as dead | +| NumDecomLiveDataNodes | | Number of decommissioned live nodes | +| NumDecomDeadDataNodes | | Number of decommissioned dead nodes | +| NumDecommissioningDataNodes | | Number of nodes currently being decommissioned | +| TransactionsSinceLastCheckpoint | | Number of transactions since the last checkpoint | +| LastCheckpointTime | | Time of the last checkpoint | +| PendingDataNodeMessageCount| | Number of DATANODE requests queued in the standby namenode | + +#### Metric Set: RPC + +| Metric Name | Metric Unit | Metric Description | +| ------------------------- | ----------- | -------------------------- | +| 
ReceivedBytes | | Data receiving rate | +| SentBytes | | Data sending rate | +| RpcQueueTimeNumOps | | RPC call rate | + +#### Metric Set: runtime + +| Metric Name | Metric Unit | Metric Description | +| ------------------------- | ----------- | ------------------- | +| StartTime | | Start time | + +#### Metric Set: JvmMetrics + +| Metric Name | Metric Unit | Metric Description | +| ------------------------- | ----------- | ------------------- | +| MemNonHeapUsedM | MB | Current usage of NonHeapMemory by JVM | +| MemNonHeapCommittedM | MB | Committed NonHeapMemory by JVM | +| MemHeapUsedM | MB | Current usage of HeapMemory by JVM | +| MemHeapCommittedM | MB | Committed HeapMemory by JVM | +| MemHeapMaxM | MB | Maximum HeapMemory configured for JVM | +| MemMaxM | MB | Maximum memory that can be used by JVM | +| GcCountParNew | Count | Number of ParNew GC events | +| GcTimeMillisParNew | Milliseconds| Time spent in ParNew GC | +| GcCountConcurrentMarkSweep| Count | Number of ConcurrentMarkSweep GC events| +| GcTimeMillisConcurrentMarkSweep | Milliseconds | Time spent in ConcurrentMarkSweep GC | +| GcCount | Count | Total number of GC events | +| GcTimeMillis | Milliseconds| Total time spent in GC events | +| ThreadsRunnable | Count | Number of threads in RUNNABLE state | +| ThreadsBlocked | Count | Number of threads in BLOCKED state | +| ThreadsWaiting | Count | Number of threads in WAITING state | +| ThreadsTimedWaiting | Count | Number of threads in TIMED WAITING state| diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md new file mode 100644 index 00000000000..efb05494290 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_datanode.md @@ -0,0 +1,56 @@ +--- +id: hdfs_datanode +title: 监控:Apache HDFS DataNode监控 +sidebar_label: Apache HDFS DataNode +keywords: [大数据监控系统, 分布式文件系统监控, Apache HDFS DataNode监控] +--- + +> 
Hertzbeat 对 Apache HDFS DataNode 节点监控指标进行监控。 + +**使用协议:HTTP** + +## 监控前操作 + +获取 Apache HDFS DataNode 的 HTTP 监控端口。 取值:`dfs.datanode.http.address` + +## 配置参数 + +| 参数名称 | 参数帮助描述 | +| ---------------- |---------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | Apache HDFS DataNode 的监控端口号,默认为50075。 | +| 查询超时时间 | 查询 Apache HDFS DataNode 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 是否探测 | 新增监控前是否先探测检查监控可用性。 | +| 描述备注 | 此监控的更多描述和备注信息。 | + +### 采集指标 + +#### 指标集合:FSDatasetState + +| 指标名称 | 指标单位 | 指标帮助描述 | +| -------------------------- | -------- | ------------------------------------ | +| DfsUsed | GB | DataNode HDFS使用量 | +| Remaining | GB | DataNode HDFS剩余空间 | +| Capacity | GB | DataNode HDFS空间总量 | + +#### 指标集合:JvmMetrics + +| 指标名称 | 指标单位 | 指标帮助描述 | +| ------------------------ | -------- | ------------------------------------ | +| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 | +| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 | +| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 | +| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 | +| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 | +| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 | +| ThreadsRunnable | 个 | 处于 RUNNABLE 状态的线程数量 | +| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 | +| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 | +| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 | + +#### 指标集合:runtime + +| 指标名称 | 指标单位 | 指标帮助描述 | +| --------------------| -------- | ----------------- | +| StartTime | | 启动时间 | \ No newline at end of file diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md new file mode 100644 index 00000000000..26fd5e985af --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hdfs_namenode.md @@ -0,0 +1,93 @@ +--- +id: hdfs_namenode +title: 监控:Apache HDFS NameNode监控 +sidebar_label: Apache HDFS NameNode +keywords: [大数据监控系统, 
分布式文件系统监控, Apache HDFS NameNode监控] +--- + +> Hertzbeat 对 Apache HDFS NameNode 节点监控指标进行监控。 + +**使用协议:HTTP** + +## 监控前操作 + +获取 Apache HDFS NameNode 的 HTTP 监控端口。取值:`dfs.namenode.http-address` + +## 配置参数 + +| 参数名称 | 参数帮助描述 | +| ---------------- |---------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。不带协议头。 | +| 端口 | HDFS NameNode 的监控端口号,默认为50070。 | +| 查询超时时间 | 查询 HDFS NameNode 的超时时间,单位毫秒,默认6000毫秒。 | +| 指标采集间隔 | 监控数据采集的时间间隔,单位秒,最小间隔为30秒。 | +| 是否探测 | 新增监控前是否先探测检查监控可用性。 | +| 描述备注 | 此监控的更多描述和备注信息。 | + +### 采集指标 + +#### 指标集合:FSNamesystem + +| 指标名称 | 指标单位 | 指标帮助描述 | +| -------------------------- | -------- | ------------------------------------ | +| CapacityTotal | | 集群存储总容量 | +| CapacityTotalGB | GB | 集群存储总容量 | +| CapacityUsed | | 集群存储已使用容量 | +| CapacityUsedGB | GB | 集群存储已使用容量 | +| CapacityRemaining | | 集群存储剩余容量 | +| CapacityRemainingGB | GB | 集群存储剩余容量 | +| CapacityUsedNonDFS | | 集群非 HDFS 使用容量 | +| TotalLoad | | 整个集群的客户端连接数 | +| FilesTotal | | 集群文件总数量 | +| BlocksTotal | | 总 BLOCK 数量 | +| PendingReplicationBlocks | | 等待被备份的块数量 | +| UnderReplicatedBlocks | | 副本数不够的块数量 | +| CorruptBlocks | | 坏块数量 | +| ScheduledReplicationBlocks | | 安排要备份的块数量 | +| PendingDeletionBlocks | | 等待被删除的块数量 | +| ExcessBlocks | | 多余的块数量 | +| PostponedMisreplicatedBlocks | | 被推迟处理的异常块数量 | +| NumLiveDataNodes | | 活的数据节点数量 | +| NumDeadDataNodes | | 已经标记为 Dead 状态的数据节点数量 | +| NumDecomLiveDataNodes | | 下线且 Live 的节点数量 | +| NumDecomDeadDataNodes | | 下线且 Dead 的节点数量 | +| NumDecommissioningDataNodes | | 正在下线的节点数量 | +| TransactionsSinceLastCheckpoint | | 从上次Checkpoint之后的事务数量 | +| LastCheckpointTime | | 上一次Checkpoint时间 | +| PendingDataNodeMessageCount | | DATANODE 的请求被 QUEUE 在 standby namenode 中的个数 | + +#### 指标集合:RPC + +| 指标名称 | 指标单位 | 指标帮助描述 | +| ------------------- | -------- | ---------------------- | +| ReceivedBytes | | 接收数据速率 | +| SentBytes | | 发送数据速率 | +| RpcQueueTimeNumOps | | RPC 调用速率 | + +#### 指标集合:runtime + +| 指标名称 | 指标单位 | 指标帮助描述 | +| --------------------| -------- | ----------------- | 
+| StartTime | | 启动时间 |
+
+#### 指标集合:JvmMetrics
+
+| 指标名称 | 指标单位 | 指标帮助描述 |
+| ------------------------ | -------- | ---------------- |
+| MemNonHeapUsedM | MB | JVM 当前已经使用的 NonHeapMemory 的大小 |
+| MemNonHeapCommittedM | MB | JVM 配置的 NonHeapCommittedM 的大小 |
+| MemHeapUsedM | MB | JVM 当前已经使用的 HeapMemory 的大小 |
+| MemHeapCommittedM | MB | JVM HeapMemory 提交大小 |
+| MemHeapMaxM | MB | JVM 配置的 HeapMemory 的大小 |
+| MemMaxM | MB | JVM 运行时可以使用的最大内存大小 |
+| GcCountParNew | 次 | 新生代GC次数 |
+| GcTimeMillisParNew | 毫秒 | 新生代GC消耗时间 |
+| GcCountConcurrentMarkSweep | 次 | 老年代GC次数 |
+| GcTimeMillisConcurrentMarkSweep | 毫秒 | 老年代GC消耗时间 |
+| GcCount | 次 | GC次数 |
+| GcTimeMillis | 毫秒 | GC消耗时间 |
+| ThreadsRunnable | 个 | 处于 RUNNABLE 状态的线程数量 |
+| ThreadsBlocked | 个 | 处于 BLOCKED 状态的线程数量 |
+| ThreadsWaiting | 个 | 处于 WAITING 状态的线程数量 |
+| ThreadsTimedWaiting | 个 | 处于 TIMED WAITING 状态的线程数量 |
+
diff --git a/home/sidebars.json b/home/sidebars.json
index 3f7d76783e2..69e484d189c 100755
--- a/home/sidebars.json
+++ b/home/sidebars.json
@@ -221,6 +221,8 @@
         "help/hadoop",
         "help/hbase_master",
         "help/hbase_regionserver",
+        "help/hdfs_namenode",
+        "help/hdfs_datanode",
         "help/iotdb",
         "help/hive",
         "help/airflow",
diff --git a/manager/src/main/resources/define/app-hdfs_datanode.yml b/manager/src/main/resources/define/app-hdfs_datanode.yml
new file mode 100644
index 00000000000..f20bc4e45c3
--- /dev/null
+++ b/manager/src/main/resources/define/app-hdfs_datanode.yml
@@ -0,0 +1,248 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: bigdata +# The monitoring type eg: linux windows tomcat mysql aws... +app: hdfs_datanode +# The monitoring i18n name +name: + zh-CN: Apache HDFS DataNode + en-US: Apache HDFS DataNode +# The description and help of this monitoring type +help: + zh-CN: Hertzbeat 对 HDFS DataNode 节点监控指标进行监控。
您可以点击 “新建 Apache HDFS DataNode” 并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: Hertzbeat monitors the HDFS DataNode metrics.
You can click "New Apache HDFS DataNode" to configure, or select "More Actions" to import an existing configuration. + zh-TW: Hertzbeat 對 HDFS DataNode 節點監控指標進行監控。
您可以點擊 “新建 Apache HDFS DataNode” 並進行配置,或者選擇“更多操作”,導入已有配置。 + +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/hdfs_datanode/ + en-US: https://hertzbeat.apache.org/docs/help/hdfs_datanode/ +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 50075 + # field-param field key + - field: timeout + # name-param field display i18n name + name: + zh-CN: 查询超时时间 + en-US: Query Timeout + # type-param field type(most mapping the html input type) + type: number + # required-true or false + required: false + # hide param-true or false + hide: true + # default value + defaultValue: 6000 +# collect metrics config list +metrics: + # metrics - Server + - name: FSDatasetState + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: DfsUsed + type: 0 + unit: 'GB' + i18n: + zh-CN: DataNode HDFS使用量 + en-US: DfsUsed + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Remaining + type: 0 + unit: 
'GB' + i18n: + zh-CN: DataNode HDFS剩余空间 + en-US: Remaining + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Capacity + type: 0 + unit: 'GB' + i18n: + zh-CN: DataNode HDFS空间总量 + en-US: Capacity + units: + - DfsUsed=B->GB + - Remaining=B->GB + - Capacity=B->GB + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.DfsUsed + - $.Remaining + - $.Capacity + calculates: + - DfsUsed=$.DfsUsed + - Remaining=$.Remaining + - Capacity=$.Capacity + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=DataNode,name=FSDatasetState")]' + - name: JvmMetrics + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemNonHeapUsedM + type: 0 + i18n: + zh-CN: JVM 当前已经使用的 NonHeapMemory 的大小 + en-US: MemNonHeapUsedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemNonHeapCommittedM + type: 0 + i18n: + zh-CN: JVM 配置的 NonHeapCommittedM 的大小 + en-US: MemNonHeapCommittedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapUsedM + type: 0 + i18n: + zh-CN: JVM 当前已经使用的 HeapMemory 的大小 + en-US: MemHeapUsedM + # field-metric name, type-metric type(0-number,1-string), unit-metric 
unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapCommittedM + type: 0 + i18n: + zh-CN: JVM HeapMemory 提交大小 + en-US: MemHeapCommittedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapMaxM + type: 0 + i18n: + zh-CN: JVM 配置的 HeapMemory 的大小 + en-US: MemHeapMaxM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemMaxM + type: 0 + i18n: + zh-CN: JVM 运行时可以使用的最大内存大小 + en-US: MemMaxM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsRunnable + type: 0 + i18n: + zh-CN: 处于 RUNNABLE 状态的线程数量 + en-US: ThreadsRunnable + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsBlocked + type: 0 + i18n: + zh-CN: 处于 BLOCKED 状态的线程数量 + en-US: ThreadsBlocked + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsWaiting + type: 0 + i18n: + zh-CN: 处于 WAITING 状态的线程数量 + en-US: ThreadsWaiting + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsTimedWaiting + type: 0 + i18n: + zh-CN: 处于 TIMED WAITING 状态的线程数量 + en-US: ThreadsTimedWaiting + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.MemNonHeapUsedM + - $.MemNonHeapCommittedM + - $.MemHeapUsedM + - $.MemHeapCommittedM + - $.MemHeapMaxM + - $.MemMaxM + - $.ThreadsRunnable + - $.ThreadsBlocked + - $.ThreadsWaiting + - $.ThreadsTimedWaiting + calculates: + - MemNonHeapUsedM=$.MemNonHeapUsedM + - 
MemNonHeapCommittedM=$.MemNonHeapCommittedM + - MemHeapUsedM=$.MemHeapUsedM + - MemHeapCommittedM=$.MemHeapCommittedM + - MemHeapMaxM=$.MemHeapMaxM + - MemMaxM=$.MemMaxM + - ThreadsRunnable=$.ThreadsRunnable + - ThreadsBlocked=$.ThreadsBlocked + - ThreadsWaiting=$.ThreadsWaiting + - ThreadsTimedWaiting=$.ThreadsTimedWaiting + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=DataNode,name=JvmMetrics")]' + - name: runtime + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: StartTime + type: 1 + i18n: + zh-CN: 启动时间 + en-US: StartTime + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.beans[?(@.name == "java.lang:type=Runtime")].StartTime + calculates: + - StartTime=$.beans[?(@.name == "java.lang:type=Runtime")].StartTime + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$' \ No newline at end of file diff --git a/manager/src/main/resources/define/app-hdfs_namenode.yml b/manager/src/main/resources/define/app-hdfs_namenode.yml new file mode 100644 index 00000000000..fcd8ce36e31 --- /dev/null +++ b/manager/src/main/resources/define/app-hdfs_namenode.yml @@ -0,0 +1,512 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: bigdata +# The monitoring type eg: linux windows tomcat mysql aws... +app: hdfs_namenode +# The monitoring i18n name +name: + zh-CN: Apache HDFS NameNode + en-US: Apache HDFS NameNode +# The description and help of this monitoring type +help: + zh-CN: Hertzbeat 对 HDFS NameNode 节点监控指标进行监控。
您可以点击 “新建 Apache HDFS NameNode” 并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: Hertzbeat monitors the HDFS NameNode metrics.
You can click "New Apache HDFS NameNode" to configure, or select "More Actions" to import an existing configuration. + zh-TW: Hertzbeat 對 HDFS NameNode 節點監控指標進行監控。
您可以點擊 “新建 Apache HDFS NameNode” 並進行配置,或者選擇“更多操作”,導入已有配置。 + +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/hdfs_namenode/ + en-US: https://hertzbeat.apache.org/docs/help/hdfs_namenode/ +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 50070 + # field-param field key + - field: timeout + # name-param field display i18n name + name: + zh-CN: 查询超时时间 + en-US: Query Timeout + # type-param field type(most mapping the html input type) + type: number + # required-true or false + required: false + # hide param-true or false + hide: true + # default value + defaultValue: 6000 +# collect metrics config list +metrics: + # metrics - Server + - name: FSNamesystem + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: CapacityTotal + type: 0 + i18n: + zh-CN: 集群存储总容量 + en-US: CapacityTotal + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: CapacityTotalGB + type: 0 + unit: 'GB' 
+        i18n:
+          zh-CN: 集群存储总容量
+          en-US: CapacityTotalGB
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: CapacityUsed
+        type: 0
+        i18n:
+          zh-CN: 集群存储已使用容量
+          en-US: CapacityUsed
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: CapacityUsedGB
+        type: 0
+        unit: 'GB'
+        i18n:
+          zh-CN: 集群存储已使用容量
+          en-US: CapacityUsedGB
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: CapacityRemaining
+        type: 0
+        i18n:
+          zh-CN: 集群存储剩余容量
+          en-US: CapacityRemaining
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: CapacityRemainingGB
+        type: 0
+        unit: 'GB'
+        i18n:
+          zh-CN: 集群存储剩余容量
+          en-US: CapacityRemainingGB
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: CapacityUsedNonDFS
+        type: 0
+        i18n:
+          zh-CN: 集群非 HDFS 使用容量
+          en-US: CapacityUsedNonDFS
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: TotalLoad
+        type: 0
+        i18n:
+          zh-CN: 整个集群的客户端连接数
+          en-US: TotalLoad
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: FilesTotal
+        type: 0
+        i18n:
+          zh-CN: 集群文件总数量
+          en-US: FilesTotal
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: BlocksTotal
+        type: 0
+        i18n:
+          zh-CN: 总 BLOCK 数量
+          en-US: BlocksTotal
+      # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field
+      - field: 
PendingReplicationBlocks + type: 0 + i18n: + zh-CN: 等待被备份的块数量 + en-US: PendingReplicationBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: UnderReplicatedBlocks + type: 0 + i18n: + zh-CN: 副本数不够的块数量 + en-US: UnderReplicatedBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: CorruptBlocks + type: 0 + i18n: + zh-CN: 坏块数量 + en-US: CorruptBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScheduledReplicationBlocks + type: 0 + i18n: + zh-CN: 安排要备份的块数量 + en-US: ScheduledReplicationBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: PendingDeletionBlocks + type: 0 + i18n: + zh-CN: 等待被删除的块数量 + en-US: PendingDeletionBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ExcessBlocks + type: 0 + i18n: + zh-CN: 多余的块数量 + en-US: ExcessBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: PostponedMisreplicatedBlocks + type: 0 + i18n: + zh-CN: 被推迟处理的异常块数量 + en-US: PostponedMisreplicatedBlocks + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: NumLiveDataNodes + type: 0 + i18n: + zh-CN: 活的数据节点数量 + en-US: NumLiveDataNodes + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: NumDeadDataNodes + type: 0 + i18n: + zh-CN: 已经标记为 Dead 状态的数据节点数量 + en-US: NumDeadDataNodes + # field-metric name, type-metric 
type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: NumDecomLiveDataNodes + type: 0 + i18n: + zh-CN: 下线且 Live 的节点数量 + en-US: NumDecomLiveDataNodes + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: NumDecomDeadDataNodes + type: 0 + i18n: + zh-CN: 下线且 Dead 的节点数量 + en-US: NumDecomDeadDataNodes + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: NumDecommissioningDataNodes + type: 0 + i18n: + zh-CN: 正在下线的节点数量 + en-US: NumDecommissioningDataNodes + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: TransactionsSinceLastCheckpoint + type: 0 + i18n: + zh-CN: 从上次Checkpoint之后的事务数量 + en-US: TransactionsSinceLastCheckpoint + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: LastCheckpointTime + type: 0 + i18n: + zh-CN: 上一次Checkpoint时间 + en-US: LastCheckpointTime + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: PendingDataNodeMessageCount + type: 0 + i18n: + zh-CN: DATANODE 的请求被 QUEUE 在 standby namenode 中的个数 + en-US: PendingDataNodeMessageCount + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.CapacityTotal + - $.CapacityTotalGB + - $.CapacityUsed + - $.CapacityUsedGB + - $.CapacityRemaining + - $.CapacityRemainingGB + - $.CapacityUsedNonDFS + - $.TotalLoad + - $.FilesTotal + - $.BlocksTotal + - $.PendingReplicationBlocks + - $.UnderReplicatedBlocks + - $.CorruptBlocks + - $.ScheduledReplicationBlocks + - $.PendingDeletionBlocks + - $.ExcessBlocks + - 
$.PostponedMisreplicatedBlocks + - $.NumLiveDataNodes + - $.NumDeadDataNodes + - $.NumDecomLiveDataNodes + - $.NumDecomDeadDataNodes + - $.NumDecommissioningDataNodes + - $.TransactionsSinceLastCheckpoint + - $.LastCheckpointTime + - $.PendingDataNodeMessageCount + calculates: + - CapacityTotal=$.CapacityTotal + - CapacityTotalGB=$.CapacityTotalGB + - CapacityUsed=$.CapacityUsed + - CapacityUsedGB=$.CapacityUsedGB + - CapacityRemaining=$.CapacityRemaining + - CapacityRemainingGB=$.CapacityRemainingGB + - CapacityUsedNonDFS=$.CapacityUsedNonDFS + - TotalLoad=$.TotalLoad + - FilesTotal=$.FilesTotal + - BlocksTotal=$.BlocksTotal + - PendingReplicationBlocks=$.PendingReplicationBlocks + - UnderReplicatedBlocks=$.UnderReplicatedBlocks + - CorruptBlocks=$.CorruptBlocks + - ScheduledReplicationBlocks=$.ScheduledReplicationBlocks + - PendingDeletionBlocks=$.PendingDeletionBlocks + - ExcessBlocks=$.ExcessBlocks + - PostponedMisreplicatedBlocks=$.PostponedMisreplicatedBlocks + - NumLiveDataNodes=$.NumLiveDataNodes + - NumDeadDataNodes=$.NumDeadDataNodes + - NumDecomLiveDataNodes=$.NumDecomLiveDataNodes + - NumDecomDeadDataNodes=$.NumDecomDeadDataNodes + - NumDecommissioningDataNodes=$.NumDecommissioningDataNodes + - TransactionsSinceLastCheckpoint=$.TransactionsSinceLastCheckpoint + - LastCheckpointTime=$.LastCheckpointTime + - PendingDataNodeMessageCount=$.PendingDataNodeMessageCount + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=NameNode,name=FSNamesystem")]' + - name: RPC + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), 
unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ReceivedBytes + type: 0 + i18n: + zh-CN: 接收数据速率 + en-US: ReceivedBytes + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: SentBytes + type: 0 + i18n: + zh-CN: 发送数据速率 + en-US: SentBytes + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: RpcQueueTimeNumOps + type: 0 + i18n: + zh-CN: RPC 调用速率 + en-US: RpcQueueTimeNumOps + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.ReceivedBytes + - $.SentBytes + - $.RpcQueueTimeNumOps + calculates: + - ReceivedBytes=$.ReceivedBytes + - SentBytes=$.SentBytes + - RpcQueueTimeNumOps=$.RpcQueueTimeNumOps + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name =~ /Hadoop:service=NameNode,name=RpcActivityForPort80\d+/)]' +# parseScript: '$.beans[?(@.name == "Hadoop:service=NameNode,name=RpcActivityForPort8020")]' + - name: runtime + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: StartTime + type: 1 + i18n: + zh-CN: 启动时间 + en-US: StartTime + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.beans[?(@.name == "java.lang:type=Runtime")].StartTime + calculates: + - 
StartTime=$.beans[?(@.name == "java.lang:type=Runtime")].StartTime + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$' + + - name: JvmMetric + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemNonHeapUsedM + type: 0 + i18n: + zh-CN: JVM 当前已经使用的 NonHeapMemory 的大小 + en-US: MemNonHeapUsedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemNonHeapCommittedM + type: 0 + i18n: + zh-CN: JVM 配置的 NonHeapCommittedM 的大小 + en-US: MemNonHeapCommittedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapUsedM + type: 0 + i18n: + zh-CN: JVM 当前已经使用的 HeapMemory 的大小 + en-US: MemHeapUsedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapCommittedM + type: 0 + i18n: + zh-CN: JVM HeapMemory 提交大小 + en-US: MemHeapCommittedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapMaxM + type: 0 + i18n: + zh-CN: JVM 配置的 HeapMemory 的大小 + en-US: MemHeapMaxM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemMaxM + type: 0 + i18n: + zh-CN: JVM 运行时可以使用的最大内存大小 + en-US: MemMaxM + # field-metric name, 
type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcCountParNew + type: 0 + i18n: + zh-CN: 新生代GC次数 + en-US: GcCountParNew + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcTimeMillisParNew + type: 0 + i18n: + zh-CN: 新生代GC消耗时间 + en-US: GcTimeMillisParNew + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcCountConcurrentMarkSweep + type: 0 + i18n: + zh-CN: 老年代GC次数 + en-US: GcCountConcurrentMarkSweep + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcTimeMillisConcurrentMarkSweep + type: 0 + i18n: + zh-CN: 老年代GC消耗时间 + en-US: GcTimeMillisConcurrentMarkSweep + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcCount + type: 0 + i18n: + zh-CN: GC次数 + en-US: GcCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcTimeMillis + type: 0 + i18n: + zh-CN: GC消耗时间 + en-US: GcTimeMillis + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsRunnable + type: 0 + i18n: + zh-CN: 处于 RUNNABLE 状态的线程数量 + en-US: ThreadsRunnable + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsBlocked + type: 0 + i18n: + zh-CN: 处于 BLOCKED 状态的线程数量 + en-US: ThreadsBlocked + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsWaiting + type: 0 + i18n: + zh-CN: 处于 
WAITING 状态的线程数量 + en-US: ThreadsWaiting + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ThreadsTimedWaiting + type: 0 + i18n: + zh-CN: 处于 TIMED WAITING 状态的线程数量 + en-US: ThreadsTimedWaiting + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.MemNonHeapUsedM + - $.MemNonHeapCommittedM + - $.MemHeapUsedM + - $.MemHeapCommittedM + - $.MemHeapMaxM + - $.MemMaxM + - $.GcCountParNew + - $.GcTimeMillisParNew + - $.GcCountConcurrentMarkSweep + - $.GcTimeMillisConcurrentMarkSweep + - $.GcCount + - $.GcTimeMillis + - $.ThreadsRunnable + - $.ThreadsBlocked + - $.ThreadsWaiting + - $.ThreadsTimedWaiting + calculates: + - MemNonHeapUsedM=$.MemNonHeapUsedM + - MemNonHeapCommittedM=$.MemNonHeapCommittedM + - MemHeapUsedM=$.MemHeapUsedM + - MemHeapCommittedM=$.MemHeapCommittedM + - MemHeapMaxM=$.MemHeapMaxM + - MemMaxM=$.MemMaxM + - GcCountParNew=$.GcCountParNew + - GcTimeMillisParNew=$.GcTimeMillisParNew + - GcCountConcurrentMarkSweep=$.GcCountConcurrentMarkSweep + - GcTimeMillisConcurrentMarkSweep=$.GcTimeMillisConcurrentMarkSweep + - GcCount=$.GcCount + - GcTimeMillis=$.GcTimeMillis + - ThreadsRunnable=$.ThreadsRunnable + - ThreadsBlocked=$.ThreadsBlocked + - ThreadsWaiting=$.ThreadsWaiting + - ThreadsTimedWaiting=$.ThreadsTimedWaiting + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=NameNode,name=JvmMetrics")]' \ No newline at end of file