From c428c6e546b4c4aa107701979d40bd9371b452b4 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 16:21:15 +0200
Subject: [PATCH 01/18] feat: Add ebpf exporter
* Refactor cgroup detection into a separate file so that the struct can be reused by different collectors
* Use goroutines in the perf collector for concurrent updates
Signed-off-by: Mahendra Paipuri
---
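Note (illustrative, not part of the patch): the two commit-message points are a shared cgroup-detection struct consumed by several collectors, and goroutine-based updates in the perf collector. The sketch below shows that shape in plain Go; all names (cgroupManager, activeCgroups, updateCgroup) are hypothetical placeholders, not the APIs added in pkg/collector/cgroup.go or pkg/collector/perf.go.

package main

import (
	"fmt"
	"sync"
)

// cgroupManager is a hypothetical stand-in for the shared cgroup-detection
// struct: it resolves active cgroups once and is handed to each collector.
type cgroupManager struct {
	mountPoint string
}

// activeCgroups returns the cgroup IDs currently present. A real
// implementation would walk mountPoint; fixed IDs are returned here.
func (c *cgroupManager) activeCgroups() []string {
	return []string{"1234", "5678"}
}

// updateCgroup mimics the per-cgroup work a collector performs,
// e.g. reading perf counters for one job cgroup.
func updateCgroup(id string) string {
	return fmt.Sprintf("updated cgroup %s", id)
}

func main() {
	mgr := &cgroupManager{mountPoint: "/sys/fs/cgroup"}

	// One goroutine per cgroup, as the commit message describes
	// for the perf collector.
	var wg sync.WaitGroup
	for _, id := range mgr.activeCgroups() {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			fmt.Println(updateCgroup(id))
		}(id)
	}
	wg.Wait()
}
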
Makefile | 10 +-
Makefile.common | 5 +
README.md | 2 +-
go.mod | 2 +-
pkg/collector/bpf/.gitignore | 3 +
pkg/collector/bpf/Makefile | 105 +
pkg/collector/bpf/Makefile.common | 72 +
pkg/collector/bpf/README.md | 1 +
pkg/collector/bpf/include/compiler.h | 54 +
pkg/collector/bpf/include/net_shared.h | 35 +
pkg/collector/bpf/include/vmlinux.h | 85188 +++++++++++++++++++
pkg/collector/bpf/lib/bpf_cgroup.h | 220 +
pkg/collector/bpf/lib/bpf_helpers.h | 263 +
pkg/collector/bpf/lib/bpf_path.h | 304 +
pkg/collector/bpf/lib/config.h | 19 +
pkg/collector/bpf/libbpf/bpf_core_read.h | 484 +
pkg/collector/bpf/libbpf/bpf_helper_defs.h | 4167 +
pkg/collector/bpf/libbpf/bpf_tracing.h | 921 +
pkg/collector/bpf/network/bpf_network.c | 52 +
pkg/collector/bpf/network/bpf_network.h | 117 +
pkg/collector/bpf/vfs/bpf_vfs.c | 233 +
pkg/collector/bpf/vfs/bpf_vfs.h | 272 +
pkg/collector/cgroup.go | 152 +
pkg/collector/ebpf.go | 784 +
pkg/collector/ebpf_test.go | 249 +
pkg/collector/helper.go | 60 +-
pkg/collector/ipmi.go | 6 +-
pkg/collector/kernel.go | 219 +
pkg/collector/kernel_test.go | 108 +
pkg/collector/perf.go | 47 +-
pkg/collector/perf_test.go | 6 +-
pkg/collector/regexp.go | 17 -
pkg/collector/slurm.go | 110 +-
pkg/collector/slurm_test.go | 58 +-
pkg/collector/testdata/proc.ttar | 46 +
35 files changed, 94219 insertions(+), 172 deletions(-)
create mode 100644 pkg/collector/bpf/.gitignore
create mode 100644 pkg/collector/bpf/Makefile
create mode 100644 pkg/collector/bpf/Makefile.common
create mode 100644 pkg/collector/bpf/README.md
create mode 100644 pkg/collector/bpf/include/compiler.h
create mode 100644 pkg/collector/bpf/include/net_shared.h
create mode 100644 pkg/collector/bpf/include/vmlinux.h
create mode 100644 pkg/collector/bpf/lib/bpf_cgroup.h
create mode 100644 pkg/collector/bpf/lib/bpf_helpers.h
create mode 100644 pkg/collector/bpf/lib/bpf_path.h
create mode 100644 pkg/collector/bpf/lib/config.h
create mode 100644 pkg/collector/bpf/libbpf/bpf_core_read.h
create mode 100644 pkg/collector/bpf/libbpf/bpf_helper_defs.h
create mode 100644 pkg/collector/bpf/libbpf/bpf_tracing.h
create mode 100644 pkg/collector/bpf/network/bpf_network.c
create mode 100644 pkg/collector/bpf/network/bpf_network.h
create mode 100644 pkg/collector/bpf/vfs/bpf_vfs.c
create mode 100644 pkg/collector/bpf/vfs/bpf_vfs.h
create mode 100644 pkg/collector/cgroup.go
create mode 100644 pkg/collector/ebpf.go
create mode 100644 pkg/collector/ebpf_test.go
create mode 100644 pkg/collector/kernel.go
create mode 100644 pkg/collector/kernel_test.go
delete mode 100644 pkg/collector/regexp.go
diff --git a/Makefile b/Makefile
index 4e5349ad..3694b99a 100644
--- a/Makefile
+++ b/Makefile
@@ -40,6 +40,9 @@ else
test-docker := test-docker
endif
+# Base test flags
+test-flags := -covermode=atomic -race
+
# Use CGO for api and GO for ceems_exporter.
PROMU_TEST_CONF ?= .promu-go-test.yml
ifeq ($(CGO_BUILD), 1)
@@ -67,8 +70,13 @@ else
# go test flags
coverage-file := coverage-go.out
+
+ # If running in CI, add -exec sudo flags to run tests that require privileges
+ ifeq ($(CI), true)
+ test-flags := $(test-flags) -exec sudo
+ endif
endif
-test-flags := -covermode=atomic -coverprofile=$(coverage-file).tmp -race
+test-flags := $(test-flags) -coverprofile=$(coverage-file).tmp
ifeq ($(GOHOSTOS), linux)
test-e2e := test-e2e
diff --git a/Makefile.common b/Makefile.common
index 056866b0..2aab8e29 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -193,6 +193,11 @@ ifeq ($(CGO_BUILD), 1)
endif
@echo ">> building test binaries"
$(PROMU_TEST) build --prefix $(PREFIX) $(PROMU_BINARIES)
+endif
+ifeq ($(CGO_BUILD), 0)
+ @echo ">> building bpf assets"
+ $(MAKE) -C ./pkg/collector/bpf clean
+ $(MAKE) -C ./pkg/collector/bpf
endif
@echo ">> building binaries"
$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
diff --git a/README.md b/README.md
index fd150f94..9256c675 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
| | |
| ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| CI/CD | [![ci](https://github.com/mahendrapaipuri/ceems/workflows/CI/badge.svg)](https://github.com/mahendrapaipuri/ceems) [![CircleCI](https://dl.circleci.com/status-badge/img/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main.svg?style=svg&circle-token=28db7268f3492790127da28e62e76b0991d59c8b)](https://dl.circleci.com/status-badge/redirect/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main) [![Coverage](https://img.shields.io/badge/Coverage-75.9%25-brightgreen)](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml?query=branch%3Amain) |
+| CI/CD | [![ci](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml?query=branch%3Amain) [![CircleCI](https://dl.circleci.com/status-badge/img/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main.svg?style=svg&circle-token=28db7268f3492790127da28e62e76b0991d59c8b)](https://dl.circleci.com/status-badge/redirect/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main) [![Coverage](https://img.shields.io/badge/Coverage-75.9%25-brightgreen)](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml?query=branch%3Amain) |
| Docs | [![docs](https://img.shields.io/badge/docs-passing-green?style=flat&link=https://mahendrapaipuri.github.io/ceems/docs/)](https://mahendrapaipuri.github.io/ceems/) |
| Package | [![Release](https://img.shields.io/github/v/release/mahendrapaipuri/ceems.svg?include_prereleases)](https://github.com/mahendrapaipuri/ceems/releases/latest) |
| Meta | [![GitHub License](https://img.shields.io/github/license/mahendrapaipuri/ceems)](https://github.com/mahendrapaipuri/ceems) [![Go Report Card](https://goreportcard.com/badge/github.com/mahendrapaipuri/ceems)](https://goreportcard.com/report/github.com/mahendrapaipuri/ceems) [![code style](https://img.shields.io/badge/code%20style-gofmt-blue.svg)](https://pkg.go.dev/cmd/gofmt) |
diff --git a/go.mod b/go.mod
index 91400415..dd5d6666 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,7 @@ go 1.22.5
require (
github.com/alecthomas/kingpin/v2 v2.4.0
+ github.com/cilium/ebpf v0.11.0
github.com/containerd/cgroups/v3 v3.0.4-0.20240117155926-c00d22e55fef
github.com/go-chi/httprate v0.14.1
github.com/go-kit/log v0.2.1
@@ -30,7 +31,6 @@ require (
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/cilium/ebpf v0.11.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/go-units v0.5.0 // indirect
diff --git a/pkg/collector/bpf/.gitignore b/pkg/collector/bpf/.gitignore
new file mode 100644
index 00000000..d93f0bdb
--- /dev/null
+++ b/pkg/collector/bpf/.gitignore
@@ -0,0 +1,3 @@
+# Ignore objs and deps
+objs/
+deps/
diff --git a/pkg/collector/bpf/Makefile b/pkg/collector/bpf/Makefile
new file mode 100644
index 00000000..5fcd097f
--- /dev/null
+++ b/pkg/collector/bpf/Makefile
@@ -0,0 +1,105 @@
+.PHONY: all clean
+.SUFFIXES:
+
+include ./Makefile.common
+
+VFSDIR := vfs/
+NETWORKDIR := network/
+BPFTESTDIR := test/
+
+NETWORK = bpf_network.o
+VFS = bpf_vfs.o bpf_vfs_v511.o bpf_vfs_v62.o
+# BPFTEST = bpf_lseek.o
+
+OBJSDIR := objs/
+DEPSDIR := deps/
+
+VFSOBJ := $(addprefix $(OBJSDIR),$(VFS))
+NETWORKOBJ := $(addprefix $(OBJSDIR),$(NETWORK))
+TESTOBJ := $(addprefix $(OBJSDIR),$(BPFTEST))
+OBJS := $(VFSOBJ) $(NETWORKOBJ) $(TESTOBJ)
+LLOBJS := $(patsubst $(OBJSDIR)%.o,$(OBJSDIR)%.ll,$(OBJS))
+DEPS := $(patsubst $(OBJSDIR)%.ll,$(DEPSDIR)%.d,$(LLOBJS))
+
+all: $(OBJS) $(DEPS)
+
+# NB: https://www.gnu.org/software/make/manual/html_node/Prerequisite-Types.html
+$(OBJS): | $(OBJSDIR)
+$(DEPS): | $(DEPSDIR)
+$(LLOBJS): | $(OBJSDIR)
+
+$(OBJSDIR):
+ mkdir $(OBJSDIR)
+
+$(DEPSDIR):
+ mkdir $(DEPSDIR)
+
+define DEFINE_VARIANT
+VAR := $1
+deps/bpf_vfs_$$(VAR).d: vfs/bpf_vfs.c
+endef
+
+# Generic build targets for each sub-dir
+
+$(eval $(call DEFINE_VARIANT,v511))
+$(eval $(call DEFINE_VARIANT,v62))
+
+# Build only for relevant architectures
+ifeq ($(BPF_TARGET_COMPILE),1)
+
+# VFSDIR
+objs/%.ll: $(VFSDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_POST_v62 -c $< -o $@
+
+objs/%_v511.ll:
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_PRE_v511 -c $< -o $@
+
+objs/%_v62.ll:
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_POST_v512_PRE_v62 -c $< -o $@
+
+$(DEPSDIR)%.d: $(VFSDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_POST_v62 -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
+$(DEPSDIR)%_v511.d:
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_PRE_v511 -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
+$(DEPSDIR)%_v62.d:
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_POST_v512_PRE_v62 -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
+# NETWORKDIR
+objs/%.ll: $(NETWORKDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -c $< -o $@
+
+$(DEPSDIR)%.d: $(NETWORKDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
+# BPFTESTDIR
+objs/%.ll: $(BPFTESTDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -c $< -o $@
+
+$(DEPSDIR)%.d: $(BPFTESTDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
+# Remaining objects are built without ALU32, using the default LLC flags (-mcpu=v2)
+objs/%.o: objs/%.ll
+ $(LLC) $(LLC_FLAGS) -filetype=obj $< -o $@
+
+# include dependencies, see https://lists.gnu.org/archive/html/make-w32/2004-03/msg00062.html
+ifeq (,$(filter $(MAKECMDGOALS),clean run-test))
+-include $(DEPS)
+endif
+
+endif
+
+# the 'test' target is already taken
+run-test:
+ $(MAKE) -C tests test
+
+# SUBDIRS=tests
+
+clean:
+ @$(ECHO_CLEAN)
+ $(QUIET) $(foreach TARGET,$(SUBDIRS), \
+ $(MAKE) -C $(TARGET) clean)
+ $(QUIET)rm -f $(OBJSDIR)*.{o,ll,i,s}
+ $(QUIET)rm -f $(DEPSDIR)*.d
diff --git a/pkg/collector/bpf/Makefile.common b/pkg/collector/bpf/Makefile.common
new file mode 100644
index 00000000..7e2bb221
--- /dev/null
+++ b/pkg/collector/bpf/Makefile.common
@@ -0,0 +1,72 @@
+SHELL=/bin/bash # needed for the *.{o,ll,i,s} pattern in the clean target
+
+CLANG ?= clang
+LLC ?= llc
+
+# Build the BPF programs for the detected architecture, default to x86, and
+# allow easy overriding by using ?= for cross-compilation
+# UNAME_M := $(shell uname -m)
+# ifeq ($(UNAME_M),x86_64)
+# BPF_TARGET_ARCH ?= x86
+# endif
+# ifeq ($(UNAME_M),aarch64)
+# BPF_TARGET_ARCH ?= arm64
+# endif
+
+# Get cross-compiling flags from GOARCH env variable
+# Endianness is picked up from https://github.com/cilium/ebpf/blob/625b0a910e1ba666e483e75b149880ce3b54dc85/cmd/bpf2go/gen/target.go#L14-L28
+BPF_TARGET_ARCH ?= x86
+BPF_TARGET_MARCH ?= bpf
+BPF_TARGET_COMPILE ?= 1
+ifeq ($(GOARCH),386)
+ BPF_TARGET_ARCH = x86
+ BPF_TARGET_MARCH = bpfel
+else ifeq ($(GOARCH),amd64)
+ BPF_TARGET_ARCH = x86
+ BPF_TARGET_MARCH = bpfel
+else ifeq ($(GOARCH),arm64)
+ BPF_TARGET_ARCH = arm64
+ BPF_TARGET_MARCH = bpfel
+else ifeq ($(GOARCH),mips)
+ BPF_TARGET_ARCH = mips
+ BPF_TARGET_MARCH = bpfeb
+else ifeq ($(GOARCH),ppc64)
+ BPF_TARGET_ARCH = powerpc
+ BPF_TARGET_MARCH = bpfeb
+else ifeq ($(GOARCH),ppc64le)
+ BPF_TARGET_ARCH = powerpc
+ BPF_TARGET_MARCH = bpfel
+else ifeq ($(GOARCH),riscv64)
+ BPF_TARGET_ARCH = riscv
+ BPF_TARGET_MARCH = bpfel
+else ifeq ($(GOARCH),s390x)
+ BPF_TARGET_ARCH = s390
+ BPF_TARGET_MARCH = bpfeb
+endif
+
+# Do not compile BPF assets for mipsle, mips64 and mips64le architectures
+ifeq ($(GOARCH),mipsle)
+ BPF_TARGET_COMPILE = 0
+endif
+ifeq ($(GOARCH),mips64)
+ BPF_TARGET_COMPILE = 0
+endif
+ifeq ($(GOARCH),mips64le)
+ BPF_TARGET_COMPILE = 0
+endif
+
+ROOT_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
+
+IDIR = $(ROOT_DIR)include/
+LIBBPF = $(ROOT_DIR)libbpf/
+LDIR = $(ROOT_DIR)lib
+DEPS = $(patsubst %,$(IDIR)/%,$(_DEPS))
+
+FLAGS := -I$(ROOT_DIR) \
+ -Wall -Werror \
+ -Wno-address-of-packed-member -Wno-compare-distinct-pointer-types -Wno-unknown-warning-option \
+ -O2
+
+CLANG_FLAGS += $(FLAGS) -I $(LIBBPF) -I $(IDIR) -I $(LDIR) -target bpf -emit-llvm -g -D__TARGET_ARCH_$(BPF_TARGET_ARCH) -fdebug-default-version=4
+LLC_FLAGS := -march=$(BPF_TARGET_MARCH) -mcpu=v2 -mattr=dwarfris
+LLC_FLAGS_ALU32 := -march=$(BPF_TARGET_MARCH) -mcpu=v3 -mattr=dwarfris
diff --git a/pkg/collector/bpf/README.md b/pkg/collector/bpf/README.md
new file mode 100644
index 00000000..8d1c8b69
--- /dev/null
+++ b/pkg/collector/bpf/README.md
@@ -0,0 +1 @@
+
diff --git a/pkg/collector/bpf/include/compiler.h b/pkg/collector/bpf/include/compiler.h
new file mode 100644
index 00000000..85a308fc
--- /dev/null
+++ b/pkg/collector/bpf/include/compiler.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef MAX_MAP_ENTRIES
+#define MAX_MAP_ENTRIES 256
+#endif
+
+#define FUNC_INLINE static inline __attribute__((always_inline))
+
+/*
+ * Following define is to assist VSCode Intellisense so that it treats
+ * __builtin_preserve_access_index() as a const void * instead of a
+ * simple void (because it doesn't have a definition for it). This stops
+ * Intellisense marking all _(P) macros (used in probe_read()) as errors.
+ * To use this, just define VSCODE in 'C/C++: Edit Configurations (JSON)'
+ * in the Command Palette in VSCODE (F1 or View->Command Palette...):
+ * "defines": ["VSCODE"]
+ * under configurations.
+ */
+#ifdef VSCODE
+const void *__builtin_preserve_access_index(void *);
+#endif
+#define _(P) (__builtin_preserve_access_index(P))
+
+#ifndef likely
+# define likely(X) __builtin_expect(!!(X), 1)
+#endif
+
+#ifndef unlikely
+# define unlikely(X) __builtin_expect(!!(X), 0)
+#endif
+
+#ifndef __inline__
+# define __inline__ __attribute__((always_inline))
+#endif
+
+#define DEBUG
+#ifdef DEBUG
+/* Only use this for debug output. Notice output from bpf_trace_printk()
+ * ends up in /sys/kernel/debug/tracing/trace_pipe
+ */
+#define bpf_debug(fmt, ...) \
+ ({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
+ })
+#else
+#define bpf_debug(fmt, ...){;}
+#endif
+
+// Just to ensure that we can use vfs_write/vfs_read calls
+// Picked from https://github.com/torvalds/linux/blob/master/tools/include/linux/types.h#L56
+#ifndef __user
+#define __user
+#endif
diff --git a/pkg/collector/bpf/include/net_shared.h b/pkg/collector/bpf/include/net_shared.h
new file mode 100644
index 00000000..4cc59b0f
--- /dev/null
+++ b/pkg/collector/bpf/include/net_shared.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef _NET_SHARED_H
+#define _NET_SHARED_H
+
+#define AF_INET 2
+#define AF_INET6 10
+
+#define ETH_ALEN 6
+#define ETH_P_802_3_MIN 0x0600
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+#define ETH_P_ARP 0x0806
+#define IPPROTO_ICMPV6 58
+
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
+
+#define IFNAMSIZ 16
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define bpf_ntohs(x) __builtin_bswap16(x)
+#define bpf_htons(x) __builtin_bswap16(x)
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define bpf_ntohs(x) (x)
+#define bpf_htons(x) (x)
+#else
+# error "Endianness detection needs to be set up for your compiler?!"
+#endif
+
+#endif
diff --git a/pkg/collector/bpf/include/vmlinux.h b/pkg/collector/bpf/include/vmlinux.h
new file mode 100644
index 00000000..2e692b64
--- /dev/null
+++ b/pkg/collector/bpf/include/vmlinux.h
@@ -0,0 +1,85188 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef __VMLINUX_H__
+#define __VMLINUX_H__
+
+/* User configurable BTF */
+enum generic_func_args_enum {
+ /* tcp sock stat sample info */
+ send_check_pkt_sample = 0x50,
+};
+
+/* Kernel BTF */
+typedef signed char __s8;
+
+typedef unsigned char __u8;
+
+typedef short unsigned int __u16;
+
+typedef int __s32;
+
+typedef unsigned int __u32;
+
+typedef long long int __s64;
+
+typedef long long unsigned int __u64;
+
+typedef __s8 s8;
+
+typedef __u8 u8;
+
+typedef __u16 u16;
+
+typedef __s32 s32;
+
+typedef __u32 u32;
+
+typedef __s64 s64;
+
+typedef __u64 u64;
+
+enum {
+ false = 0,
+ true = 1,
+};
+
+typedef long int __kernel_long_t;
+
+typedef long unsigned int __kernel_ulong_t;
+
+typedef int __kernel_pid_t;
+
+typedef unsigned int __kernel_uid32_t;
+
+typedef unsigned int __kernel_gid32_t;
+
+typedef __kernel_ulong_t __kernel_size_t;
+
+typedef __kernel_long_t __kernel_ssize_t;
+
+typedef long long int __kernel_loff_t;
+
+typedef long long int __kernel_time64_t;
+
+typedef __kernel_long_t __kernel_clock_t;
+
+typedef int __kernel_timer_t;
+
+typedef int __kernel_clockid_t;
+
+typedef unsigned int __poll_t;
+
+typedef u32 __kernel_dev_t;
+
+typedef __kernel_dev_t dev_t;
+
+typedef short unsigned int umode_t;
+
+typedef __kernel_pid_t pid_t;
+
+typedef __kernel_clockid_t clockid_t;
+
+typedef _Bool bool;
+
+typedef __kernel_uid32_t uid_t;
+
+typedef __kernel_gid32_t gid_t;
+
+typedef __kernel_loff_t loff_t;
+
+typedef __kernel_size_t size_t;
+
+typedef __kernel_ssize_t ssize_t;
+
+typedef u32 uint32_t;
+
+typedef u64 sector_t;
+
+typedef u64 blkcnt_t;
+
+typedef u64 dma_addr_t;
+
+typedef unsigned int gfp_t;
+
+typedef unsigned int fmode_t;
+
+typedef u64 phys_addr_t;
+
+typedef phys_addr_t resource_size_t;
+
+typedef struct {
+ int counter;
+} atomic_t;
+
+typedef struct {
+ s64 counter;
+} atomic64_t;
+
+struct list_head {
+ struct list_head *next;
+ struct list_head *prev;
+};
+
+struct hlist_node;
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next;
+ struct hlist_node **pprev;
+};
+
+struct callback_head {
+ struct callback_head *next;
+ void (*func)(struct callback_head *);
+};
+
+typedef int initcall_entry_t;
+
+struct lockdep_subclass_key {
+ char __one_byte;
+};
+
+struct lock_class_key {
+ union {
+ struct hlist_node hash_entry;
+ struct lockdep_subclass_key subkeys[8];
+ };
+};
+
+struct fs_context;
+
+struct fs_parameter_spec;
+
+struct dentry;
+
+struct super_block;
+
+struct module;
+
+struct file_system_type {
+ const char *name;
+ int fs_flags;
+ int (*init_fs_context)(struct fs_context *);
+ const struct fs_parameter_spec *parameters;
+ struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);
+ void (*kill_sb)(struct super_block *);
+ struct module *owner;
+ struct file_system_type *next;
+ struct hlist_head fs_supers;
+ struct lock_class_key s_lock_key;
+ struct lock_class_key s_umount_key;
+ struct lock_class_key s_vfs_rename_key;
+ struct lock_class_key s_writers_key[3];
+ struct lock_class_key i_lock_key;
+ struct lock_class_key i_mutex_key;
+ struct lock_class_key i_mutex_dir_key;
+};
+
+typedef void *fl_owner_t;
+
+struct file;
+
+struct kiocb;
+
+struct iov_iter;
+
+struct dir_context;
+
+struct poll_table_struct;
+
+struct vm_area_struct;
+
+struct mnt_idmap;
+
+struct inode;
+
+struct file_lock;
+
+struct page;
+
+struct pipe_inode_info;
+
+struct seq_file;
+
+struct file_operations {
+ struct module *owner;
+ loff_t (*llseek)(struct file *, loff_t, int);
+ ssize_t (*read)(struct file *, char *, size_t, loff_t *);
+ ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
+ ssize_t (*read_iter)(struct kiocb *, struct iov_iter *);
+ ssize_t (*write_iter)(struct kiocb *, struct iov_iter *);
+ int (*iopoll)(struct kiocb *, bool);
+ int (*iterate)(struct file *, struct dir_context *);
+ int (*iterate_shared)(struct file *, struct dir_context *);
+ __poll_t (*poll)(struct file *, struct poll_table_struct *);
+ long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int);
+ long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int);
+ int (*mmap)(struct file *, struct vm_area_struct *);
+ long unsigned int mmap_supported_flags;
+ int (*open)(struct inode *, struct file *);
+ int (*flush)(struct file *, fl_owner_t);
+ int (*release)(struct inode *, struct file *);
+ int (*fsync)(struct file *, loff_t, loff_t, int);
+ int (*fasync)(int, struct file *, int);
+ int (*lock)(struct file *, int, struct file_lock *);
+ ssize_t (*sendpage)(struct file *, struct page *, int, size_t, loff_t *, int);
+ long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+ int (*check_flags)(int);
+ int (*flock)(struct file *, int, struct file_lock *);
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
+ ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+ int (*setlease)(struct file *, long int, struct file_lock **, void **);
+ long int (*fallocate)(struct file *, int, loff_t, loff_t);
+ void (*show_fdinfo)(struct seq_file *, struct file *);
+ ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+ loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int);
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
+};
+
+struct qspinlock {
+ union {
+ atomic_t val;
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+ };
+};
+
+typedef struct qspinlock arch_spinlock_t;
+
+struct lock_class;
+
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[2];
+ const char *name;
+ short int wait_type_outer;
+ short int wait_type_inner;
+};
+
+struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+ unsigned int magic;
+ unsigned int owner_cpu;
+ void *owner;
+ struct lockdep_map dep_map;
+};
+
+struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+ struct {
+ u8 __padding[24];
+ struct lockdep_map dep_map;
+ };
+ };
+};
+
+typedef struct spinlock spinlock_t;
+
+struct notifier_block;
+
+struct atomic_notifier_head {
+ spinlock_t lock;
+ struct notifier_block *head;
+};
+
+enum system_states {
+ SYSTEM_BOOTING = 0,
+ SYSTEM_SCHEDULING = 1,
+ SYSTEM_RUNNING = 2,
+ SYSTEM_HALT = 3,
+ SYSTEM_POWER_OFF = 4,
+ SYSTEM_RESTART = 5,
+ SYSTEM_SUSPEND = 6,
+};
+
+struct taint_flag {
+ char c_true;
+ char c_false;
+ bool module;
+};
+
+struct static_key {
+ atomic_t enabled;
+};
+
+typedef atomic64_t atomic_long_t;
+
+struct static_key_true {
+ struct static_key key;
+};
+
+struct static_key_false {
+ struct static_key key;
+};
+
+typedef __s64 time64_t;
+
+struct __kernel_timespec {
+ __kernel_time64_t tv_sec;
+ long long int tv_nsec;
+};
+
+struct timezone {
+ int tz_minuteswest;
+ int tz_dsttime;
+};
+
+struct timespec64 {
+ time64_t tv_sec;
+ long int tv_nsec;
+};
+
+enum timespec_type {
+ TT_NONE = 0,
+ TT_NATIVE = 1,
+ TT_COMPAT = 2,
+};
+
+typedef s32 old_time32_t;
+
+struct old_timespec32 {
+ old_time32_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct pollfd;
+
+struct restart_block {
+ long int (*fn)(struct restart_block *);
+ union {
+ struct {
+ u32 *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 *uaddr2;
+ } futex;
+ struct {
+ clockid_t clockid;
+ enum timespec_type type;
+ union {
+ struct __kernel_timespec *rmtp;
+ struct old_timespec32 *compat_rmtp;
+ };
+ u64 expires;
+ } nanosleep;
+ struct {
+ struct pollfd *ufds;
+ int nfds;
+ int has_timeout;
+ long unsigned int tv_sec;
+ long unsigned int tv_nsec;
+ } poll;
+ };
+};
+
+struct thread_info {
+ long unsigned int flags;
+ u32 status;
+};
+
+struct refcount_struct {
+ atomic_t refs;
+};
+
+typedef struct refcount_struct refcount_t;
+
+struct llist_node {
+ struct llist_node *next;
+};
+
+struct load_weight {
+ long unsigned int weight;
+ u32 inv_weight;
+};
+
+struct rb_node {
+ long unsigned int __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+};
+
+struct sched_statistics {};
+
+struct util_est {
+ unsigned int enqueued;
+ unsigned int ewma;
+};
+
+struct sched_avg {
+ u64 last_update_time;
+ u64 load_sum;
+ u64 runnable_sum;
+ u32 util_sum;
+ u32 period_contrib;
+ long unsigned int load_avg;
+ long unsigned int runnable_avg;
+ long unsigned int util_avg;
+ struct util_est util_est;
+};
+
+struct cfs_rq;
+
+struct sched_entity {
+ struct load_weight load;
+ struct rb_node run_node;
+ struct list_head group_node;
+ unsigned int on_rq;
+ u64 exec_start;
+ u64 sum_exec_runtime;
+ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+ u64 nr_migrations;
+ struct sched_statistics statistics;
+ int depth;
+ struct sched_entity *parent;
+ struct cfs_rq *cfs_rq;
+ struct cfs_rq *my_q;
+ long unsigned int runnable_weight;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct sched_avg avg;
+};
+
+struct sched_rt_entity {
+ struct list_head run_list;
+ long unsigned int timeout;
+ long unsigned int watchdog_stamp;
+ unsigned int time_slice;
+ short unsigned int on_rq;
+ short unsigned int on_list;
+ struct sched_rt_entity *back;
+};
+
+typedef s64 ktime_t;
+
+struct timerqueue_node {
+ struct rb_node node;
+ ktime_t expires;
+};
+
+enum hrtimer_restart {
+ HRTIMER_NORESTART = 0,
+ HRTIMER_RESTART = 1,
+};
+
+struct hrtimer_clock_base;
+
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
+};
+
+struct sched_dl_entity {
+ struct rb_node rb_node;
+ u64 dl_runtime;
+ u64 dl_deadline;
+ u64 dl_period;
+ u64 dl_bw;
+ u64 dl_density;
+ s64 runtime;
+ u64 deadline;
+ unsigned int flags;
+ unsigned int dl_throttled: 1;
+ unsigned int dl_boosted: 1;
+ unsigned int dl_yielded: 1;
+ unsigned int dl_non_contending: 1;
+ unsigned int dl_overrun: 1;
+ struct hrtimer dl_timer;
+ struct hrtimer inactive_timer;
+};
+
+struct cpumask {
+ long unsigned int bits[4];
+};
+
+typedef struct cpumask cpumask_t;
+
+struct sched_info {};
+
+struct plist_node {
+ int prio;
+ struct list_head prio_list;
+ struct list_head node_list;
+};
+
+struct vmacache {
+ u64 seqnum;
+ struct vm_area_struct *vmas[4];
+};
+
+struct task_rss_stat {
+ int events;
+ int count[4];
+};
+
+typedef struct raw_spinlock raw_spinlock_t;
+
+struct prev_cputime {
+ u64 utime;
+ u64 stime;
+ raw_spinlock_t lock;
+};
+
+struct seqcount {
+ unsigned int sequence;
+ struct lockdep_map dep_map;
+};
+
+typedef struct seqcount seqcount_t;
+
+enum vtime_state {
+ VTIME_INACTIVE = 0,
+ VTIME_IDLE = 1,
+ VTIME_SYS = 2,
+ VTIME_USER = 3,
+ VTIME_GUEST = 4,
+};
+
+struct vtime {
+ seqcount_t seqcount;
+ long long unsigned int starttime;
+ enum vtime_state state;
+ unsigned int cpu;
+ u64 utime;
+ u64 stime;
+ u64 gtime;
+};
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+struct timerqueue_head {
+ struct rb_root_cached rb_root;
+};
+
+struct posix_cputimer_base {
+ u64 nextevt;
+ struct timerqueue_head tqhead;
+};
+
+struct posix_cputimers {
+ struct posix_cputimer_base bases[3];
+ unsigned int timers_active;
+ unsigned int expiry_active;
+};
+
+struct sem_undo_list;
+
+struct sysv_sem {
+ struct sem_undo_list *undo_list;
+};
+
+struct sysv_shm {
+ struct list_head shm_clist;
+};
+
+typedef struct {
+ long unsigned int sig[1];
+} sigset_t;
+
+struct sigpending {
+ struct list_head list;
+ sigset_t signal;
+};
+
+typedef struct {
+ uid_t val;
+} kuid_t;
+
+struct seccomp_filter;
+
+struct seccomp {
+ int mode;
+ struct seccomp_filter *filter;
+};
+
+struct wake_q_node {
+ struct wake_q_node *next;
+};
+
+struct held_lock {
+ u64 prev_chain_key;
+ long unsigned int acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+ unsigned int class_idx: 13;
+ unsigned int irq_context: 2;
+ unsigned int trylock: 1;
+ unsigned int read: 2;
+ unsigned int check: 1;
+ unsigned int hardirqs_off: 1;
+ unsigned int references: 12;
+ unsigned int pin_count;
+};
+
+struct task_io_accounting {};
+
+typedef struct {
+ long unsigned int bits[1];
+} nodemask_t;
+
+struct optimistic_spin_queue {
+ atomic_t tail;
+};
+
+struct mutex {
+ atomic_long_t owner;
+ spinlock_t wait_lock;
+ struct optimistic_spin_queue osq;
+ struct list_head wait_list;
+ void *magic;
+ struct lockdep_map dep_map;
+};
+
+struct arch_tlbflush_unmap_batch {
+ struct cpumask cpumask;
+};
+
+struct tlbflush_unmap_batch {
+ struct arch_tlbflush_unmap_batch arch;
+ bool flush_required;
+ bool writable;
+};
+
+struct page_frag {
+ struct page *page;
+ __u32 offset;
+ __u32 size;
+};
+
+struct desc_struct {
+ u16 limit0;
+ u16 base0;
+ u16 base1: 8;
+ u16 type: 4;
+ u16 s: 1;
+ u16 dpl: 2;
+ u16 p: 1;
+ u16 limit1: 4;
+ u16 avl: 1;
+ u16 l: 1;
+ u16 d: 1;
+ u16 g: 1;
+ u16 base2: 8;
+};
+
+typedef struct {
+ long unsigned int seg;
+} mm_segment_t;
+
+struct fregs_state {
+ u32 cwd;
+ u32 swd;
+ u32 twd;
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ u32 st_space[20];
+ u32 status;
+};
+
+struct fxregs_state {
+ u16 cwd;
+ u16 swd;
+ u16 twd;
+ u16 fop;
+ union {
+ struct {
+ u64 rip;
+ u64 rdp;
+ };
+ struct {
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ };
+ };
+ u32 mxcsr;
+ u32 mxcsr_mask;
+ u32 st_space[32];
+ u32 xmm_space[64];
+ u32 padding[12];
+ union {
+ u32 padding1[12];
+ u32 sw_reserved[12];
+ };
+};
+
+struct math_emu_info;
+
+struct swregs_state {
+ u32 cwd;
+ u32 swd;
+ u32 twd;
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ u32 st_space[20];
+ u8 ftop;
+ u8 changed;
+ u8 lookahead;
+ u8 no_update;
+ u8 rm;
+ u8 alimit;
+ struct math_emu_info *info;
+ u32 entry_eip;
+};
+
+struct xstate_header {
+ u64 xfeatures;
+ u64 xcomp_bv;
+ u64 reserved[6];
+};
+
+struct xregs_state {
+ struct fxregs_state i387;
+ struct xstate_header header;
+ u8 extended_state_area[0];
+};
+
+union fpregs_state {
+ struct fregs_state fsave;
+ struct fxregs_state fxsave;
+ struct swregs_state soft;
+ struct xregs_state xsave;
+ u8 __padding[4096];
+};
+
+struct fpu {
+ unsigned int last_cpu;
+ long unsigned int avx512_timestamp;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ union fpregs_state state;
+};
+
+struct perf_event;
+
+struct io_bitmap;
+
+struct thread_struct {
+ struct desc_struct tls_array[3];
+ long unsigned int sp;
+ short unsigned int es;
+ short unsigned int ds;
+ short unsigned int fsindex;
+ short unsigned int gsindex;
+ long unsigned int fsbase;
+ long unsigned int gsbase;
+ struct perf_event *ptrace_bps[4];
+ long unsigned int debugreg6;
+ long unsigned int ptrace_dr7;
+ long unsigned int cr2;
+ long unsigned int trap_nr;
+ long unsigned int error_code;
+ struct io_bitmap *io_bitmap;
+ long unsigned int iopl_emul;
+ mm_segment_t addr_limit;
+ unsigned int sig_on_uaccess_err: 1;
+ long: 63;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct fpu fpu;
+};
+
+struct sched_class;
+
+struct task_group;
+
+struct mm_struct;
+
+struct pid;
+
+struct completion;
+
+struct cred;
+
+struct nameidata;
+
+struct fs_struct;
+
+struct files_struct;
+
+struct nsproxy;
+
+struct signal_struct;
+
+struct sighand_struct;
+
+struct audit_context;
+
+struct rt_mutex_waiter;
+
+struct mutex_waiter;
+
+struct bio_list;
+
+struct blk_plug;
+
+struct reclaim_state;
+
+struct backing_dev_info;
+
+struct io_context;
+
+struct capture_control;
+
+struct kernel_siginfo;
+
+typedef struct kernel_siginfo kernel_siginfo_t;
+
+struct css_set;
+
+struct robust_list_head;
+
+struct futex_pi_state;
+
+struct perf_event_context;
+
+struct rseq;
+
+struct mem_cgroup;
+
+struct request_queue;
+
+struct uprobe_task;
+
+struct vm_struct;
+
+struct audit_task_info {
+ kuid_t loginuid;
+};
+
+struct pid_link
+{
+ struct hlist_node node;
+ struct pid *pid;
+};
+
+enum pid_type {
+ PIDTYPE_PID = 0,
+ PIDTYPE_TGID = 1,
+ PIDTYPE_PGID = 2,
+ PIDTYPE_SID = 3,
+ PIDTYPE_MAX = 4,
+};
+
+struct task_struct {
+ struct thread_info thread_info;
+ volatile long int state;
+ void *stack;
+ refcount_t usage;
+ unsigned int flags;
+ unsigned int ptrace;
+ struct llist_node wake_entry;
+ unsigned int wake_entry_type;
+ int on_cpu;
+ unsigned int cpu;
+ unsigned int wakee_flips;
+ long unsigned int wakee_flip_decay_ts;
+ struct task_struct *last_wakee;
+ int recent_used_cpu;
+ int wake_cpu;
+ int on_rq;
+ int prio;
+ int static_prio;
+ int normal_prio;
+ unsigned int rt_priority;
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
+ struct task_group *sched_task_group;
+ struct sched_dl_entity dl;
+ unsigned int policy;
+ int nr_cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
+ struct sched_info sched_info;
+ struct list_head tasks;
+ struct plist_node pushable_tasks;
+ struct rb_node pushable_dl_tasks;
+ struct mm_struct *mm;
+ struct mm_struct *active_mm;
+ struct vmacache vmacache;
+ struct task_rss_stat rss_stat;
+ int exit_state;
+ int exit_code;
+ int exit_signal;
+ int pdeath_signal;
+ long unsigned int jobctl;
+ unsigned int personality;
+ unsigned int sched_reset_on_fork: 1;
+ unsigned int sched_contributes_to_load: 1;
+ unsigned int sched_migrated: 1;
+ unsigned int sched_remote_wakeup: 1;
+ int: 28;
+ unsigned int in_execve: 1;
+ unsigned int in_iowait: 1;
+ unsigned int restore_sigmask: 1;
+ unsigned int in_user_fault: 1;
+ unsigned int no_cgroup_migration: 1;
+ unsigned int frozen: 1;
+ unsigned int use_memdelay: 1;
+ long unsigned int atomic_flags;
+ struct restart_block restart_block;
+ pid_t pid;
+ pid_t tgid;
+ long unsigned int stack_canary;
+ struct task_struct *real_parent;
+ struct task_struct *parent;
+ struct list_head children;
+ struct list_head sibling;
+ struct task_struct *group_leader;
+ struct list_head ptraced;
+ struct list_head ptrace_entry;
+ struct pid *thread_pid;
+ struct pid_link pids[PIDTYPE_MAX]; // old school pid refs
+ struct hlist_node pid_links[4];
+ struct list_head thread_group;
+ struct list_head thread_node;
+ struct completion *vfork_done;
+ int *set_child_tid;
+ int *clear_child_tid;
+ u64 utime;
+ u64 stime;
+ u64 gtime;
+ struct prev_cputime prev_cputime;
+ struct vtime vtime;
+ atomic_t tick_dep_mask;
+ long unsigned int nvcsw;
+ long unsigned int nivcsw;
+ u64 start_time;
+ u64 start_boottime;
+ long unsigned int min_flt;
+ long unsigned int maj_flt;
+ struct posix_cputimers posix_cputimers;
+ const struct cred *ptracer_cred;
+ const struct cred *real_cred;
+ const struct cred *cred;
+ char comm[16];
+ struct nameidata *nameidata;
+ struct sysv_sem sysvsem;
+ struct sysv_shm sysvshm;
+ struct fs_struct *fs;
+ struct files_struct *files;
+ struct nsproxy *nsproxy;
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
+ sigset_t blocked;
+ sigset_t real_blocked;
+ sigset_t saved_sigmask;
+ struct sigpending pending;
+ long unsigned int sas_ss_sp;
+ size_t sas_ss_size;
+ unsigned int sas_ss_flags;
+ struct callback_head *task_works;
+ struct audit_context *audit_context;
+ struct audit_task_info *audit; // Added audit_task for older kernels
+ kuid_t loginuid;
+ unsigned int sessionid;
+ struct seccomp seccomp;
+ u64 parent_exec_id;
+ u64 self_exec_id;
+ spinlock_t alloc_lock;
+ raw_spinlock_t pi_lock;
+ struct wake_q_node wake_q;
+ struct rb_root_cached pi_waiters;
+ struct task_struct *pi_top_task;
+ struct rt_mutex_waiter *pi_blocked_on;
+ struct mutex_waiter *blocked_on;
+ int non_block_count;
+ unsigned int irq_events;
+ unsigned int hardirq_threaded;
+ long unsigned int hardirq_enable_ip;
+ long unsigned int hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ int hardirqs_enabled;
+ int hardirq_context;
+ u64 hardirq_chain_key;
+ long unsigned int softirq_disable_ip;
+ long unsigned int softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+ int softirqs_enabled;
+ int softirq_context;
+ int irq_config;
+ u64 curr_chain_key;
+ int lockdep_depth;
+ unsigned int lockdep_recursion;
+ struct held_lock held_locks[48];
+ void *journal_info;
+ struct bio_list *bio_list;
+ struct blk_plug *plug;
+ struct reclaim_state *reclaim_state;
+ struct backing_dev_info *backing_dev_info;
+ struct io_context *io_context;
+ struct capture_control *capture_control;
+ long unsigned int ptrace_message;
+ kernel_siginfo_t *last_siginfo;
+ struct task_io_accounting ioac;
+ nodemask_t mems_allowed;
+ seqcount_t mems_allowed_seq;
+ int cpuset_mem_spread_rotor;
+ int cpuset_slab_spread_rotor;
+ struct css_set *cgroups;
+ struct list_head cg_list;
+ struct robust_list_head *robust_list;
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
+ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
+ struct perf_event_context *perf_event_ctxp[2];
+ struct mutex perf_event_mutex;
+ struct list_head perf_event_list;
+ struct rseq *rseq;
+ u32 rseq_sig;
+ long unsigned int rseq_event_mask;
+ struct tlbflush_unmap_batch tlb_ubc;
+ union {
+ refcount_t rcu_users;
+ struct callback_head rcu;
+ };
+ struct pipe_inode_info *splice_pipe;
+ struct page_frag task_frag;
+ int nr_dirtied;
+ int nr_dirtied_pause;
+ long unsigned int dirty_paused_when;
+ u64 timer_slack_ns;
+ u64 default_timer_slack_ns;
+ long unsigned int trace;
+ long unsigned int trace_recursion;
+ struct mem_cgroup *memcg_in_oom;
+ gfp_t memcg_oom_gfp_mask;
+ int memcg_oom_order;
+ unsigned int memcg_nr_pages_over_high;
+ struct mem_cgroup *active_memcg;
+ struct request_queue *throttle_queue;
+ struct uprobe_task *utask;
+ long unsigned int task_state_change;
+ int pagefault_disabled;
+ struct task_struct *oom_reaper_list;
+ struct vm_struct *stack_vm_area;
+ refcount_t stack_refcount;
+ void *security;
+ struct thread_struct thread;
+};
+
+struct screen_info {
+ __u8 orig_x;
+ __u8 orig_y;
+ __u16 ext_mem_k;
+ __u16 orig_video_page;
+ __u8 orig_video_mode;
+ __u8 orig_video_cols;
+ __u8 flags;
+ __u8 unused2;
+ __u16 orig_video_ega_bx;
+ __u16 unused3;
+ __u8 orig_video_lines;
+ __u8 orig_video_isVGA;
+ __u16 orig_video_points;
+ __u16 lfb_width;
+ __u16 lfb_height;
+ __u16 lfb_depth;
+ __u32 lfb_base;
+ __u32 lfb_size;
+ __u16 cl_magic;
+ __u16 cl_offset;
+ __u16 lfb_linelength;
+ __u8 red_size;
+ __u8 red_pos;
+ __u8 green_size;
+ __u8 green_pos;
+ __u8 blue_size;
+ __u8 blue_pos;
+ __u8 rsvd_size;
+ __u8 rsvd_pos;
+ __u16 vesapm_seg;
+ __u16 vesapm_off;
+ __u16 pages;
+ __u16 vesa_attributes;
+ __u32 capabilities;
+ __u32 ext_lfb_base;
+ __u8 _reserved[2];
+} __attribute__((packed));
+
+struct apm_bios_info {
+ __u16 version;
+ __u16 cseg;
+ __u32 offset;
+ __u16 cseg_16;
+ __u16 dseg;
+ __u16 flags;
+ __u16 cseg_len;
+ __u16 cseg_16_len;
+ __u16 dseg_len;
+};
+
+struct apm_info {
+ struct apm_bios_info bios;
+ short unsigned int connection_version;
+ int get_power_status_broken;
+ int get_power_status_swabinminutes;
+ int allow_ints;
+ int forbid_idle;
+ int realmode_power_off;
+ int disabled;
+};
+
+struct edd_device_params {
+ __u16 length;
+ __u16 info_flags;
+ __u32 num_default_cylinders;
+ __u32 num_default_heads;
+ __u32 sectors_per_track;
+ __u64 number_of_sectors;
+ __u16 bytes_per_sector;
+ __u32 dpte_ptr;
+ __u16 key;
+ __u8 device_path_info_length;
+ __u8 reserved2;
+ __u16 reserved3;
+ __u8 host_bus_type[4];
+ __u8 interface_type[8];
+ union {
+ struct {
+ __u16 base_address;
+ __u16 reserved1;
+ __u32 reserved2;
+ } isa;
+ struct {
+ __u8 bus;
+ __u8 slot;
+ __u8 function;
+ __u8 channel;
+ __u32 reserved;
+ } pci;
+ struct {
+ __u64 reserved;
+ } ibnd;
+ struct {
+ __u64 reserved;
+ } xprs;
+ struct {
+ __u64 reserved;
+ } htpt;
+ struct {
+ __u64 reserved;
+ } unknown;
+ } interface_path;
+ union {
+ struct {
+ __u8 device;
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 reserved3;
+ __u64 reserved4;
+ } ata;
+ struct {
+ __u8 device;
+ __u8 lun;
+ __u8 reserved1;
+ __u8 reserved2;
+ __u32 reserved3;
+ __u64 reserved4;
+ } atapi;
+ struct {
+ __u16 id;
+ __u64 lun;
+ __u16 reserved1;
+ __u32 reserved2;
+ } __attribute__((packed)) scsi;
+ struct {
+ __u64 serial_number;
+ __u64 reserved;
+ } usb;
+ struct {
+ __u64 eui;
+ __u64 reserved;
+ } i1394;
+ struct {
+ __u64 wwid;
+ __u64 lun;
+ } fibre;
+ struct {
+ __u64 identity_tag;
+ __u64 reserved;
+ } i2o;
+ struct {
+ __u32 array_number;
+ __u32 reserved1;
+ __u64 reserved2;
+ } raid;
+ struct {
+ __u8 device;
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 reserved3;
+ __u64 reserved4;
+ } sata;
+ struct {
+ __u64 reserved1;
+ __u64 reserved2;
+ } unknown;
+ } device_path;
+ __u8 reserved4;
+ __u8 checksum;
+} __attribute__((packed));
+
+struct edd_info {
+ __u8 device;
+ __u8 version;
+ __u16 interface_support;
+ __u16 legacy_max_cylinder;
+ __u8 legacy_max_head;
+ __u8 legacy_sectors_per_track;
+ struct edd_device_params params;
+} __attribute__((packed));
+
+struct edd {
+ unsigned int mbr_signature[16];
+ struct edd_info edd_info[6];
+ unsigned char mbr_signature_nr;
+ unsigned char edd_info_nr;
+};
+
+struct ist_info {
+ __u32 signature;
+ __u32 command;
+ __u32 event;
+ __u32 perf_level;
+};
+
+struct edid_info {
+ unsigned char dummy[128];
+};
+
+struct setup_header {
+ __u8 setup_sects;
+ __u16 root_flags;
+ __u32 syssize;
+ __u16 ram_size;
+ __u16 vid_mode;
+ __u16 root_dev;
+ __u16 boot_flag;
+ __u16 jump;
+ __u32 header;
+ __u16 version;
+ __u32 realmode_swtch;
+ __u16 start_sys_seg;
+ __u16 kernel_version;
+ __u8 type_of_loader;
+ __u8 loadflags;
+ __u16 setup_move_size;
+ __u32 code32_start;
+ __u32 ramdisk_image;
+ __u32 ramdisk_size;
+ __u32 bootsect_kludge;
+ __u16 heap_end_ptr;
+ __u8 ext_loader_ver;
+ __u8 ext_loader_type;
+ __u32 cmd_line_ptr;
+ __u32 initrd_addr_max;
+ __u32 kernel_alignment;
+ __u8 relocatable_kernel;
+ __u8 min_alignment;
+ __u16 xloadflags;
+ __u32 cmdline_size;
+ __u32 hardware_subarch;
+ __u64 hardware_subarch_data;
+ __u32 payload_offset;
+ __u32 payload_length;
+ __u64 setup_data;
+ __u64 pref_address;
+ __u32 init_size;
+ __u32 handover_offset;
+ __u32 kernel_info_offset;
+} __attribute__((packed));
+
+struct sys_desc_table {
+ __u16 length;
+ __u8 table[14];
+};
+
+struct olpc_ofw_header {
+ __u32 ofw_magic;
+ __u32 ofw_version;
+ __u32 cif_handler;
+ __u32 irq_desc_table;
+};
+
+struct efi_info {
+ __u32 efi_loader_signature;
+ __u32 efi_systab;
+ __u32 efi_memdesc_size;
+ __u32 efi_memdesc_version;
+ __u32 efi_memmap;
+ __u32 efi_memmap_size;
+ __u32 efi_systab_hi;
+ __u32 efi_memmap_hi;
+};
+
+struct boot_e820_entry {
+ __u64 addr;
+ __u64 size;
+ __u32 type;
+} __attribute__((packed));
+
+struct boot_params {
+ struct screen_info screen_info;
+ struct apm_bios_info apm_bios_info;
+ __u8 _pad2[4];
+ __u64 tboot_addr;
+ struct ist_info ist_info;
+ __u64 acpi_rsdp_addr;
+ __u8 _pad3[8];
+ __u8 hd0_info[16];
+ __u8 hd1_info[16];
+ struct sys_desc_table sys_desc_table;
+ struct olpc_ofw_header olpc_ofw_header;
+ __u32 ext_ramdisk_image;
+ __u32 ext_ramdisk_size;
+ __u32 ext_cmd_line_ptr;
+ __u8 _pad4[116];
+ struct edid_info edid_info;
+ struct efi_info efi_info;
+ __u32 alt_mem_k;
+ __u32 scratch;
+ __u8 e820_entries;
+ __u8 eddbuf_entries;
+ __u8 edd_mbr_sig_buf_entries;
+ __u8 kbd_status;
+ __u8 secure_boot;
+ __u8 _pad5[2];
+ __u8 sentinel;
+ __u8 _pad6[1];
+ struct setup_header hdr;
+ __u8 _pad7[36];
+ __u32 edd_mbr_sig_buffer[16];
+ struct boot_e820_entry e820_table[128];
+ __u8 _pad8[48];
+ struct edd_info eddbuf[6];
+ __u8 _pad9[276];
+} __attribute__((packed));
+
+enum x86_hardware_subarch {
+ X86_SUBARCH_PC = 0,
+ X86_SUBARCH_LGUEST = 1,
+ X86_SUBARCH_XEN = 2,
+ X86_SUBARCH_INTEL_MID = 3,
+ X86_SUBARCH_CE4100 = 4,
+ X86_NR_SUBARCHS = 5,
+};
+
+struct range {
+ u64 start;
+ u64 end;
+};
+
+#if defined(__TARGET_ARCH_x86)
+
+struct pt_regs {
+ long unsigned int r15;
+ long unsigned int r14;
+ long unsigned int r13;
+ long unsigned int r12;
+ long unsigned int bp;
+ long unsigned int bx;
+ long unsigned int r11;
+ long unsigned int r10;
+ long unsigned int r9;
+ long unsigned int r8;
+ long unsigned int ax;
+ long unsigned int cx;
+ long unsigned int dx;
+ long unsigned int si;
+ long unsigned int di;
+ long unsigned int orig_ax;
+ long unsigned int ip;
+ long unsigned int cs;
+ long unsigned int flags;
+ long unsigned int sp;
+ long unsigned int ss;
+};
+
+#elif defined(__TARGET_ARCH_arm64)
+/* definitions for arm64 in this vmlinux.h file might be incomplete or wrong
+ * for more information, see: https://github.com/cilium/tetragon/issues/786
+ */
+
+struct user_pt_regs {
+ __u64 regs[31];
+ __u64 sp;
+ __u64 pc;
+ __u64 pstate;
+};
+
+struct pt_regs {
+ union {
+ struct user_pt_regs user_regs;
+ struct {
+ u64 regs[31];
+ u64 sp;
+ u64 pc;
+ u64 pstate;
+ };
+ };
+ u64 orig_x0;
+ s32 syscallno;
+ u32 unused2;
+ u64 sdei_ttbr1;
+ u64 pmr_save;
+ u64 stackframe[2];
+ u64 lockdep_hardirqs;
+ u64 exit_rcu;
+};
+
+#endif
+
+struct desc_ptr {
+ short unsigned int size;
+ long unsigned int address;
+} __attribute__((packed));
+
+typedef long unsigned int pteval_t;
+
+typedef long unsigned int pmdval_t;
+
+typedef long unsigned int pudval_t;
+
+typedef long unsigned int p4dval_t;
+
+typedef long unsigned int pgdval_t;
+
+typedef long unsigned int pgprotval_t;
+
+typedef struct {
+ pteval_t pte;
+} pte_t;
+
+struct pgprot {
+ pgprotval_t pgprot;
+};
+
+typedef struct pgprot pgprot_t;
+
+typedef struct {
+ pgdval_t pgd;
+} pgd_t;
+
+typedef struct {
+ pgd_t pgd;
+} p4d_t;
+
+typedef struct {
+ pudval_t pud;
+} pud_t;
+
+typedef struct {
+ pmdval_t pmd;
+} pmd_t;
+
+typedef struct page *pgtable_t;
+
+struct address_space;
+
+struct kmem_cache;
+
+struct dev_pagemap;
+
+struct page {
+ long unsigned int flags;
+ union {
+ struct {
+ struct list_head lru;
+ struct address_space *mapping;
+ long unsigned int index;
+ long unsigned int private;
+ };
+ struct {
+ dma_addr_t dma_addr;
+ };
+ struct {
+ union {
+ struct list_head slab_list;
+ struct {
+ struct page *next;
+ int pages;
+ int pobjects;
+ };
+ };
+ struct kmem_cache *slab_cache;
+ void *freelist;
+ union {
+ void *s_mem;
+ long unsigned int counters;
+ struct {
+ unsigned int inuse: 16;
+ unsigned int objects: 15;
+ unsigned int frozen: 1;
+ };
+ };
+ };
+ struct {
+ long unsigned int compound_head;
+ unsigned char compound_dtor;
+ unsigned char compound_order;
+ atomic_t compound_mapcount;
+ };
+ struct {
+ long unsigned int _compound_pad_1;
+ atomic_t hpage_pinned_refcount;
+ struct list_head deferred_list;
+ };
+ struct {
+ long unsigned int _pt_pad_1;
+ pgtable_t pmd_huge_pte;
+ long unsigned int _pt_pad_2;
+ union {
+ struct mm_struct *pt_mm;
+ atomic_t pt_frag_refcount;
+ };
+ spinlock_t *ptl;
+ };
+ struct {
+ struct dev_pagemap *pgmap;
+ void *zone_device_data;
+ };
+ struct callback_head callback_head;
+ };
+ union {
+ atomic_t _mapcount;
+ unsigned int page_type;
+ unsigned int active;
+ int units;
+ };
+ atomic_t _refcount;
+ struct mem_cgroup *mem_cgroup;
+};
+
+struct paravirt_callee_save {
+ void *func;
+};
+
+struct pv_info {
+ const char *name;
+};
+
+struct pv_init_ops {
+ unsigned int (*patch)(u8, void *, long unsigned int, unsigned int);
+};
+
+struct pv_time_ops {
+ long long unsigned int (*sched_clock)();
+ long long unsigned int (*steal_clock)(int);
+};
+
+struct pv_cpu_ops {
+ void (*io_delay)();
+};
+
+struct pv_irq_ops {};
+
+struct flush_tlb_info;
+
+struct mmu_gather;
+
+struct pv_mmu_ops {
+ void (*flush_tlb_user)();
+ void (*flush_tlb_kernel)();
+ void (*flush_tlb_one_user)(long unsigned int);
+ void (*flush_tlb_others)(const struct cpumask *, const struct flush_tlb_info *);
+ void (*tlb_remove_table)(struct mmu_gather *, void *);
+ void (*exit_mmap)(struct mm_struct *);
+};
+
+struct flush_tlb_info {
+ struct mm_struct *mm;
+ long unsigned int start;
+ long unsigned int end;
+ u64 new_tlb_gen;
+ unsigned int stride_shift;
+ bool freed_tables;
+};
+
+struct rw_semaphore {
+ atomic_long_t count;
+ atomic_long_t owner;
+ struct optimistic_spin_queue osq;
+ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+ void *magic;
+ struct lockdep_map dep_map;
+};
+
+struct mm_rss_stat {
+ atomic_long_t count[4];
+};
+
+struct ldt_struct;
+
+struct vdso_image;
+
+typedef struct {
+ u64 ctx_id;
+ atomic64_t tlb_gen;
+ struct rw_semaphore ldt_usr_sem;
+ struct ldt_struct *ldt;
+ short unsigned int ia32_compat;
+ struct mutex lock;
+ void *vdso;
+ const struct vdso_image *vdso_image;
+ atomic_t perf_rdpmc_allowed;
+} mm_context_t;
+
+struct xol_area;
+
+struct uprobes_state {
+ struct xol_area *xol_area;
+};
+
+struct work_struct;
+
+typedef void (*work_func_t)(struct work_struct *);
+
+struct work_struct {
+ atomic_long_t data;
+ struct list_head entry;
+ work_func_t func;
+ struct lockdep_map lockdep_map;
+};
+
+struct linux_binfmt;
+
+struct core_state;
+
+struct kioctx_table;
+
+struct user_namespace;
+
+struct mm_struct {
+ struct {
+ struct vm_area_struct *mmap;
+ struct rb_root mm_rb;
+ u64 vmacache_seqnum;
+ long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+ long unsigned int mmap_base;
+ long unsigned int mmap_legacy_base;
+ long unsigned int task_size;
+ long unsigned int highest_vm_end;
+ pgd_t *pgd;
+ atomic_t membarrier_state;
+ atomic_t mm_users;
+ atomic_t mm_count;
+ atomic_long_t pgtables_bytes;
+ int map_count;
+ spinlock_t page_table_lock;
+ struct rw_semaphore mmap_lock;
+ struct list_head mmlist;
+ long unsigned int hiwater_rss;
+ long unsigned int hiwater_vm;
+ long unsigned int total_vm;
+ long unsigned int locked_vm;
+ atomic64_t pinned_vm;
+ long unsigned int data_vm;
+ long unsigned int exec_vm;
+ long unsigned int stack_vm;
+ long unsigned int def_flags;
+ spinlock_t arg_lock;
+ long unsigned int start_code;
+ long unsigned int end_code;
+ long unsigned int start_data;
+ long unsigned int end_data;
+ long unsigned int start_brk;
+ long unsigned int brk;
+ long unsigned int start_stack;
+ long unsigned int arg_start;
+ long unsigned int arg_end;
+ long unsigned int env_start;
+ long unsigned int env_end;
+ long unsigned int saved_auxv[44];
+ struct mm_rss_stat rss_stat;
+ struct linux_binfmt *binfmt;
+ mm_context_t context;
+ long unsigned int flags;
+ struct core_state *core_state;
+ spinlock_t ioctx_lock;
+ struct kioctx_table *ioctx_table;
+ struct task_struct *owner;
+ struct user_namespace *user_ns;
+ struct file *exe_file;
+ atomic_t tlb_flush_pending;
+ bool tlb_flush_batched;
+ struct uprobes_state uprobes_state;
+ atomic_long_t hugetlb_usage;
+ struct work_struct async_put_work;
+ };
+ long unsigned int cpu_bitmap[0];
+};
+
+struct qrwlock {
+ union {
+ atomic_t cnts;
+ struct {
+ u8 wlocked;
+ u8 __lstate[3];
+ };
+ };
+ arch_spinlock_t wait_lock;
+};
+
+typedef struct qrwlock arch_rwlock_t;
+
+struct pv_lock_ops {
+ void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
+ struct paravirt_callee_save queued_spin_unlock;
+ void (*wait)(u8 *, u8);
+ void (*kick)(int);
+ struct paravirt_callee_save vcpu_is_preempted;
+};
+
+struct paravirt_patch_template {
+ struct pv_init_ops init;
+ struct pv_time_ops time;
+ struct pv_cpu_ops cpu;
+ struct pv_irq_ops irq;
+ struct pv_mmu_ops mmu;
+ struct pv_lock_ops lock;
+};
+
+struct paravirt_patch_site {
+ u8 *instr;
+ u8 type;
+ u8 len;
+};
+
+struct math_emu_info {
+ long int ___orig_eip;
+ struct pt_regs *regs;
+};
+
+typedef struct cpumask cpumask_var_t[1];
+
+struct tracepoint_func {
+ void *func;
+ void *data;
+ int prio;
+};
+
+struct tracepoint {
+ const char *name;
+ struct static_key key;
+ int (*regfunc)();
+ void (*unregfunc)();
+ struct tracepoint_func *funcs;
+};
+
+struct cpuinfo_x86 {
+ __u8 x86;
+ __u8 x86_vendor;
+ __u8 x86_model;
+ __u8 x86_stepping;
+ int x86_tlbsize;
+ __u32 vmx_capability[3];
+ __u8 x86_virt_bits;
+ __u8 x86_phys_bits;
+ __u8 x86_coreid_bits;
+ __u8 cu_id;
+ __u32 extended_cpuid_level;
+ int cpuid_level;
+ union {
+ __u32 x86_capability[20];
+ long unsigned int x86_capability_alignment;
+ };
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ unsigned int x86_cache_size;
+ int x86_cache_alignment;
+ int x86_cache_max_rmid;
+ int x86_cache_occ_scale;
+ int x86_cache_mbm_width_offset;
+ int x86_power;
+ long unsigned int loops_per_jiffy;
+ u16 x86_max_cores;
+ u16 apicid;
+ u16 initial_apicid;
+ u16 x86_clflush_size;
+ u16 booted_cores;
+ u16 phys_proc_id;
+ u16 logical_proc_id;
+ u16 cpu_core_id;
+ u16 cpu_die_id;
+ u16 logical_die_id;
+ u16 cpu_index;
+ u32 microcode;
+ u8 x86_cache_bits;
+ unsigned int initialized: 1;
+};
+
+struct seq_operations {
+ void * (*start)(struct seq_file *, loff_t *);
+ void (*stop)(struct seq_file *, void *);
+ void * (*next)(struct seq_file *, void *, loff_t *);
+ int (*show)(struct seq_file *, void *);
+};
+
+struct x86_hw_tss {
+ u32 reserved1;
+ u64 sp0;
+ u64 sp1;
+ u64 sp2;
+ u64 reserved2;
+ u64 ist[7];
+ u32 reserved3;
+ u32 reserved4;
+ u16 reserved5;
+ u16 io_bitmap_base;
+} __attribute__((packed));
+
+struct entry_stack {
+ long unsigned int words[64];
+};
+
+struct entry_stack_page {
+ struct entry_stack stack;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct x86_io_bitmap {
+ u64 prev_sequence;
+ unsigned int prev_max;
+ long unsigned int bitmap[1025];
+ long unsigned int mapall[1025];
+};
+
+struct tss_struct {
+ struct x86_hw_tss x86_tss;
+ struct x86_io_bitmap io_bitmap;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct irq_stack {
+ char stack[16384];
+};
+
+struct fixed_percpu_data {
+ char gs_base[40];
+ long unsigned int stack_canary;
+};
+
+enum l1tf_mitigations {
+ L1TF_MITIGATION_OFF = 0,
+ L1TF_MITIGATION_FLUSH_NOWARN = 1,
+ L1TF_MITIGATION_FLUSH = 2,
+ L1TF_MITIGATION_FLUSH_NOSMT = 3,
+ L1TF_MITIGATION_FULL = 4,
+ L1TF_MITIGATION_FULL_FORCE = 5,
+};
+
+struct mpc_table {
+ char signature[4];
+ short unsigned int length;
+ char spec;
+ char checksum;
+ char oem[8];
+ char productid[12];
+ unsigned int oemptr;
+ short unsigned int oemsize;
+ short unsigned int oemcount;
+ unsigned int lapic;
+ unsigned int reserved;
+};
+
+struct mpc_cpu {
+ unsigned char type;
+ unsigned char apicid;
+ unsigned char apicver;
+ unsigned char cpuflag;
+ unsigned int cpufeature;
+ unsigned int featureflag;
+ unsigned int reserved[2];
+};
+
+struct mpc_bus {
+ unsigned char type;
+ unsigned char busid;
+ unsigned char bustype[6];
+};
+
+struct mpc_intsrc {
+ unsigned char type;
+ unsigned char irqtype;
+ short unsigned int irqflag;
+ unsigned char srcbus;
+ unsigned char srcbusirq;
+ unsigned char dstapic;
+ unsigned char dstirq;
+};
+
+struct x86_init_mpparse {
+ void (*mpc_record)(unsigned int);
+ void (*setup_ioapic_ids)();
+ int (*mpc_apic_id)(struct mpc_cpu *);
+ void (*smp_read_mpc_oem)(struct mpc_table *);
+ void (*mpc_oem_pci_bus)(struct mpc_bus *);
+ void (*mpc_oem_bus_info)(struct mpc_bus *, char *);
+ void (*find_smp_config)();
+ void (*get_smp_config)(unsigned int);
+};
+
+struct x86_init_resources {
+ void (*probe_roms)();
+ void (*reserve_resources)();
+ char * (*memory_setup)();
+};
+
+struct x86_init_irqs {
+ void (*pre_vector_init)();
+ void (*intr_init)();
+ void (*intr_mode_select)();
+ void (*intr_mode_init)();
+};
+
+struct x86_init_oem {
+ void (*arch_setup)();
+ void (*banner)();
+};
+
+struct x86_init_paging {
+ void (*pagetable_init)();
+};
+
+struct x86_init_timers {
+ void (*setup_percpu_clockev)();
+ void (*timer_init)();
+ void (*wallclock_init)();
+};
+
+struct x86_init_iommu {
+ int (*iommu_init)();
+};
+
+struct x86_init_pci {
+ int (*arch_init)();
+ int (*init)();
+ void (*init_irq)();
+ void (*fixup_irqs)();
+};
+
+struct x86_hyper_init {
+ void (*init_platform)();
+ void (*guest_late_init)();
+ bool (*x2apic_available)();
+ void (*init_mem_mapping)();
+ void (*init_after_bootmem)();
+};
+
+struct x86_init_acpi {
+ void (*set_root_pointer)(u64);
+ u64 (*get_root_pointer)();
+ void (*reduced_hw_early_init)();
+};
+
+struct x86_init_ops {
+ struct x86_init_resources resources;
+ struct x86_init_mpparse mpparse;
+ struct x86_init_irqs irqs;
+ struct x86_init_oem oem;
+ struct x86_init_paging paging;
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
+ struct x86_init_pci pci;
+ struct x86_hyper_init hyper;
+ struct x86_init_acpi acpi;
+};
+
+struct x86_cpuinit_ops {
+ void (*setup_percpu_clockev)();
+ void (*early_percpu_clock_init)();
+ void (*fixup_cpu_id)(struct cpuinfo_x86 *, int);
+};
+
+struct x86_legacy_devices {
+ int pnpbios;
+};
+
+enum x86_legacy_i8042_state {
+ X86_LEGACY_I8042_PLATFORM_ABSENT = 0,
+ X86_LEGACY_I8042_FIRMWARE_ABSENT = 1,
+ X86_LEGACY_I8042_EXPECTED_PRESENT = 2,
+};
+
+struct x86_legacy_features {
+ enum x86_legacy_i8042_state i8042;
+ int rtc;
+ int warm_reset;
+ int no_vga;
+ int reserve_bios_regions;
+ struct x86_legacy_devices devices;
+};
+
+struct x86_hyper_runtime {
+ void (*pin_vcpu)(int);
+};
+
+struct x86_platform_ops {
+ long unsigned int (*calibrate_cpu)();
+ long unsigned int (*calibrate_tsc)();
+ void (*get_wallclock)(struct timespec64 *);
+ int (*set_wallclock)(const struct timespec64 *);
+ void (*iommu_shutdown)();
+ bool (*is_untracked_pat_range)(u64, u64);
+ void (*nmi_init)();
+ unsigned char (*get_nmi_reason)();
+ void (*save_sched_clock_state)();
+ void (*restore_sched_clock_state)();
+ void (*apic_post_init)();
+ struct x86_legacy_features legacy;
+ void (*set_legacy_features)();
+ struct x86_hyper_runtime hyper;
+};
+
+struct pci_dev;
+
+struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *, int, int);
+ void (*teardown_msi_irq)(unsigned int);
+ void (*teardown_msi_irqs)(struct pci_dev *);
+ void (*restore_msi_irqs)(struct pci_dev *);
+};
+
+struct x86_apic_ops {
+ unsigned int (*io_apic_read)(unsigned int, unsigned int);
+ void (*restore)();
+};
+
+struct physid_mask {
+ long unsigned int mask[512];
+};
+
+typedef struct physid_mask physid_mask_t;
+
+struct lock_trace;
+
+struct lock_class {
+ struct hlist_node hash_entry;
+ struct list_head lock_entry;
+ struct list_head locks_after;
+ struct list_head locks_before;
+ const struct lockdep_subclass_key *key;
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+ long unsigned int usage_mask;
+ const struct lock_trace *usage_traces[9];
+ int name_version;
+ const char *name;
+ short int wait_type_inner;
+ short int wait_type_outer;
+};
+
+struct lock_trace {
+ struct hlist_node hash_entry;
+ u32 hash;
+ u32 nr_entries;
+ long unsigned int entries[0];
+};
+
+typedef struct {
+ arch_rwlock_t raw_lock;
+ unsigned int magic;
+ unsigned int owner_cpu;
+ void *owner;
+ struct lockdep_map dep_map;
+} rwlock_t;
+
+struct ww_acquire_ctx;
+
+struct mutex_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ struct ww_acquire_ctx *ww_ctx;
+ void *magic;
+};
+
+struct vdso_image {
+ void *data;
+ long unsigned int size;
+ long unsigned int alt;
+ long unsigned int alt_len;
+ long int sym_vvar_start;
+ long int sym_vvar_page;
+ long int sym_pvclock_page;
+ long int sym_hvclock_page;
+ long int sym_timens_page;
+ long int sym_VDSO32_NOTE_MASK;
+ long int sym___kernel_sigreturn;
+ long int sym___kernel_rt_sigreturn;
+ long int sym___kernel_vsyscall;
+ long int sym_int80_landing_pad;
+};
+
+struct kref {
+ refcount_t refcount;
+};
+
+struct kset;
+
+struct kobj_type;
+
+struct kernfs_node;
+
+struct kobject {
+ const char *name;
+ struct list_head entry;
+ struct kobject *parent;
+ struct kset *kset;
+ struct kobj_type *ktype;
+ struct kernfs_node *sd;
+ struct kref kref;
+ unsigned int state_initialized: 1;
+ unsigned int state_in_sysfs: 1;
+ unsigned int state_add_uevent_sent: 1;
+ unsigned int state_remove_uevent_sent: 1;
+ unsigned int uevent_suppress: 1;
+};
+
+enum dl_dev_state {
+ DL_DEV_NO_DRIVER = 0,
+ DL_DEV_PROBING = 1,
+ DL_DEV_DRIVER_BOUND = 2,
+ DL_DEV_UNBINDING = 3,
+};
+
+struct dev_links_info {
+ struct list_head suppliers;
+ struct list_head consumers;
+ struct list_head needs_suppliers;
+ struct list_head defer_sync;
+ bool need_for_probe;
+ enum dl_dev_state status;
+};
+
+struct pm_message {
+ int event;
+};
+
+typedef struct pm_message pm_message_t;
+
+struct pm_subsys_data;
+
+struct device;
+
+struct dev_pm_qos;
+
+struct dev_pm_info {
+ pm_message_t power_state;
+ unsigned int can_wakeup: 1;
+ unsigned int async_suspend: 1;
+ bool in_dpm_list: 1;
+ bool is_prepared: 1;
+ bool is_suspended: 1;
+ bool is_noirq_suspended: 1;
+ bool is_late_suspended: 1;
+ bool no_pm: 1;
+ bool early_init: 1;
+ bool direct_complete: 1;
+ u32 driver_flags;
+ spinlock_t lock;
+ unsigned int should_wakeup: 1;
+ struct pm_subsys_data *subsys_data;
+ void (*set_latency_tolerance)(struct device *, s32);
+ struct dev_pm_qos *qos;
+};
+
+struct dev_archdata {};
+
+struct dev_iommu;
+
+struct device_private;
+
+struct device_type;
+
+struct bus_type;
+
+struct device_driver;
+
+struct dev_pm_domain;
+
+struct irq_domain;
+
+struct dma_map_ops;
+
+struct device_dma_parameters;
+
+struct device_node;
+
+struct fwnode_handle;
+
+struct class;
+
+struct attribute_group;
+
+struct iommu_group;
+
+struct device {
+ struct kobject kobj;
+ struct device *parent;
+ struct device_private *p;
+ const char *init_name;
+ const struct device_type *type;
+ struct bus_type *bus;
+ struct device_driver *driver;
+ void *platform_data;
+ void *driver_data;
+ struct mutex lockdep_mutex;
+ struct mutex mutex;
+ struct dev_links_info links;
+ struct dev_pm_info power;
+ struct dev_pm_domain *pm_domain;
+ struct irq_domain *msi_domain;
+ struct list_head msi_list;
+ const struct dma_map_ops *dma_ops;
+ u64 *dma_mask;
+ u64 coherent_dma_mask;
+ u64 bus_dma_limit;
+ long unsigned int dma_pfn_offset;
+ struct device_dma_parameters *dma_parms;
+ struct list_head dma_pools;
+ struct dev_archdata archdata;
+ struct device_node *of_node;
+ struct fwnode_handle *fwnode;
+ dev_t devt;
+ u32 id;
+ spinlock_t devres_lock;
+ struct list_head devres_head;
+ struct class *class;
+ const struct attribute_group **groups;
+ void (*release)(struct device *);
+ struct iommu_group *iommu_group;
+ struct dev_iommu *iommu;
+ bool offline_disabled: 1;
+ bool offline: 1;
+ bool of_node_reused: 1;
+ bool state_synced: 1;
+};
+
+enum fixed_addresses {
+ VSYSCALL_PAGE = 511,
+ FIX_DBGP_BASE = 512,
+ FIX_EARLYCON_MEM_BASE = 513,
+ FIX_APIC_BASE = 514,
+ FIX_IO_APIC_BASE_0 = 515,
+ FIX_IO_APIC_BASE_END = 642,
+ FIX_PARAVIRT_BOOTMAP = 643,
+ __end_of_permanent_fixed_addresses = 644,
+ FIX_BTMAP_END = 1024,
+ FIX_BTMAP_BEGIN = 1535,
+ __end_of_fixed_addresses = 1536,
+};
+
+struct vm_userfaultfd_ctx {};
+
+struct anon_vma;
+
+struct vm_operations_struct;
+
+struct vm_area_struct {
+ long unsigned int vm_start;
+ long unsigned int vm_end;
+ struct vm_area_struct *vm_next;
+ struct vm_area_struct *vm_prev;
+ struct rb_node vm_rb;
+ long unsigned int rb_subtree_gap;
+ struct mm_struct *vm_mm;
+ pgprot_t vm_page_prot;
+ long unsigned int vm_flags;
+ struct {
+ struct rb_node rb;
+ long unsigned int rb_subtree_last;
+ } shared;
+ struct list_head anon_vma_chain;
+ struct anon_vma *anon_vma;
+ const struct vm_operations_struct *vm_ops;
+ long unsigned int vm_pgoff;
+ struct file *vm_file;
+ void *vm_private_data;
+ atomic_long_t swap_readahead_info;
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+};
+
+struct wait_queue_head {
+ spinlock_t lock;
+ struct list_head head;
+};
+
+typedef struct wait_queue_head wait_queue_head_t;
+
+struct swait_queue_head {
+ raw_spinlock_t lock;
+ struct list_head task_list;
+};
+
+struct completion {
+ unsigned int done;
+ struct swait_queue_head wait;
+};
+
+typedef struct {
+ struct seqcount seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *);
+
+struct notifier_block {
+ notifier_fn_t notifier_call;
+ struct notifier_block *next;
+ int priority;
+};
+
+struct blocking_notifier_head {
+ struct rw_semaphore rwsem;
+ struct notifier_block *head;
+};
+
+struct arch_uprobe_task {
+ long unsigned int saved_scratch_register;
+ unsigned int saved_trap_nr;
+ unsigned int saved_tf;
+};
+
+enum uprobe_task_state {
+ UTASK_RUNNING = 0,
+ UTASK_SSTEP = 1,
+ UTASK_SSTEP_ACK = 2,
+ UTASK_SSTEP_TRAPPED = 3,
+};
+
+struct uprobe;
+
+struct return_instance;
+
+struct uprobe_task {
+ enum uprobe_task_state state;
+ union {
+ struct {
+ struct arch_uprobe_task autask;
+ long unsigned int vaddr;
+ };
+ struct {
+ struct callback_head dup_xol_work;
+ long unsigned int dup_xol_addr;
+ };
+ };
+ struct uprobe *active_uprobe;
+ long unsigned int xol_vaddr;
+ struct return_instance *return_instances;
+ unsigned int depth;
+};
+
+struct return_instance {
+ struct uprobe *uprobe;
+ long unsigned int func;
+ long unsigned int stack;
+ long unsigned int orig_ret_vaddr;
+ bool chained;
+ struct return_instance *next;
+};
+
+struct xarray {
+ spinlock_t xa_lock;
+ gfp_t xa_flags;
+ void *xa_head;
+};
+
+typedef u32 errseq_t;
+
+struct address_space_operations;
+
+struct address_space {
+ struct inode *host;
+ struct xarray i_pages;
+ gfp_t gfp_mask;
+ atomic_t i_mmap_writable;
+ struct rb_root_cached i_mmap;
+ struct rw_semaphore i_mmap_rwsem;
+ long unsigned int nrpages;
+ long unsigned int nrexceptional;
+ long unsigned int writeback_index;
+ const struct address_space_operations *a_ops;
+ long unsigned int flags;
+ errseq_t wb_err;
+ spinlock_t private_lock;
+ struct list_head private_list;
+ void *private_data;
+};
+
+struct vmem_altmap {
+ const long unsigned int base_pfn;
+ const long unsigned int end_pfn;
+ const long unsigned int reserve;
+ long unsigned int free;
+ long unsigned int align;
+ long unsigned int alloc;
+};
+
+struct resource {
+ resource_size_t start;
+ resource_size_t end;
+ const char *name;
+ long unsigned int flags;
+ long unsigned int desc;
+ struct resource *parent;
+ struct resource *sibling;
+ struct resource *child;
+};
+
+struct percpu_ref;
+
+typedef void percpu_ref_func_t(struct percpu_ref *);
+
+struct percpu_ref {
+ atomic_long_t count;
+ long unsigned int percpu_count_ptr;
+ percpu_ref_func_t *release;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic: 1;
+ bool allow_reinit: 1;
+ struct callback_head rcu;
+};
+
+enum memory_type {
+ MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_FS_DAX = 2,
+ MEMORY_DEVICE_DEVDAX = 3,
+ MEMORY_DEVICE_PCI_P2PDMA = 4,
+};
+
+struct dev_pagemap_ops;
+
+struct dev_pagemap {
+ struct vmem_altmap altmap;
+ struct resource res;
+ struct percpu_ref *ref;
+ struct percpu_ref internal_ref;
+ struct completion done;
+ enum memory_type type;
+ unsigned int flags;
+ const struct dev_pagemap_ops *ops;
+ void *owner;
+};
+
+struct vfsmount;
+
+struct path {
+ struct vfsmount *mnt;
+ struct dentry *dentry;
+};
+
+enum rw_hint {
+ WRITE_LIFE_NOT_SET = 0,
+ WRITE_LIFE_NONE = 1,
+ WRITE_LIFE_SHORT = 2,
+ WRITE_LIFE_MEDIUM = 3,
+ WRITE_LIFE_LONG = 4,
+ WRITE_LIFE_EXTREME = 5,
+};
+
+struct fown_struct {
+ rwlock_t lock;
+ struct pid *pid;
+ enum pid_type pid_type;
+ kuid_t uid;
+ kuid_t euid;
+ int signum;
+};
+
+struct file_ra_state {
+ long unsigned int start;
+ unsigned int size;
+ unsigned int async_size;
+ unsigned int ra_pages;
+ unsigned int mmap_miss;
+ loff_t prev_pos;
+};
+
+struct file {
+ union {
+ struct llist_node fu_llist;
+ struct callback_head fu_rcuhead;
+ } f_u;
+ struct path f_path;
+ struct inode *f_inode;
+ const struct file_operations *f_op;
+ spinlock_t f_lock;
+ enum rw_hint f_write_hint;
+ atomic_long_t f_count;
+ unsigned int f_flags;
+ fmode_t f_mode;
+ struct mutex f_pos_lock;
+ loff_t f_pos;
+ struct fown_struct f_owner;
+ const struct cred *f_cred;
+ struct file_ra_state f_ra;
+ u64 f_version;
+ void *f_security;
+ void *private_data;
+ struct list_head f_ep_links;
+ struct list_head f_tfile_llink;
+ struct address_space *f_mapping;
+ errseq_t f_wb_err;
+ errseq_t f_sb_err;
+};
+
+typedef unsigned int vm_fault_t;
+
+enum page_entry_size {
+ PE_SIZE_PTE = 0,
+ PE_SIZE_PMD = 1,
+ PE_SIZE_PUD = 2,
+};
+
+struct vm_fault;
+
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct *);
+ void (*close)(struct vm_area_struct *);
+ int (*split)(struct vm_area_struct *, long unsigned int);
+ int (*mremap)(struct vm_area_struct *);
+ vm_fault_t (*fault)(struct vm_fault *);
+ vm_fault_t (*huge_fault)(struct vm_fault *, enum page_entry_size);
+ void (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int);
+ long unsigned int (*pagesize)(struct vm_area_struct *);
+ vm_fault_t (*page_mkwrite)(struct vm_fault *);
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault *);
+ int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int);
+ const char * (*name)(struct vm_area_struct *);
+ struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int);
+};
+
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
+struct vm_fault {
+ struct vm_area_struct *vma;
+ unsigned int flags;
+ gfp_t gfp_mask;
+ long unsigned int pgoff;
+ long unsigned int address;
+ pmd_t *pmd;
+ pud_t *pud;
+ pte_t orig_pte;
+ struct page *cow_page;
+ struct page *page;
+ pte_t *pte;
+ spinlock_t *ptl;
+ pgtable_t prealloc_pte;
+};
+
+typedef struct {
+ u16 __softirq_pending;
+ unsigned int __nmi_count;
+ unsigned int apic_timer_irqs;
+ unsigned int irq_spurious_count;
+ unsigned int icr_read_retry_count;
+ unsigned int kvm_posted_intr_ipis;
+ unsigned int kvm_posted_intr_wakeup_ipis;
+ unsigned int kvm_posted_intr_nested_ipis;
+ unsigned int x86_platform_ipis;
+ unsigned int apic_perf_irqs;
+ unsigned int apic_irq_work_irqs;
+ unsigned int irq_resched_count;
+ unsigned int irq_call_count;
+ unsigned int irq_tlb_count;
+ long: 64;
+} irq_cpustat_t;
+
+enum apic_intr_mode_id {
+ APIC_PIC = 0,
+ APIC_VIRTUAL_WIRE = 1,
+ APIC_VIRTUAL_WIRE_NO_CONFIG = 2,
+ APIC_SYMMETRIC_IO = 3,
+ APIC_SYMMETRIC_IO_NO_ROUTING = 4,
+};
+
+struct apic {
+ void (*eoi_write)(u32, u32);
+ void (*native_eoi_write)(u32, u32);
+ void (*write)(u32, u32);
+ u32 (*read)(u32);
+ void (*wait_icr_idle)();
+ u32 (*safe_wait_icr_idle)();
+ void (*send_IPI)(int, int);
+ void (*send_IPI_mask)(const struct cpumask *, int);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *, int);
+ void (*send_IPI_allbutself)(int);
+ void (*send_IPI_all)(int);
+ void (*send_IPI_self)(int);
+ u32 dest_logical;
+ u32 disable_esr;
+ u32 irq_delivery_mode;
+ u32 irq_dest_mode;
+ u32 (*calc_dest_apicid)(unsigned int);
+ u64 (*icr_read)();
+ void (*icr_write)(u32, u32);
+ int (*probe)();
+ int (*acpi_madt_oem_check)(char *, char *);
+ int (*apic_id_valid)(u32);
+ int (*apic_id_registered)();
+ bool (*check_apicid_used)(physid_mask_t *, int);
+ void (*init_apic_ldr)();
+ void (*ioapic_phys_id_map)(physid_mask_t *, physid_mask_t *);
+ void (*setup_apic_routing)();
+ int (*cpu_present_to_apicid)(int);
+ void (*apicid_to_cpu_present)(int, physid_mask_t *);
+ int (*check_phys_apicid_present)(int);
+ int (*phys_pkg_id)(int, int);
+ u32 (*get_apic_id)(long unsigned int);
+ u32 (*set_apic_id)(unsigned int);
+ int (*wakeup_secondary_cpu)(int, long unsigned int);
+ void (*inquire_remote_apic)(int);
+ char *name;
+};
+
+struct smp_ops {
+ void (*smp_prepare_boot_cpu)();
+ void (*smp_prepare_cpus)(unsigned int);
+ void (*smp_cpus_done)(unsigned int);
+ void (*stop_other_cpus)(int);
+ void (*crash_stop_other_cpus)();
+ void (*smp_send_reschedule)(int);
+ int (*cpu_up)(unsigned int, struct task_struct *);
+ int (*cpu_disable)();
+ void (*cpu_die)(unsigned int);
+ void (*play_dead)();
+ void (*send_call_func_ipi)(const struct cpumask *);
+ void (*send_call_func_single_ipi)(int);
+};
+
+enum pcpu_fc {
+ PCPU_FC_AUTO = 0,
+ PCPU_FC_EMBED = 1,
+ PCPU_FC_PAGE = 2,
+ PCPU_FC_NR = 3,
+};
+
+struct fwnode_operations;
+
+struct fwnode_handle {
+ struct fwnode_handle *secondary;
+ const struct fwnode_operations *ops;
+ struct device *dev;
+};
+
+struct fwnode_reference_args;
+
+struct fwnode_endpoint;
+
+struct fwnode_operations {
+ struct fwnode_handle * (*get)(struct fwnode_handle *);
+ void (*put)(struct fwnode_handle *);
+ bool (*device_is_available)(const struct fwnode_handle *);
+ const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *);
+ bool (*property_present)(const struct fwnode_handle *, const char *);
+ int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t);
+ int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t);
+ const char * (*get_name)(const struct fwnode_handle *);
+ const char * (*get_name_prefix)(const struct fwnode_handle *);
+ struct fwnode_handle * (*get_parent)(const struct fwnode_handle *);
+ struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *);
+ struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *);
+ int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *);
+ struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *);
+ struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *);
+ struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *);
+ int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *);
+ int (*add_links)(const struct fwnode_handle *, struct device *);
+};
+
+struct fwnode_endpoint {
+ unsigned int port;
+ unsigned int id;
+ const struct fwnode_handle *local_fwnode;
+};
+
+struct fwnode_reference_args {
+ struct fwnode_handle *fwnode;
+ unsigned int nargs;
+ u64 args[8];
+};
+
+struct vm_struct {
+ struct vm_struct *next;
+ void *addr;
+ long unsigned int size;
+ long unsigned int flags;
+ struct page **pages;
+ unsigned int nr_pages;
+ phys_addr_t phys_addr;
+ const void *caller;
+};
+
+struct free_area {
+ struct list_head free_list[5];
+ long unsigned int nr_free;
+};
+
+struct zone_padding {
+ char x[0];
+};
+
+enum zone_stat_item {
+ NR_FREE_PAGES = 0,
+ NR_ZONE_LRU_BASE = 1,
+ NR_ZONE_INACTIVE_ANON = 1,
+ NR_ZONE_ACTIVE_ANON = 2,
+ NR_ZONE_INACTIVE_FILE = 3,
+ NR_ZONE_ACTIVE_FILE = 4,
+ NR_ZONE_UNEVICTABLE = 5,
+ NR_ZONE_WRITE_PENDING = 6,
+ NR_MLOCK = 7,
+ NR_PAGETABLE = 8,
+ NR_KERNEL_STACK_KB = 9,
+ NR_BOUNCE = 10,
+ NR_FREE_CMA_PAGES = 11,
+ NR_VM_ZONE_STAT_ITEMS = 12,
+};
+
+enum node_stat_item {
+ NR_LRU_BASE = 0,
+ NR_INACTIVE_ANON = 0,
+ NR_ACTIVE_ANON = 1,
+ NR_INACTIVE_FILE = 2,
+ NR_ACTIVE_FILE = 3,
+ NR_UNEVICTABLE = 4,
+ NR_SLAB_RECLAIMABLE = 5,
+ NR_SLAB_UNRECLAIMABLE = 6,
+ NR_ISOLATED_ANON = 7,
+ NR_ISOLATED_FILE = 8,
+ WORKINGSET_NODES = 9,
+ WORKINGSET_REFAULT = 10,
+ WORKINGSET_ACTIVATE = 11,
+ WORKINGSET_RESTORE = 12,
+ WORKINGSET_NODERECLAIM = 13,
+ NR_ANON_MAPPED = 14,
+ NR_FILE_MAPPED = 15,
+ NR_FILE_PAGES = 16,
+ NR_FILE_DIRTY = 17,
+ NR_WRITEBACK = 18,
+ NR_WRITEBACK_TEMP = 19,
+ NR_SHMEM = 20,
+ NR_SHMEM_THPS = 21,
+ NR_SHMEM_PMDMAPPED = 22,
+ NR_FILE_THPS = 23,
+ NR_FILE_PMDMAPPED = 24,
+ NR_ANON_THPS = 25,
+ NR_VMSCAN_WRITE = 26,
+ NR_VMSCAN_IMMEDIATE = 27,
+ NR_DIRTIED = 28,
+ NR_WRITTEN = 29,
+ NR_KERNEL_MISC_RECLAIMABLE = 30,
+ NR_FOLL_PIN_ACQUIRED = 31,
+ NR_FOLL_PIN_RELEASED = 32,
+ NR_VM_NODE_STAT_ITEMS = 33,
+};
+
+struct pglist_data;
+
+struct lruvec {
+ struct list_head lists[5];
+ long unsigned int anon_cost;
+ long unsigned int file_cost;
+ atomic_long_t inactive_age;
+ long unsigned int refaults;
+ long unsigned int flags;
+ struct pglist_data *pgdat;
+};
+
+struct per_cpu_pageset;
+
+struct zone {
+ long unsigned int _watermark[3];
+ long unsigned int watermark_boost;
+ long unsigned int nr_reserved_highatomic;
+ long int lowmem_reserve[5];
+ struct pglist_data *zone_pgdat;
+ struct per_cpu_pageset *pageset;
+ long unsigned int zone_start_pfn;
+ atomic_long_t managed_pages;
+ long unsigned int spanned_pages;
+ long unsigned int present_pages;
+ const char *name;
+ long unsigned int nr_isolate_pageblock;
+ seqlock_t span_seqlock;
+ int initialized;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct zone_padding _pad1_;
+ struct free_area free_area[11];
+ long unsigned int flags;
+ spinlock_t lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct zone_padding _pad2_;
+ long unsigned int percpu_drift_mark;
+ long unsigned int compact_cached_free_pfn;
+ long unsigned int compact_cached_migrate_pfn[2];
+ long unsigned int compact_init_migrate_pfn;
+ long unsigned int compact_init_free_pfn;
+ unsigned int compact_considered;
+ unsigned int compact_defer_shift;
+ int compact_order_failed;
+ bool compact_blockskip_flush;
+ bool contiguous;
+ short: 16;
+ struct zone_padding _pad3_;
+ atomic_long_t vm_stat[12];
+ atomic_long_t vm_numa_stat[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct zoneref {
+ struct zone *zone;
+ int zone_idx;
+};
+
+struct zonelist {
+ struct zoneref _zonerefs[6];
+};
+
+enum zone_type {
+ ZONE_DMA = 0,
+ ZONE_DMA32 = 1,
+ ZONE_NORMAL = 2,
+ ZONE_MOVABLE = 3,
+ ZONE_DEVICE = 4,
+ __MAX_NR_ZONES = 5,
+};
+
+struct per_cpu_nodestat;
+
+struct pglist_data {
+ struct zone node_zones[5];
+ struct zonelist node_zonelists[1];
+ int nr_zones;
+ spinlock_t node_size_lock;
+ long unsigned int node_start_pfn;
+ long unsigned int node_present_pages;
+ long unsigned int node_spanned_pages;
+ int node_id;
+ wait_queue_head_t kswapd_wait;
+ wait_queue_head_t pfmemalloc_wait;
+ struct task_struct *kswapd;
+ int kswapd_order;
+ enum zone_type kswapd_highest_zoneidx;
+ int kswapd_failures;
+ int kcompactd_max_order;
+ enum zone_type kcompactd_highest_zoneidx;
+ wait_queue_head_t kcompactd_wait;
+ struct task_struct *kcompactd;
+ long unsigned int totalreserve_pages;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct zone_padding _pad1_;
+ spinlock_t lru_lock;
+ struct lruvec __lruvec;
+ long unsigned int flags;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct zone_padding _pad2_;
+ struct per_cpu_nodestat *per_cpu_nodestats;
+ atomic_long_t vm_stat[33];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef unsigned int isolate_mode_t;
+
+struct per_cpu_pages {
+ int count;
+ int high;
+ int batch;
+ struct list_head lists[3];
+};
+
+struct per_cpu_pageset {
+ struct per_cpu_pages pcp;
+ s8 stat_threshold;
+ s8 vm_stat_diff[12];
+};
+
+struct per_cpu_nodestat {
+ s8 stat_threshold;
+ s8 vm_node_stat_diff[33];
+};
+
+struct mem_section_usage {
+ long unsigned int subsection_map[1];
+ long unsigned int pageblock_flags[0];
+};
+
+struct mem_section {
+ long unsigned int section_mem_map;
+ struct mem_section_usage *usage;
+};
+
+struct shrink_control {
+ gfp_t gfp_mask;
+ int nid;
+ long unsigned int nr_to_scan;
+ long unsigned int nr_scanned;
+ struct mem_cgroup *memcg;
+};
+
+struct shrinker {
+ long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *);
+ long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *);
+ long int batch;
+ int seeks;
+ unsigned int flags;
+ struct list_head list;
+ int id;
+ atomic_long_t *nr_deferred;
+};
+
+struct rlimit {
+ __kernel_ulong_t rlim_cur;
+ __kernel_ulong_t rlim_max;
+};
+
+struct dev_pagemap_ops {
+ void (*page_free)(struct page *);
+ void (*kill)(struct dev_pagemap *);
+ void (*cleanup)(struct dev_pagemap *);
+ vm_fault_t (*migrate_to_ram)(struct vm_fault *);
+};
+
+struct pid_namespace;
+
+struct upid {
+ int nr;
+ struct pid_namespace *ns;
+};
+
+struct pid {
+ refcount_t count;
+ unsigned int level;
+ spinlock_t lock;
+ struct hlist_head tasks[4];
+ struct hlist_head inodes;
+ wait_queue_head_t wait_pidfd;
+ struct callback_head rcu;
+ struct upid numbers[1];
+};
+
+typedef struct {
+ gid_t val;
+} kgid_t;
+
+struct hrtimer_cpu_base;
+
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+ unsigned int index;
+ clockid_t clockid;
+ seqcount_t seq;
+ struct hrtimer *running;
+ struct timerqueue_head active;
+ ktime_t (*get_time)();
+ ktime_t offset;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+ unsigned int hres_active: 1;
+ unsigned int in_hrtirq: 1;
+ unsigned int hang_detected: 1;
+ unsigned int softirq_activated: 1;
+ unsigned int nr_events;
+ short unsigned int nr_retries;
+ short unsigned int nr_hangs;
+ unsigned int max_hang_time;
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
+ struct hrtimer_clock_base clock_base[8];
+};
+
+struct tick_device;
+
+typedef void __signalfn_t(int);
+
+typedef __signalfn_t *__sighandler_t;
+
+typedef void __restorefn_t();
+
+typedef __restorefn_t *__sigrestore_t;
+
+union sigval {
+ int sival_int;
+ void *sival_ptr;
+};
+
+typedef union sigval sigval_t;
+
+union __sifields {
+ struct {
+ __kernel_pid_t _pid;
+ __kernel_uid32_t _uid;
+ } _kill;
+ struct {
+ __kernel_timer_t _tid;
+ int _overrun;
+ sigval_t _sigval;
+ int _sys_private;
+ } _timer;
+ struct {
+ __kernel_pid_t _pid;
+ __kernel_uid32_t _uid;
+ sigval_t _sigval;
+ } _rt;
+ struct {
+ __kernel_pid_t _pid;
+ __kernel_uid32_t _uid;
+ int _status;
+ __kernel_clock_t _utime;
+ __kernel_clock_t _stime;
+ } _sigchld;
+ struct {
+ void *_addr;
+ union {
+ short int _addr_lsb;
+ struct {
+ char _dummy_bnd[8];
+ void *_lower;
+ void *_upper;
+ } _addr_bnd;
+ struct {
+ char _dummy_pkey[8];
+ __u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+ struct {
+ long int _band;
+ int _fd;
+ } _sigpoll;
+ struct {
+ void *_call_addr;
+ int _syscall;
+ unsigned int _arch;
+ } _sigsys;
+};
+
+struct kernel_siginfo {
+ struct {
+ int si_signo;
+ int si_errno;
+ int si_code;
+ union __sifields _sifields;
+ };
+};
+
+struct ratelimit_state {
+ raw_spinlock_t lock;
+ int interval;
+ int burst;
+ int printed;
+ int missed;
+ long unsigned int begin;
+ long unsigned int flags;
+};
+
+struct user_struct {
+ refcount_t __count;
+ atomic_t processes;
+ atomic_t sigpending;
+ atomic_t fanotify_listeners;
+ atomic_long_t epoll_watches;
+ long unsigned int mq_bytes;
+ long unsigned int locked_shm;
+ long unsigned int unix_inflight;
+ atomic_long_t pipe_bufs;
+ struct hlist_node uidhash_node;
+ kuid_t uid;
+ atomic_long_t locked_vm;
+ struct ratelimit_state ratelimit;
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ long unsigned int sa_flags;
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask;
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+struct cpu_itimer {
+ u64 expires;
+ u64 incr;
+};
+
+struct task_cputime_atomic {
+ atomic64_t utime;
+ atomic64_t stime;
+ atomic64_t sum_exec_runtime;
+};
+
+struct thread_group_cputimer {
+ struct task_cputime_atomic cputime_atomic;
+};
+
+struct tty_struct;
+
+struct tty_audit_buf;
+
+struct signal_struct {
+ refcount_t sigcnt;
+ atomic_t live;
+ int nr_threads;
+ struct list_head thread_head;
+ wait_queue_head_t wait_chldexit;
+ struct task_struct *curr_target;
+ struct sigpending shared_pending;
+ struct hlist_head multiprocess;
+ int group_exit_code;
+ int notify_count;
+ struct task_struct *group_exit_task;
+ int group_stop_count;
+ unsigned int flags;
+ unsigned int is_child_subreaper: 1;
+ unsigned int has_child_subreaper: 1;
+ int posix_timer_id;
+ struct list_head posix_timers;
+ struct hrtimer real_timer;
+ ktime_t it_real_incr;
+ struct cpu_itimer it[2];
+ struct thread_group_cputimer cputimer;
+ struct posix_cputimers posix_cputimers;
+ struct pid *pids[4];
+ atomic_t tick_dep_mask;
+ struct pid *tty_old_pgrp;
+ int leader;
+ struct tty_struct *tty;
+ seqlock_t stats_lock;
+ u64 utime;
+ u64 stime;
+ u64 cutime;
+ u64 cstime;
+ u64 gtime;
+ u64 cgtime;
+ struct prev_cputime prev_cputime;
+ long unsigned int nvcsw;
+ long unsigned int nivcsw;
+ long unsigned int cnvcsw;
+ long unsigned int cnivcsw;
+ long unsigned int min_flt;
+ long unsigned int maj_flt;
+ long unsigned int cmin_flt;
+ long unsigned int cmaj_flt;
+ long unsigned int inblock;
+ long unsigned int oublock;
+ long unsigned int cinblock;
+ long unsigned int coublock;
+ long unsigned int maxrss;
+ long unsigned int cmaxrss;
+ struct task_io_accounting ioac;
+ long long unsigned int sum_sched_runtime;
+ struct rlimit rlim[16];
+ unsigned int audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+ bool oom_flag_origin;
+ short int oom_score_adj;
+ short int oom_score_adj_min;
+ struct mm_struct *oom_mm;
+ struct mutex cred_guard_mutex;
+ struct mutex exec_update_mutex;
+};
+
+struct rseq {
+ __u32 cpu_id_start;
+ __u32 cpu_id;
+ union {
+ __u64 ptr64;
+ __u64 ptr;
+ } rseq_cs;
+ __u32 flags;
+ long: 32;
+ long: 64;
+};
+
+struct root_domain;
+
+struct rq;
+
+struct rq_flags;
+
+struct sched_class {
+ const struct sched_class *next;
+ void (*enqueue_task)(struct rq *, struct task_struct *, int);
+ void (*dequeue_task)(struct rq *, struct task_struct *, int);
+ void (*yield_task)(struct rq *);
+ bool (*yield_to_task)(struct rq *, struct task_struct *, bool);
+ void (*check_preempt_curr)(struct rq *, struct task_struct *, int);
+ struct task_struct * (*pick_next_task)(struct rq *);
+ void (*put_prev_task)(struct rq *, struct task_struct *);
+ void (*set_next_task)(struct rq *, struct task_struct *, bool);
+ int (*balance)(struct rq *, struct task_struct *, struct rq_flags *);
+ int (*select_task_rq)(struct task_struct *, int, int, int);
+ void (*migrate_task_rq)(struct task_struct *, int);
+ void (*task_woken)(struct rq *, struct task_struct *);
+ void (*set_cpus_allowed)(struct task_struct *, const struct cpumask *);
+ void (*rq_online)(struct rq *);
+ void (*rq_offline)(struct rq *);
+ void (*task_tick)(struct rq *, struct task_struct *, int);
+ void (*task_fork)(struct task_struct *);
+ void (*task_dead)(struct task_struct *);
+ void (*switched_from)(struct rq *, struct task_struct *);
+ void (*switched_to)(struct rq *, struct task_struct *);
+ void (*prio_changed)(struct rq *, struct task_struct *, int);
+ unsigned int (*get_rr_interval)(struct rq *, struct task_struct *);
+ void (*update_curr)(struct rq *);
+ void (*task_change_group)(struct task_struct *, int);
+};
+
+struct kernel_cap_struct {
+ __u32 cap[2];
+};
+
+typedef struct kernel_cap_struct kernel_cap_t;
+
+struct group_info;
+
+struct cred {
+ atomic_t usage;
+ kuid_t uid;
+ kgid_t gid;
+ kuid_t suid;
+ kgid_t sgid;
+ kuid_t euid;
+ kgid_t egid;
+ kuid_t fsuid;
+ kgid_t fsgid;
+ unsigned int securebits;
+ kernel_cap_t cap_inheritable;
+ kernel_cap_t cap_permitted;
+ kernel_cap_t cap_effective;
+ kernel_cap_t cap_bset;
+ kernel_cap_t cap_ambient;
+ void *security;
+ struct user_struct *user;
+ struct user_namespace *user_ns;
+ struct group_info *group_info;
+ union {
+ int non_rcu;
+ struct callback_head rcu;
+ };
+};
+
+struct sighand_struct {
+ spinlock_t siglock;
+ refcount_t count;
+ wait_queue_head_t signalfd_wqh;
+ struct k_sigaction action[64];
+};
+
+struct io_cq;
+
+struct io_context {
+ atomic_long_t refcount;
+ atomic_t active_ref;
+ atomic_t nr_tasks;
+ spinlock_t lock;
+ short unsigned int ioprio;
+ int nr_batch_requests;
+ long unsigned int last_waited;
+ struct xarray icq_tree;
+ struct io_cq *icq_hint;
+ struct hlist_head icq_list;
+ struct work_struct release_work;
+};
+
+union thread_union {
+ struct task_struct task;
+ long unsigned int stack[2048];
+};
+
+struct hlist_bl_node;
+
+struct hlist_bl_head {
+ struct hlist_bl_node *first;
+};
+
+struct hlist_bl_node {
+ struct hlist_bl_node *next;
+ struct hlist_bl_node **pprev;
+};
+
+struct lockref {
+ union {
+ struct {
+ spinlock_t lock;
+ int count;
+ };
+ };
+};
+
+struct qstr {
+ union {
+ struct {
+ u32 hash;
+ u32 len;
+ };
+ u64 hash_len;
+ };
+ const unsigned char *name;
+};
+
+struct dentry_stat_t {
+ long int nr_dentry;
+ long int nr_unused;
+ long int age_limit;
+ long int want_pages;
+ long int nr_negative;
+ long int dummy;
+};
+
+struct dentry_operations;
+
+struct dentry {
+ unsigned int d_flags;
+ seqcount_t d_seq;
+ struct hlist_bl_node d_hash;
+ struct dentry *d_parent;
+ struct qstr d_name;
+ struct inode *d_inode;
+ unsigned char d_iname[32];
+ struct lockref d_lockref;
+ const struct dentry_operations *d_op;
+ struct super_block *d_sb;
+ long unsigned int d_time;
+ void *d_fsdata;
+ union {
+ struct list_head d_lru;
+ wait_queue_head_t *d_wait;
+ };
+ struct list_head d_child;
+ struct list_head d_subdirs;
+ union {
+ struct hlist_node d_alias;
+ struct hlist_bl_node d_in_lookup_hash;
+ struct callback_head d_rcu;
+ } d_u;
+};
+
+struct posix_acl;
+
+struct inode_operations;
+
+struct bdi_writeback;
+
+struct file_lock_context;
+
+struct block_device;
+
+struct cdev;
+
+struct fsnotify_mark_connector;
+
+struct inode {
+ umode_t i_mode;
+ short unsigned int i_opflags;
+ kuid_t i_uid;
+ kgid_t i_gid;
+ unsigned int i_flags;
+ struct posix_acl *i_acl;
+ struct posix_acl *i_default_acl;
+ const struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct address_space *i_mapping;
+ void *i_security;
+ long unsigned int i_ino;
+ union {
+ const unsigned int i_nlink;
+ unsigned int __i_nlink;
+ };
+ dev_t i_rdev;
+ loff_t i_size;
+ struct timespec64 i_atime;
+ struct timespec64 i_mtime;
+ struct timespec64 i_ctime;
+ spinlock_t i_lock;
+ short unsigned int i_bytes;
+ u8 i_blkbits;
+ u8 i_write_hint;
+ blkcnt_t i_blocks;
+ long unsigned int i_state;
+ struct rw_semaphore i_rwsem;
+ long unsigned int dirtied_when;
+ long unsigned int dirtied_time_when;
+ struct hlist_node i_hash;
+ struct list_head i_io_list;
+ struct bdi_writeback *i_wb;
+ int i_wb_frn_winner;
+ u16 i_wb_frn_avg_time;
+ u16 i_wb_frn_history;
+ struct list_head i_lru;
+ struct list_head i_sb_list;
+ struct list_head i_wb_list;
+ union {
+ struct hlist_head i_dentry;
+ struct callback_head i_rcu;
+ };
+ atomic64_t i_version;
+ atomic64_t i_sequence;
+ atomic_t i_count;
+ atomic_t i_dio_count;
+ atomic_t i_writecount;
+ atomic_t i_readcount;
+ union {
+ const struct file_operations *i_fop;
+ void (*free_inode)(struct inode *);
+ };
+ struct file_lock_context *i_flctx;
+ struct address_space i_data;
+ struct list_head i_devices;
+ union {
+ struct pipe_inode_info *i_pipe;
+ struct block_device *i_bdev;
+ struct cdev *i_cdev;
+ char *i_link;
+ unsigned int i_dir_seq;
+ };
+ __u32 i_generation;
+ __u32 i_fsnotify_mask;
+ struct fsnotify_mark_connector *i_fsnotify_marks;
+ void *i_private;
+};
+
+struct dentry_operations {
+ int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_weak_revalidate)(struct dentry *, unsigned int);
+ int (*d_hash)(const struct dentry *, struct qstr *);
+ int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);
+ int (*d_delete)(const struct dentry *);
+ int (*d_init)(struct dentry *);
+ void (*d_release)(struct dentry *);
+ void (*d_prune)(struct dentry *);
+ void (*d_iput)(struct dentry *, struct inode *);
+ char * (*d_dname)(struct dentry *, char *, int);
+ struct vfsmount * (*d_automount)(struct path *);
+ int (*d_manage)(const struct path *, bool);
+ struct dentry * (*d_real)(struct dentry *, const struct inode *);
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct mtd_info;
+
+typedef long long int qsize_t;
+
+struct quota_format_type;
+
+struct mem_dqinfo {
+ struct quota_format_type *dqi_format;
+ int dqi_fmt_id;
+ struct list_head dqi_dirty_list;
+ long unsigned int dqi_flags;
+ unsigned int dqi_bgrace;
+ unsigned int dqi_igrace;
+ qsize_t dqi_max_spc_limit;
+ qsize_t dqi_max_ino_limit;
+ void *dqi_priv;
+};
+
+struct quota_format_ops;
+
+struct quota_info {
+ unsigned int flags;
+ struct rw_semaphore dqio_sem;
+ struct inode *files[3];
+ struct mem_dqinfo info[3];
+ const struct quota_format_ops *ops[3];
+};
+
+struct rcu_sync {
+ int gp_state;
+ int gp_count;
+ wait_queue_head_t gp_wait;
+ struct callback_head cb_head;
+};
+
+struct rcuwait {
+ struct task_struct *task;
+};
+
+struct percpu_rw_semaphore {
+ struct rcu_sync rss;
+ unsigned int *read_count;
+ struct rcuwait writer;
+ wait_queue_head_t waiters;
+ atomic_t block;
+ struct lockdep_map dep_map;
+};
+
+struct sb_writers {
+ int frozen;
+ wait_queue_head_t wait_unfrozen;
+ struct percpu_rw_semaphore rw_sem[3];
+};
+
+typedef struct {
+ __u8 b[16];
+} uuid_t;
+
+struct list_lru_node;
+
+struct list_lru {
+ struct list_lru_node *node;
+ struct list_head list;
+ int shrinker_id;
+ bool memcg_aware;
+};
+
+struct super_operations;
+
+struct dquot_operations;
+
+struct quotactl_ops;
+
+struct export_operations;
+
+struct xattr_handler;
+
+struct workqueue_struct;
+
+struct super_block {
+ struct list_head s_list;
+ dev_t s_dev;
+ unsigned char s_blocksize_bits;
+ long unsigned int s_blocksize;
+ loff_t s_maxbytes;
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ long unsigned int s_flags;
+ long unsigned int s_iflags;
+ long unsigned int s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ int s_count;
+ atomic_t s_active;
+ void *s_security;
+ const struct xattr_handler **s_xattr;
+ struct hlist_bl_head s_roots;
+ struct list_head s_mounts;
+ struct block_device *s_bdev;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct hlist_node s_instances;
+ unsigned int s_quota_types;
+ struct quota_info s_dquot;
+ struct sb_writers s_writers;
+ void *s_fs_info;
+ u32 s_time_gran;
+ time64_t s_time_min;
+ time64_t s_time_max;
+ __u32 s_fsnotify_mask;
+ struct fsnotify_mark_connector *s_fsnotify_marks;
+ char s_id[32];
+ uuid_t s_uuid;
+ unsigned int s_max_links;
+ fmode_t s_mode;
+ struct mutex s_vfs_rename_mutex;
+ const char *s_subtype;
+ const struct dentry_operations *s_d_op;
+ int cleancache_poolid;
+ struct shrinker s_shrink;
+ atomic_long_t s_remove_count;
+ atomic_long_t s_fsnotify_inode_refs;
+ int s_readonly_remount;
+ errseq_t s_wb_err;
+ struct workqueue_struct *s_dio_done_wq;
+ struct hlist_head s_pins;
+ struct user_namespace *s_user_ns;
+ struct list_lru s_dentry_lru;
+ struct list_lru s_inode_lru;
+ struct callback_head rcu;
+ struct work_struct destroy_work;
+ struct mutex s_sync_lock;
+ int s_stack_depth;
+ spinlock_t s_inode_list_lock;
+ struct list_head s_inodes;
+ spinlock_t s_inode_wblist_lock;
+ struct list_head s_inodes_wb;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct kstat {
+ u32 result_mask;
+ umode_t mode;
+ unsigned int nlink;
+ uint32_t blksize;
+ u64 attributes;
+ u64 attributes_mask;
+ u64 ino;
+ dev_t dev;
+ dev_t rdev;
+ kuid_t uid;
+ kgid_t gid;
+ loff_t size;
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 ctime;
+ struct timespec64 btime;
+ u64 blocks;
+ u64 mnt_id;
+};
+
+struct list_lru_one {
+ struct list_head list;
+ long int nr_items;
+};
+
+struct list_lru_memcg {
+ struct callback_head rcu;
+ struct list_lru_one *lru[0];
+};
+
+struct list_lru_node {
+ spinlock_t lock;
+ struct list_lru_one lru;
+ struct list_lru_memcg *memcg_lrus;
+ long int nr_items;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xa_node {
+ unsigned char shift;
+ unsigned char offset;
+ unsigned char count;
+ unsigned char nr_values;
+ struct xa_node *parent;
+ struct xarray *array;
+ union {
+ struct list_head private_list;
+ struct callback_head callback_head;
+ };
+ void *slots[64];
+ union {
+ long unsigned int tags[3];
+ long unsigned int marks[3];
+ };
+};
+
+typedef struct {
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+} local_lock_t;
+
+struct radix_tree_preload {
+ local_lock_t lock;
+ unsigned int nr;
+ struct xa_node *nodes;
+};
+
+enum migrate_mode {
+ MIGRATE_ASYNC = 0,
+ MIGRATE_SYNC_LIGHT = 1,
+ MIGRATE_SYNC = 2,
+ MIGRATE_SYNC_NO_COPY = 3,
+};
+
+struct ctl_table;
+
+typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
+
+struct ctl_table_poll;
+
+struct ctl_table {
+ const char *procname;
+ void *data;
+ int maxlen;
+ umode_t mode;
+ struct ctl_table *child;
+ proc_handler *proc_handler;
+ struct ctl_table_poll *poll;
+ void *extra1;
+ void *extra2;
+};
+
+struct ctl_table_poll {
+ atomic_t event;
+ wait_queue_head_t wait;
+};
+
+struct group_info {
+ atomic_t usage;
+ int ngroups;
+ kgid_t gid[0];
+};
+
+struct delayed_call {
+ void (*fn)(void *);
+ void *arg;
+};
+
+typedef struct {
+ __u8 b[16];
+} guid_t;
+
+struct io_cq {
+ struct request_queue *q;
+ struct io_context *ioc;
+ union {
+ struct list_head q_node;
+ struct kmem_cache *__rcu_icq_cache;
+ };
+ union {
+ struct hlist_node ioc_node;
+ struct callback_head __rcu_head;
+ };
+ unsigned int flags;
+};
+
+struct files_stat_struct {
+ long unsigned int nr_files;
+ long unsigned int nr_free_files;
+ long unsigned int max_files;
+};
+
+struct inodes_stat_t {
+ long int nr_inodes;
+ long int nr_unused;
+ long int dummy[5];
+};
+
+struct kiocb {
+ struct file *ki_filp;
+ loff_t ki_pos;
+ void (*ki_complete)(struct kiocb *, long int, long int);
+ void *private;
+ int ki_flags;
+ u16 ki_hint;
+ u16 ki_ioprio;
+ unsigned int ki_cookie;
+};
+
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ kuid_t ia_uid;
+ kgid_t ia_gid;
+ loff_t ia_size;
+ struct timespec64 ia_atime;
+ struct timespec64 ia_mtime;
+ struct timespec64 ia_ctime;
+ struct file *ia_file;
+};
+
+struct percpu_counter {
+ raw_spinlock_t lock;
+ s64 count;
+ struct list_head list;
+ s32 *counters;
+};
+
+typedef __kernel_uid32_t projid_t;
+
+typedef struct {
+ projid_t val;
+} kprojid_t;
+
+enum quota_type {
+ USRQUOTA = 0,
+ GRPQUOTA = 1,
+ PRJQUOTA = 2,
+};
+
+struct kqid {
+ union {
+ kuid_t uid;
+ kgid_t gid;
+ kprojid_t projid;
+ };
+ enum quota_type type;
+};
+
+struct mem_dqblk {
+ qsize_t dqb_bhardlimit;
+ qsize_t dqb_bsoftlimit;
+ qsize_t dqb_curspace;
+ qsize_t dqb_rsvspace;
+ qsize_t dqb_ihardlimit;
+ qsize_t dqb_isoftlimit;
+ qsize_t dqb_curinodes;
+ time64_t dqb_btime;
+ time64_t dqb_itime;
+};
+
+struct dquot {
+ struct hlist_node dq_hash;
+ struct list_head dq_inuse;
+ struct list_head dq_free;
+ struct list_head dq_dirty;
+ struct mutex dq_lock;
+ spinlock_t dq_dqb_lock;
+ atomic_t dq_count;
+ struct super_block *dq_sb;
+ struct kqid dq_id;
+ loff_t dq_off;
+ long unsigned int dq_flags;
+ struct mem_dqblk dq_dqb;
+};
+
+struct quota_format_type {
+ int qf_fmt_id;
+ const struct quota_format_ops *qf_ops;
+ struct module *qf_owner;
+ struct quota_format_type *qf_next;
+};
+
+struct dqstats {
+ long unsigned int stat[8];
+ struct percpu_counter counter[8];
+};
+
+struct quota_format_ops {
+ int (*check_quota_file)(struct super_block *, int);
+ int (*read_file_info)(struct super_block *, int);
+ int (*write_file_info)(struct super_block *, int);
+ int (*free_file_info)(struct super_block *, int);
+ int (*read_dqblk)(struct dquot *);
+ int (*commit_dqblk)(struct dquot *);
+ int (*release_dqblk)(struct dquot *);
+ int (*get_next_id)(struct super_block *, struct kqid *);
+};
+
+struct dquot_operations {
+ int (*write_dquot)(struct dquot *);
+ struct dquot * (*alloc_dquot)(struct super_block *, int);
+ void (*destroy_dquot)(struct dquot *);
+ int (*acquire_dquot)(struct dquot *);
+ int (*release_dquot)(struct dquot *);
+ int (*mark_dirty)(struct dquot *);
+ int (*write_info)(struct super_block *, int);
+ qsize_t * (*get_reserved_space)(struct inode *);
+ int (*get_projid)(struct inode *, kprojid_t *);
+ int (*get_inode_usage)(struct inode *, qsize_t *);
+ int (*get_next_id)(struct super_block *, struct kqid *);
+};
+
+struct qc_dqblk {
+ int d_fieldmask;
+ u64 d_spc_hardlimit;
+ u64 d_spc_softlimit;
+ u64 d_ino_hardlimit;
+ u64 d_ino_softlimit;
+ u64 d_space;
+ u64 d_ino_count;
+ s64 d_ino_timer;
+ s64 d_spc_timer;
+ int d_ino_warns;
+ int d_spc_warns;
+ u64 d_rt_spc_hardlimit;
+ u64 d_rt_spc_softlimit;
+ u64 d_rt_space;
+ s64 d_rt_spc_timer;
+ int d_rt_spc_warns;
+};
+
+struct qc_type_state {
+ unsigned int flags;
+ unsigned int spc_timelimit;
+ unsigned int ino_timelimit;
+ unsigned int rt_spc_timelimit;
+ unsigned int spc_warnlimit;
+ unsigned int ino_warnlimit;
+ unsigned int rt_spc_warnlimit;
+ long long unsigned int ino;
+ blkcnt_t blocks;
+ blkcnt_t nextents;
+};
+
+struct qc_state {
+ unsigned int s_incoredqs;
+ struct qc_type_state s_state[3];
+};
+
+struct qc_info {
+ int i_fieldmask;
+ unsigned int i_flags;
+ unsigned int i_spc_timelimit;
+ unsigned int i_ino_timelimit;
+ unsigned int i_rt_spc_timelimit;
+ unsigned int i_spc_warnlimit;
+ unsigned int i_ino_warnlimit;
+ unsigned int i_rt_spc_warnlimit;
+};
+
+struct quotactl_ops {
+ int (*quota_on)(struct super_block *, int, int, const struct path *);
+ int (*quota_off)(struct super_block *, int);
+ int (*quota_enable)(struct super_block *, unsigned int);
+ int (*quota_disable)(struct super_block *, unsigned int);
+ int (*quota_sync)(struct super_block *, int);
+ int (*set_info)(struct super_block *, int, struct qc_info *);
+ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);
+ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*get_state)(struct super_block *, struct qc_state *);
+ int (*rm_xquota)(struct super_block *, unsigned int);
+};
+
+struct writeback_control;
+
+struct readahead_control;
+
+struct swap_info_struct;
+
+struct address_space_operations {
+ int (*writepage)(struct page *, struct writeback_control *);
+ int (*readpage)(struct file *, struct page *);
+ int (*writepages)(struct address_space *, struct writeback_control *);
+ int (*set_page_dirty)(struct page *);
+ int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);
+ void (*readahead)(struct readahead_control *);
+ int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page **, void **);
+ int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page *, void *);
+ sector_t (*bmap)(struct address_space *, sector_t);
+ void (*invalidatepage)(struct page *, unsigned int, unsigned int);
+ int (*releasepage)(struct page *, gfp_t);
+ void (*freepage)(struct page *);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *);
+ int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode);
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ void (*putback_page)(struct page *);
+ int (*launder_page)(struct page *);
+ int (*is_partially_uptodate)(struct page *, long unsigned int, long unsigned int);
+ void (*is_dirty_writeback)(struct page *, bool *, bool *);
+ int (*error_remove_page)(struct address_space *, struct page *);
+ int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);
+ void (*swap_deactivate)(struct file *);
+};
+
+struct hd_struct;
+
+struct gendisk;
+
+struct block_device {
+ dev_t bd_dev;
+ int bd_openers;
+ struct inode *bd_inode;
+ struct super_block *bd_super;
+ struct mutex bd_mutex;
+ void *bd_claiming;
+ void *bd_holder;
+ int bd_holders;
+ bool bd_write_holder;
+ struct list_head bd_holder_disks;
+ struct block_device *bd_contains;
+ unsigned int bd_block_size;
+ u8 bd_partno;
+ struct hd_struct *bd_part;
+ unsigned int bd_part_count;
+ int bd_invalidated;
+ struct gendisk *bd_disk;
+ struct request_queue *bd_queue;
+ struct backing_dev_info *bd_bdi;
+ struct list_head bd_list;
+ long unsigned int bd_private;
+ int bd_fsfreeze_count;
+ struct mutex bd_fsfreeze_mutex;
+};
+
+struct fiemap_extent_info;
+
+struct inode_operations {
+ struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);
+ const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);
+ int (*permission)(struct inode *, int);
+ struct posix_acl * (*get_acl)(struct inode *, int);
+ int (*readlink)(struct dentry *, char *, int);
+ int (*create)(struct inode *, struct dentry *, umode_t, bool);
+ int (*link)(struct dentry *, struct inode *, struct dentry *);
+ int (*unlink)(struct inode *, struct dentry *);
+ int (*symlink)(struct inode *, struct dentry *, const char *);
+ int (*mkdir)(struct inode *, struct dentry *, umode_t);
+ int (*rmdir)(struct inode *, struct dentry *);
+ int (*mknod)(struct inode *, struct dentry *, umode_t, dev_t);
+ int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);
+ int (*setattr)(struct dentry *, struct iattr *);
+ int (*getattr)(const struct path *, struct kstat *, u32, unsigned int);
+ ssize_t (*listxattr)(struct dentry *, char *, size_t);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64);
+ int (*update_time)(struct inode *, struct timespec64 *, int);
+ int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t);
+ int (*tmpfile)(struct inode *, struct dentry *, umode_t);
+ int (*set_acl)(struct inode *, struct posix_acl *, int);
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct nlm_lockowner;
+
+struct nfs_lock_info {
+ u32 state;
+ struct nlm_lockowner *owner;
+ struct list_head list;
+};
+
+struct nfs4_lock_state;
+
+struct nfs4_lock_info {
+ struct nfs4_lock_state *owner;
+};
+
+struct fasync_struct;
+
+struct lock_manager_operations;
+
+struct file_lock {
+ struct file_lock *fl_blocker;
+ struct list_head fl_list;
+ struct hlist_node fl_link;
+ struct list_head fl_blocked_requests;
+ struct list_head fl_blocked_member;
+ fl_owner_t fl_owner;
+ unsigned int fl_flags;
+ unsigned char fl_type;
+ unsigned int fl_pid;
+ int fl_link_cpu;
+ wait_queue_head_t fl_wait;
+ struct file *fl_file;
+ loff_t fl_start;
+ loff_t fl_end;
+ struct fasync_struct *fl_fasync;
+ long unsigned int fl_break_time;
+ long unsigned int fl_downgrade_time;
+ const struct file_lock_operations *fl_ops;
+ const struct lock_manager_operations *fl_lmops;
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link;
+ int state;
+ unsigned int debug_id;
+ } afs;
+ } fl_u;
+};
+
+struct lock_manager_operations {
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *);
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
+};
+
+struct fasync_struct {
+ rwlock_t fa_lock;
+ int magic;
+ int fa_fd;
+ struct fasync_struct *fa_next;
+ struct file *fa_file;
+ struct callback_head fa_rcu;
+};
+
+struct kstatfs;
+
+struct super_operations {
+ struct inode * (*alloc_inode)(struct super_block *);
+ void (*destroy_inode)(struct inode *);
+ void (*free_inode)(struct inode *);
+ void (*dirty_inode)(struct inode *, int);
+ int (*write_inode)(struct inode *, struct writeback_control *);
+ int (*drop_inode)(struct inode *);
+ void (*evict_inode)(struct inode *);
+ void (*put_super)(struct super_block *);
+ int (*sync_fs)(struct super_block *, int);
+ int (*freeze_super)(struct super_block *);
+ int (*freeze_fs)(struct super_block *);
+ int (*thaw_super)(struct super_block *);
+ int (*unfreeze_fs)(struct super_block *);
+ int (*statfs)(struct dentry *, struct kstatfs *);
+ int (*remount_fs)(struct super_block *, int *, char *);
+ void (*umount_begin)(struct super_block *);
+ int (*show_options)(struct seq_file *, struct dentry *);
+ int (*show_devname)(struct seq_file *, struct dentry *);
+ int (*show_path)(struct seq_file *, struct dentry *);
+ int (*show_stats)(struct seq_file *, struct dentry *);
+ int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t);
+ long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);
+ long int (*free_cached_objects)(struct super_block *, struct shrink_control *);
+};
+
+struct fid;
+
+struct iomap;
+
+struct export_operations {
+ int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *);
+ struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int);
+ struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int);
+ int (*get_name)(struct dentry *, char *, struct dentry *);
+ struct dentry * (*get_parent)(struct dentry *);
+ int (*commit_metadata)(struct inode *);
+ int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *);
+ int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *);
+ int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *);
+};
+
+struct xattr_handler {
+ const char *name;
+ const char *prefix;
+ int flags;
+ bool (*list)(struct dentry *);
+ int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t);
+ int (*set)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, const void *, size_t, int);
+};
+
+typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int);
+
+struct dir_context {
+ filldir_t actor;
+ loff_t pos;
+};
+
+struct p_log;
+
+struct fs_parameter;
+
+struct fs_parse_result;
+
+typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *);
+
+struct fs_parameter_spec {
+ const char *name;
+ fs_param_type *type;
+ u8 opt;
+ short unsigned int flags;
+ const void *data;
+};
+
+struct attribute {
+ const char *name;
+ umode_t mode;
+ bool ignore_lockdep: 1;
+ struct lock_class_key *key;
+ struct lock_class_key skey;
+};
+
+struct kobj_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t);
+};
+
+typedef void compound_page_dtor(struct page *);
+
+enum compound_dtor_id {
+ NULL_COMPOUND_DTOR = 0,
+ COMPOUND_PAGE_DTOR = 1,
+ HUGETLB_PAGE_DTOR = 2,
+ NR_COMPOUND_DTORS = 3,
+};
+
+enum vm_event_item {
+ PGPGIN = 0,
+ PGPGOUT = 1,
+ PSWPIN = 2,
+ PSWPOUT = 3,
+ PGALLOC_DMA = 4,
+ PGALLOC_DMA32 = 5,
+ PGALLOC_NORMAL = 6,
+ PGALLOC_MOVABLE = 7,
+ ALLOCSTALL_DMA = 8,
+ ALLOCSTALL_DMA32 = 9,
+ ALLOCSTALL_NORMAL = 10,
+ ALLOCSTALL_MOVABLE = 11,
+ PGSCAN_SKIP_DMA = 12,
+ PGSCAN_SKIP_DMA32 = 13,
+ PGSCAN_SKIP_NORMAL = 14,
+ PGSCAN_SKIP_MOVABLE = 15,
+ PGFREE = 16,
+ PGACTIVATE = 17,
+ PGDEACTIVATE = 18,
+ PGLAZYFREE = 19,
+ PGFAULT = 20,
+ PGMAJFAULT = 21,
+ PGLAZYFREED = 22,
+ PGREFILL = 23,
+ PGSTEAL_KSWAPD = 24,
+ PGSTEAL_DIRECT = 25,
+ PGSCAN_KSWAPD = 26,
+ PGSCAN_DIRECT = 27,
+ PGSCAN_DIRECT_THROTTLE = 28,
+ PGSCAN_ANON = 29,
+ PGSCAN_FILE = 30,
+ PGSTEAL_ANON = 31,
+ PGSTEAL_FILE = 32,
+ PGINODESTEAL = 33,
+ SLABS_SCANNED = 34,
+ KSWAPD_INODESTEAL = 35,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY = 36,
+ KSWAPD_HIGH_WMARK_HIT_QUICKLY = 37,
+ PAGEOUTRUN = 38,
+ PGROTATED = 39,
+ DROP_PAGECACHE = 40,
+ DROP_SLAB = 41,
+ OOM_KILL = 42,
+ PGMIGRATE_SUCCESS = 43,
+ PGMIGRATE_FAIL = 44,
+ COMPACTMIGRATE_SCANNED = 45,
+ COMPACTFREE_SCANNED = 46,
+ COMPACTISOLATED = 47,
+ COMPACTSTALL = 48,
+ COMPACTFAIL = 49,
+ COMPACTSUCCESS = 50,
+ KCOMPACTD_WAKE = 51,
+ KCOMPACTD_MIGRATE_SCANNED = 52,
+ KCOMPACTD_FREE_SCANNED = 53,
+ HTLB_BUDDY_PGALLOC = 54,
+ HTLB_BUDDY_PGALLOC_FAIL = 55,
+ UNEVICTABLE_PGCULLED = 56,
+ UNEVICTABLE_PGSCANNED = 57,
+ UNEVICTABLE_PGRESCUED = 58,
+ UNEVICTABLE_PGMLOCKED = 59,
+ UNEVICTABLE_PGMUNLOCKED = 60,
+ UNEVICTABLE_PGCLEARED = 61,
+ UNEVICTABLE_PGSTRANDED = 62,
+ SWAP_RA = 63,
+ SWAP_RA_HIT = 64,
+ NR_VM_EVENT_ITEMS = 65,
+};
+
+struct vm_event_state {
+ long unsigned int event[65];
+};
+
+enum memblock_flags {
+ MEMBLOCK_NONE = 0,
+ MEMBLOCK_HOTPLUG = 1,
+ MEMBLOCK_MIRROR = 2,
+ MEMBLOCK_NOMAP = 4,
+};
+
+struct memblock_region {
+ phys_addr_t base;
+ phys_addr_t size;
+ enum memblock_flags flags;
+};
+
+struct memblock_type {
+ long unsigned int cnt;
+ long unsigned int max;
+ phys_addr_t total_size;
+ struct memblock_region *regions;
+ char *name;
+};
+
+struct memblock {
+ bool bottom_up;
+ phys_addr_t current_limit;
+ struct memblock_type memory;
+ struct memblock_type reserved;
+};
+
+struct debug_store {
+ u64 bts_buffer_base;
+ u64 bts_index;
+ u64 bts_absolute_maximum;
+ u64 bts_interrupt_threshold;
+ u64 pebs_buffer_base;
+ u64 pebs_index;
+ u64 pebs_absolute_maximum;
+ u64 pebs_interrupt_threshold;
+ u64 pebs_event_reset[12];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct debug_store_buffers {
+ char bts_buffer[65536];
+ char pebs_buffer[65536];
+};
+
+struct cea_exception_stacks {
+ char DF_stack_guard[4096];
+ char DF_stack[4096];
+ char NMI_stack_guard[4096];
+ char NMI_stack[4096];
+ char DB_stack_guard[4096];
+ char DB_stack[4096];
+ char MCE_stack_guard[4096];
+ char MCE_stack[4096];
+ char IST_top_guard[4096];
+};
+
+struct cpu_entry_area {
+ char gdt[4096];
+ struct entry_stack_page entry_stack_page;
+ struct tss_struct tss;
+ struct cea_exception_stacks estacks;
+ struct debug_store cpu_debug_store;
+ struct debug_store_buffers cpu_debug_buffers;
+};
+
+struct gdt_page {
+ struct desc_struct gdt[16];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct tlb_context {
+ u64 ctx_id;
+ u64 tlb_gen;
+};
+
+struct tlb_state {
+ struct mm_struct *loaded_mm;
+ union {
+ struct mm_struct *last_user_mm;
+ long unsigned int last_user_mm_ibpb;
+ };
+ u16 loaded_mm_asid;
+ u16 next_asid;
+ bool is_lazy;
+ bool invalidate_other;
+ short unsigned int user_pcid_flush_mask;
+ long unsigned int cr4;
+ struct tlb_context ctxs[6];
+};
+
+enum e820_type {
+ E820_TYPE_RAM = 1,
+ E820_TYPE_RESERVED = 2,
+ E820_TYPE_ACPI = 3,
+ E820_TYPE_NVS = 4,
+ E820_TYPE_UNUSABLE = 5,
+ E820_TYPE_PMEM = 7,
+ E820_TYPE_PRAM = 12,
+ E820_TYPE_SOFT_RESERVED = -268435457,
+ E820_TYPE_RESERVED_KERN = 128,
+};
+
+struct e820_entry {
+ u64 addr;
+ u64 size;
+ enum e820_type type;
+} __attribute__((packed));
+
+struct e820_table {
+ __u32 nr_entries;
+ struct e820_entry entries[131];
+} __attribute__((packed));
+
+struct boot_params_to_save {
+ unsigned int start;
+ unsigned int len;
+};
+
+struct idr {
+ struct xarray idr_rt;
+ unsigned int idr_base;
+ unsigned int idr_next;
+};
+
+struct kernfs_root;
+
+struct kernfs_elem_dir {
+ long unsigned int subdirs;
+ struct rb_root children;
+ struct kernfs_root *root;
+};
+
+struct kernfs_syscall_ops;
+
+struct kernfs_root {
+ struct kernfs_node *kn;
+ unsigned int flags;
+ struct idr ino_idr;
+ u32 last_id_lowbits;
+ u32 id_highbits;
+ struct kernfs_syscall_ops *syscall_ops;
+ struct list_head supers;
+ wait_queue_head_t deactivate_waitq;
+};
+
+struct kernfs_elem_symlink {
+ struct kernfs_node *target_kn;
+};
+
+struct kernfs_ops;
+
+struct kernfs_open_node;
+
+struct kernfs_elem_attr {
+ const struct kernfs_ops *ops;
+ struct kernfs_open_node *open;
+ loff_t size;
+ struct kernfs_node *notify_next;
+};
+
+struct kernfs_iattrs;
+
+struct kernfs_node {
+ atomic_t count;
+ atomic_t active;
+ struct lockdep_map dep_map;
+ struct kernfs_node *parent;
+ const char *name;
+ struct rb_node rb;
+ const void *ns;
+ unsigned int hash;
+ union {
+ struct kernfs_elem_dir dir;
+ struct kernfs_elem_symlink symlink;
+ struct kernfs_elem_attr attr;
+ };
+ void *priv;
+ u64 id;
+ short unsigned int flags;
+ umode_t mode;
+ struct kernfs_iattrs *iattr;
+};
+
+/* Represents the old kernfs node id present in kernels 5.4 and older */
+union kernfs_node_id {
+ struct {
+ /*
+ * blktrace will export this struct as a simplified 'struct
+ * fid' (which is a big data structure), so userspace can use
+ * it to find kernfs node. The layout must match the first two
+ * fields of 'struct fid' exactly.
+ */
+ u32 ino;
+ u32 generation;
+ };
+ u64 id;
+};
+
+struct kernfs_open_file;
+
+struct kernfs_ops {
+ int (*open)(struct kernfs_open_file *);
+ void (*release)(struct kernfs_open_file *);
+ int (*seq_show)(struct seq_file *, void *);
+ void * (*seq_start)(struct seq_file *, loff_t *);
+ void * (*seq_next)(struct seq_file *, void *, loff_t *);
+ void (*seq_stop)(struct seq_file *, void *);
+ ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t);
+ size_t atomic_write_len;
+ bool prealloc;
+ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t);
+ __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *);
+ int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);
+ struct lock_class_key lockdep_key;
+};
+
+struct kernfs_syscall_ops {
+ int (*show_options)(struct seq_file *, struct kernfs_root *);
+ int (*mkdir)(struct kernfs_node *, const char *, umode_t);
+ int (*rmdir)(struct kernfs_node *);
+ int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);
+ int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *);
+};
+
+struct kernfs_open_file {
+ struct kernfs_node *kn;
+ struct file *file;
+ struct seq_file *seq_file;
+ void *priv;
+ struct mutex mutex;
+ struct mutex prealloc_mutex;
+ int event;
+ struct list_head list;
+ char *prealloc_buf;
+ size_t atomic_write_len;
+ bool mmapped: 1;
+ bool released: 1;
+ const struct vm_operations_struct *vm_ops;
+};
+
+enum kobj_ns_type {
+ KOBJ_NS_TYPE_NONE = 0,
+ KOBJ_NS_TYPE_NET = 1,
+ KOBJ_NS_TYPES = 2,
+};
+
+struct sock;
+
+struct kobj_ns_type_operations {
+ enum kobj_ns_type type;
+ bool (*current_may_mount)();
+ void * (*grab_current_ns)();
+ const void * (*netlink_ns)(struct sock *);
+ const void * (*initial_ns)();
+ void (*drop_ns)(void *);
+};
+
+struct bin_attribute;
+
+struct attribute_group {
+ const char *name;
+ umode_t (*is_visible)(struct kobject *, struct attribute *, int);
+ umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);
+ struct attribute **attrs;
+ struct bin_attribute **bin_attrs;
+};
+
+struct bin_attribute {
+ struct attribute attr;
+ size_t size;
+ void *private;
+ ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t);
+ ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t);
+ int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *);
+};
+
+struct sysfs_ops {
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
+};
+
+struct kset_uevent_ops;
+
+struct kset {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct kobject kobj;
+ const struct kset_uevent_ops *uevent_ops;
+};
+
+struct kobj_type {
+ void (*release)(struct kobject *);
+ const struct sysfs_ops *sysfs_ops;
+ struct attribute **default_attrs;
+ const struct attribute_group **default_groups;
+ const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);
+ const void * (*namespace)(struct kobject *);
+ void (*get_ownership)(struct kobject *, kuid_t *, kgid_t *);
+};
+
+struct kobj_uevent_env {
+ char *argv[3];
+ char *envp[64];
+ int envp_idx;
+ char buf[2048];
+ int buflen;
+};
+
+struct kset_uevent_ops {
+ int (* const filter)(struct kset *, struct kobject *);
+ const char * (* const name)(struct kset *, struct kobject *);
+ int (* const uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *);
+};
+
+struct dev_pm_ops {
+ int (*prepare)(struct device *);
+ void (*complete)(struct device *);
+ int (*suspend)(struct device *);
+ int (*resume)(struct device *);
+ int (*freeze)(struct device *);
+ int (*thaw)(struct device *);
+ int (*poweroff)(struct device *);
+ int (*restore)(struct device *);
+ int (*suspend_late)(struct device *);
+ int (*resume_early)(struct device *);
+ int (*freeze_late)(struct device *);
+ int (*thaw_early)(struct device *);
+ int (*poweroff_late)(struct device *);
+ int (*restore_early)(struct device *);
+ int (*suspend_noirq)(struct device *);
+ int (*resume_noirq)(struct device *);
+ int (*freeze_noirq)(struct device *);
+ int (*thaw_noirq)(struct device *);
+ int (*poweroff_noirq)(struct device *);
+ int (*restore_noirq)(struct device *);
+ int (*runtime_suspend)(struct device *);
+ int (*runtime_resume)(struct device *);
+ int (*runtime_idle)(struct device *);
+};
+
+struct pm_subsys_data {
+ spinlock_t lock;
+ unsigned int refcount;
+};
+
+struct dev_pm_domain {
+ struct dev_pm_ops ops;
+ int (*start)(struct device *);
+ void (*detach)(struct device *, bool);
+ int (*activate)(struct device *);
+ void (*sync)(struct device *);
+ void (*dismiss)(struct device *);
+};
+
+struct iommu_ops;
+
+struct subsys_private;
+
+struct bus_type {
+ const char *name;
+ const char *dev_name;
+ struct device *dev_root;
+ const struct attribute_group **bus_groups;
+ const struct attribute_group **dev_groups;
+ const struct attribute_group **drv_groups;
+ int (*match)(struct device *, struct device_driver *);
+ int (*uevent)(struct device *, struct kobj_uevent_env *);
+ int (*probe)(struct device *);
+ void (*sync_state)(struct device *);
+ int (*remove)(struct device *);
+ void (*shutdown)(struct device *);
+ int (*online)(struct device *);
+ int (*offline)(struct device *);
+ int (*suspend)(struct device *, pm_message_t);
+ int (*resume)(struct device *);
+ int (*num_vf)(struct device *);
+ int (*dma_configure)(struct device *);
+ const struct dev_pm_ops *pm;
+ const struct iommu_ops *iommu_ops;
+ struct subsys_private *p;
+ struct lock_class_key lock_key;
+ bool need_parent_lock;
+};
+
+enum probe_type {
+ PROBE_DEFAULT_STRATEGY = 0,
+ PROBE_PREFER_ASYNCHRONOUS = 1,
+ PROBE_FORCE_SYNCHRONOUS = 2,
+};
+
+struct of_device_id;
+
+struct acpi_device_id;
+
+struct driver_private;
+
+struct device_driver {
+ const char *name;
+ struct bus_type *bus;
+ struct module *owner;
+ const char *mod_name;
+ bool suppress_bind_attrs;
+ enum probe_type probe_type;
+ const struct of_device_id *of_match_table;
+ const struct acpi_device_id *acpi_match_table;
+ int (*probe)(struct device *);
+ void (*sync_state)(struct device *);
+ int (*remove)(struct device *);
+ void (*shutdown)(struct device *);
+ int (*suspend)(struct device *, pm_message_t);
+ int (*resume)(struct device *);
+ const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
+ const struct dev_pm_ops *pm;
+ void (*coredump)(struct device *);
+ struct driver_private *p;
+};
+
+struct iommu_ops {};
+
+struct device_type {
+ const char *name;
+ const struct attribute_group **groups;
+ int (*uevent)(struct device *, struct kobj_uevent_env *);
+ char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);
+ void (*release)(struct device *);
+ const struct dev_pm_ops *pm;
+};
+
+struct class {
+ const char *name;
+ struct module *owner;
+ const struct attribute_group **class_groups;
+ const struct attribute_group **dev_groups;
+ struct kobject *dev_kobj;
+ int (*dev_uevent)(struct device *, struct kobj_uevent_env *);
+ char * (*devnode)(struct device *, umode_t *);
+ void (*class_release)(struct class *);
+ void (*dev_release)(struct device *);
+ int (*shutdown_pre)(struct device *);
+ const struct kobj_ns_type_operations *ns_type;
+ const void * (*namespace)(struct device *);
+ void (*get_ownership)(struct device *, kuid_t *, kgid_t *);
+ const struct dev_pm_ops *pm;
+ struct subsys_private *p;
+};
+
+struct of_device_id {
+ char name[32];
+ char type[32];
+ char compatible[128];
+ const void *data;
+};
+
+typedef long unsigned int kernel_ulong_t;
+
+struct acpi_device_id {
+ __u8 id[9];
+ kernel_ulong_t driver_data;
+ __u32 cls;
+ __u32 cls_msk;
+};
+
+struct device_dma_parameters {
+ unsigned int max_segment_size;
+ long unsigned int segment_boundary_mask;
+};
+
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct sg_table;
+
+struct scatterlist;
+
+struct dma_map_ops {
+ void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int);
+ void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int);
+ int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int);
+ int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int);
+ dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int);
+ void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int);
+ int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int);
+ void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int);
+ dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int);
+ void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int);
+ void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+ void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+ void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction);
+ void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction);
+ void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction);
+ int (*dma_supported)(struct device *, u64);
+ u64 (*get_required_mask)(struct device *);
+ size_t (*max_mapping_size)(struct device *);
+ long unsigned int (*get_merge_boundary)(struct device *);
+};
+
+struct node {
+ struct device dev;
+ struct list_head access_list;
+ struct work_struct node_work;
+};
+
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED = 0,
+ CPU_SMT_DISABLED = 1,
+ CPU_SMT_FORCE_DISABLED = 2,
+ CPU_SMT_NOT_SUPPORTED = 3,
+ CPU_SMT_NOT_IMPLEMENTED = 4,
+};
+
+struct cpu_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int rev;
+};
+
+struct ucode_cpu_info {
+ struct cpu_signature cpu_sig;
+ int valid;
+ void *mc;
+};
+
+typedef long unsigned int pto_T__;
+
+struct kobj_attribute___2;
+
+struct file_system_type___2;
+
+struct file_operations___2;
+
+struct atomic_notifier_head___2;
+
+typedef short int __s16;
+
+typedef __s16 s16;
+
+typedef long unsigned int irq_hw_number_t;
+
+struct kernel_symbol {
+ int value_offset;
+ int name_offset;
+ int namespace_offset;
+};
+
+typedef int (*initcall_t)();
+
+struct obs_kernel_param {
+ const char *str;
+ int (*setup_func)(char *);
+ int early;
+};
+
+enum ftrace_dump_mode {
+ DUMP_NONE = 0,
+ DUMP_ALL = 1,
+ DUMP_ORIG = 2,
+};
+
+struct bug_entry {
+ int bug_addr_disp;
+ int file_disp;
+ short unsigned int line;
+ short unsigned int flags;
+};
+
+struct pollfd {
+ int fd;
+ short int events;
+ short int revents;
+};
+
+struct orc_entry {
+ s16 sp_offset;
+ s16 bp_offset;
+ unsigned int sp_reg: 4;
+ unsigned int bp_reg: 4;
+ unsigned int type: 2;
+ unsigned int end: 1;
+} __attribute__((packed));
+
+typedef const int tracepoint_ptr_t;
+
+struct bpf_raw_event_map {
+ struct tracepoint *tp;
+ void *bpf_func;
+ u32 num_args;
+ u32 writable_size;
+ long: 64;
+};
+
+enum perf_event_state {
+ PERF_EVENT_STATE_DEAD = -4,
+ PERF_EVENT_STATE_EXIT = -3,
+ PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_OFF = -1,
+ PERF_EVENT_STATE_INACTIVE = 0,
+ PERF_EVENT_STATE_ACTIVE = 1,
+};
+
+typedef struct {
+ atomic_long_t a;
+} local_t;
+
+typedef struct {
+ local_t a;
+} local64_t;
+
+struct perf_event_attr {
+ __u32 type;
+ __u32 size;
+ __u64 config;
+ union {
+ __u64 sample_period;
+ __u64 sample_freq;
+ };
+ __u64 sample_type;
+ __u64 read_format;
+ __u64 disabled: 1;
+ __u64 inherit: 1;
+ __u64 pinned: 1;
+ __u64 exclusive: 1;
+ __u64 exclude_user: 1;
+ __u64 exclude_kernel: 1;
+ __u64 exclude_hv: 1;
+ __u64 exclude_idle: 1;
+ __u64 mmap: 1;
+ __u64 comm: 1;
+ __u64 freq: 1;
+ __u64 inherit_stat: 1;
+ __u64 enable_on_exec: 1;
+ __u64 task: 1;
+ __u64 watermark: 1;
+ __u64 precise_ip: 2;
+ __u64 mmap_data: 1;
+ __u64 sample_id_all: 1;
+ __u64 exclude_host: 1;
+ __u64 exclude_guest: 1;
+ __u64 exclude_callchain_kernel: 1;
+ __u64 exclude_callchain_user: 1;
+ __u64 mmap2: 1;
+ __u64 comm_exec: 1;
+ __u64 use_clockid: 1;
+ __u64 context_switch: 1;
+ __u64 write_backward: 1;
+ __u64 namespaces: 1;
+ __u64 ksymbol: 1;
+ __u64 bpf_event: 1;
+ __u64 aux_output: 1;
+ __u64 cgroup: 1;
+ __u64 text_poke: 1;
+ __u64 build_id: 1;
+ __u64 inherit_thread: 1;
+ __u64 remove_on_exec: 1;
+ __u64 sigtrap: 1;
+ __u64 __reserved_1: 26;
+
+ union {
+ __u32 wakeup_events;
+ __u32 wakeup_watermark;
+ };
+ __u32 bp_type;
+ union {
+ __u64 bp_addr;
+ __u64 kprobe_func;
+ __u64 uprobe_path;
+ __u64 config1;
+ };
+ union {
+ __u64 bp_len;
+ __u64 kprobe_addr;
+ __u64 probe_offset;
+ __u64 config2;
+ };
+ __u64 branch_sample_type;
+ __u64 sample_regs_user;
+ __u32 sample_stack_user;
+ __s32 clockid;
+ __u64 sample_regs_intr;
+ __u32 aux_watermark;
+ __u16 sample_max_stack;
+ __u16 __reserved_2;
+ __u32 aux_sample_size;
+ __u32 __reserved_3;
+};
+
+struct hw_perf_event_extra {
+ u64 config;
+ unsigned int reg;
+ int alloc;
+ int idx;
+};
+
+struct arch_hw_breakpoint {
+ long unsigned int address;
+ long unsigned int mask;
+ u8 len;
+ u8 type;
+};
+
+struct hw_perf_event {
+ union {
+ struct {
+ u64 config;
+ u64 last_tag;
+ long unsigned int config_base;
+ long unsigned int event_base;
+ int event_base_rdpmc;
+ int idx;
+ int last_cpu;
+ int flags;
+ struct hw_perf_event_extra extra_reg;
+ struct hw_perf_event_extra branch_reg;
+ };
+ struct {
+ struct hrtimer hrtimer;
+ };
+ struct {
+ struct list_head tp_list;
+ };
+ struct {
+ u64 pwr_acc;
+ u64 ptsc;
+ };
+ struct {
+ struct arch_hw_breakpoint info;
+ struct list_head bp_list;
+ };
+ struct {
+ u8 iommu_bank;
+ u8 iommu_cntr;
+ u16 padding;
+ u64 conf;
+ u64 conf1;
+ };
+ };
+ struct task_struct *target;
+ void *addr_filters;
+ long unsigned int addr_filters_gen;
+ int state;
+ local64_t prev_count;
+ u64 sample_period;
+ u64 last_period;
+ local64_t period_left;
+ u64 interrupts_seq;
+ u64 interrupts;
+ u64 freq_time_stamp;
+ u64 freq_count_stamp;
+};
+
+struct irq_work {
+ struct llist_node llnode;
+ atomic_t flags;
+ void (*func)(struct irq_work *);
+};
+
+struct perf_addr_filters_head {
+ struct list_head list;
+ raw_spinlock_t lock;
+ unsigned int nr_file_filters;
+};
+
+struct perf_sample_data;
+
+typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *);
+
+struct pmu;
+
+struct perf_buffer;
+
+struct perf_addr_filter_range;
+
+struct bpf_prog;
+
+struct trace_event_call;
+
+struct event_filter;
+
+struct perf_cgroup;
+
+struct perf_event {
+ struct list_head event_entry;
+ struct list_head sibling_list;
+ struct list_head active_list;
+ struct rb_node group_node;
+ u64 group_index;
+ struct list_head migrate_entry;
+ struct hlist_node hlist_entry;
+ struct list_head active_entry;
+ int nr_siblings;
+ int event_caps;
+ int group_caps;
+ struct perf_event *group_leader;
+ struct pmu *pmu;
+ void *pmu_private;
+ enum perf_event_state state;
+ unsigned int attach_state;
+ local64_t count;
+ atomic64_t child_count;
+ u64 total_time_enabled;
+ u64 total_time_running;
+ u64 tstamp;
+ u64 shadow_ctx_time;
+ struct perf_event_attr attr;
+ u16 header_size;
+ u16 id_header_size;
+ u16 read_size;
+ struct hw_perf_event hw;
+ struct perf_event_context *ctx;
+ atomic_long_t refcount;
+ atomic64_t child_total_time_enabled;
+ atomic64_t child_total_time_running;
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_event *parent;
+ int oncpu;
+ int cpu;
+ struct list_head owner_entry;
+ struct task_struct *owner;
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+ struct perf_buffer *rb;
+ struct list_head rb_entry;
+ long unsigned int rcu_batches;
+ int rcu_pending;
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+ int pending_wakeup;
+ int pending_kill;
+ int pending_disable;
+ struct irq_work pending;
+ atomic_t event_limit;
+ struct perf_addr_filters_head addr_filters;
+ struct perf_addr_filter_range *addr_filter_ranges;
+ long unsigned int addr_filters_gen;
+ struct perf_event *aux_event;
+ void (*destroy)(struct perf_event *);
+ struct callback_head callback_head;
+ struct pid_namespace *ns;
+ u64 id;
+ u64 (*clock)();
+ perf_overflow_handler_t overflow_handler;
+ void *overflow_handler_context;
+ perf_overflow_handler_t orig_overflow_handler;
+ struct bpf_prog *prog;
+ struct trace_event_call *tp_event;
+ struct event_filter *filter;
+ struct perf_cgroup *cgrp;
+ void *security;
+ struct list_head sb_list;
+};
+
+enum lockdep_wait_type {
+ LD_WAIT_INV = 0,
+ LD_WAIT_FREE = 1,
+ LD_WAIT_SPIN = 2,
+ LD_WAIT_CONFIG = 2,
+ LD_WAIT_SLEEP = 3,
+ LD_WAIT_MAX = 4,
+};
+
+struct uid_gid_extent {
+ u32 first;
+ u32 lower_first;
+ u32 count;
+};
+
+struct uid_gid_map {
+ u32 nr_extents;
+ union {
+ struct uid_gid_extent extent[5];
+ struct {
+ struct uid_gid_extent *forward;
+ struct uid_gid_extent *reverse;
+ };
+ };
+};
+
+struct proc_ns_operations;
+
+struct ns_common {
+ atomic_long_t stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+};
+
+struct ctl_table_root;
+
+struct ctl_table_set;
+
+struct ctl_dir;
+
+struct ctl_node;
+
+struct ctl_table_header {
+ union {
+ struct {
+ struct ctl_table *ctl_table;
+ int used;
+ int count;
+ int nreg;
+ };
+ struct callback_head rcu;
+ };
+ struct completion *unregistering;
+ struct ctl_table *ctl_table_arg;
+ struct ctl_table_root *root;
+ struct ctl_table_set *set;
+ struct ctl_dir *parent;
+ struct ctl_node *node;
+ struct hlist_head inodes;
+};
+
+struct ctl_dir {
+ struct ctl_table_header header;
+ struct rb_root root;
+};
+
+struct ctl_table_set {
+ int (*is_seen)(struct ctl_table_set *);
+ struct ctl_dir dir;
+};
+
+struct ucounts;
+
+struct user_namespace {
+ struct uid_gid_map uid_map;
+ struct uid_gid_map gid_map;
+ struct uid_gid_map projid_map;
+ atomic_t count;
+ struct user_namespace *parent;
+ int level;
+ kuid_t owner;
+ kgid_t group;
+ struct ns_common ns;
+ long unsigned int flags;
+ struct work_struct work;
+ struct ctl_table_set set;
+ struct ctl_table_header *sysctls;
+ struct ucounts *ucounts;
+ int ucount_max[10];
+};
+
+enum node_states {
+ N_POSSIBLE = 0,
+ N_ONLINE = 1,
+ N_NORMAL_MEMORY = 2,
+ N_HIGH_MEMORY = 2,
+ N_MEMORY = 3,
+ N_CPU = 4,
+ NR_NODE_STATES = 5,
+};
+
+struct timer_list {
+ struct hlist_node entry;
+ long unsigned int expires;
+ void (*function)(struct timer_list *);
+ u32 flags;
+ struct lockdep_map lockdep_map;
+};
+
+struct delayed_work {
+ struct work_struct work;
+ struct timer_list timer;
+ struct workqueue_struct *wq;
+ int cpu;
+};
+
+struct rcu_work {
+ struct work_struct work;
+ struct callback_head rcu;
+ struct workqueue_struct *wq;
+};
+
+struct rcu_segcblist {
+ struct callback_head *head;
+ struct callback_head **tails[4];
+ long unsigned int gp_seq[4];
+ atomic_long_t len;
+ u8 enabled;
+ u8 offloaded;
+};
+
+struct srcu_node;
+
+struct srcu_struct;
+
+struct srcu_data {
+ long unsigned int srcu_lock_count[2];
+ long unsigned int srcu_unlock_count[2];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ spinlock_t lock;
+ struct rcu_segcblist srcu_cblist;
+ long unsigned int srcu_gp_seq_needed;
+ long unsigned int srcu_gp_seq_needed_exp;
+ bool srcu_cblist_invoking;
+ struct timer_list delay_work;
+ struct work_struct work;
+ struct callback_head srcu_barrier_head;
+ struct srcu_node *mynode;
+ long unsigned int grpmask;
+ int cpu;
+ struct srcu_struct *ssp;
+ long: 64;
+};
+
+struct srcu_node {
+ spinlock_t lock;
+ long unsigned int srcu_have_cbs[4];
+ long unsigned int srcu_data_have_cbs[4];
+ long unsigned int srcu_gp_seq_needed_exp;
+ struct srcu_node *srcu_parent;
+ int grplo;
+ int grphi;
+};
+
+struct srcu_struct {
+ struct srcu_node node[16];
+ struct srcu_node *level[3];
+ struct mutex srcu_cb_mutex;
+ spinlock_t lock;
+ struct mutex srcu_gp_mutex;
+ unsigned int srcu_idx;
+ long unsigned int srcu_gp_seq;
+ long unsigned int srcu_gp_seq_needed;
+ long unsigned int srcu_gp_seq_needed_exp;
+ long unsigned int srcu_last_gp_end;
+ struct srcu_data *sda;
+ long unsigned int srcu_barrier_seq;
+ struct mutex srcu_barrier_mutex;
+ struct completion srcu_barrier_completion;
+ atomic_t srcu_barrier_cpu_cnt;
+ struct delayed_work work;
+ struct lockdep_map dep_map;
+};
+
+struct cgroup;
+
+struct cgroup_subsys;
+
+struct cgroup_subsys_state {
+ struct cgroup *cgroup;
+ struct cgroup_subsys *ss;
+ struct percpu_ref refcnt;
+ struct list_head sibling;
+ struct list_head children;
+ struct list_head rstat_css_node;
+ int id;
+ unsigned int flags;
+ u64 serial_nr;
+ atomic_t online_cnt;
+ struct work_struct destroy_work;
+ struct rcu_work destroy_rwork;
+ struct cgroup_subsys_state *parent;
+};
+
+struct mem_cgroup_id {
+ int id;
+ refcount_t ref;
+};
+
+struct page_counter {
+ atomic_long_t usage;
+ long unsigned int min;
+ long unsigned int low;
+ long unsigned int high;
+ long unsigned int max;
+ struct page_counter *parent;
+ long unsigned int emin;
+ atomic_long_t min_usage;
+ atomic_long_t children_min_usage;
+ long unsigned int elow;
+ atomic_long_t low_usage;
+ atomic_long_t children_low_usage;
+ long unsigned int watermark;
+ long unsigned int failcnt;
+};
+
+struct vmpressure {
+ long unsigned int scanned;
+ long unsigned int reclaimed;
+ long unsigned int tree_scanned;
+ long unsigned int tree_reclaimed;
+ spinlock_t sr_lock;
+ struct list_head events;
+ struct mutex events_lock;
+ struct work_struct work;
+};
+
+struct cgroup_file {
+ struct kernfs_node *kn;
+ long unsigned int notified_at;
+ struct timer_list notify_timer;
+};
+
+struct mem_cgroup_threshold_ary;
+
+struct mem_cgroup_thresholds {
+ struct mem_cgroup_threshold_ary *primary;
+ struct mem_cgroup_threshold_ary *spare;
+};
+
+struct memcg_padding {
+ char x[0];
+};
+
+enum memcg_kmem_state {
+ KMEM_NONE = 0,
+ KMEM_ALLOCATED = 1,
+ KMEM_ONLINE = 2,
+};
+
+struct fprop_global {
+ struct percpu_counter events;
+ unsigned int period;
+ seqcount_t sequence;
+};
+
+struct wb_domain {
+ spinlock_t lock;
+ struct fprop_global completions;
+ struct timer_list period_timer;
+ long unsigned int period_time;
+ long unsigned int dirty_limit_tstamp;
+ long unsigned int dirty_limit;
+};
+
+struct wb_completion {
+ atomic_t cnt;
+ wait_queue_head_t *waitq;
+};
+
+struct memcg_cgwb_frn {
+ u64 bdi_id;
+ int memcg_id;
+ u64 at;
+ struct wb_completion done;
+};
+
+struct memcg_vmstats_percpu;
+
+struct mem_cgroup_per_node;
+
+struct mem_cgroup {
+ struct cgroup_subsys_state css;
+ struct mem_cgroup_id id;
+ struct page_counter memory;
+ struct page_counter swap;
+ struct page_counter memsw;
+ struct page_counter kmem;
+ struct page_counter tcpmem;
+ struct work_struct high_work;
+ long unsigned int soft_limit;
+ struct vmpressure vmpressure;
+ bool use_hierarchy;
+ bool oom_group;
+ bool oom_lock;
+ int under_oom;
+ int swappiness;
+ int oom_kill_disable;
+ struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
+ struct cgroup_file swap_events_file;
+ struct mutex thresholds_lock;
+ struct mem_cgroup_thresholds thresholds;
+ struct mem_cgroup_thresholds memsw_thresholds;
+ struct list_head oom_notify;
+ long unsigned int move_charge_at_immigrate;
+ spinlock_t move_lock;
+ long unsigned int move_lock_flags;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct memcg_padding _pad1_;
+ atomic_t moving_account;
+ struct task_struct *move_lock_task;
+ struct memcg_vmstats_percpu *vmstats_local;
+ struct memcg_vmstats_percpu *vmstats_percpu;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct memcg_padding _pad2_;
+ atomic_long_t vmstats[36];
+ atomic_long_t vmevents[65];
+ atomic_long_t memory_events[8];
+ atomic_long_t memory_events_local[8];
+ long unsigned int socket_pressure;
+ bool tcpmem_active;
+ int tcpmem_pressure;
+ int kmemcg_id;
+ enum memcg_kmem_state kmem_state;
+ struct list_head kmem_caches;
+ struct list_head cgwb_list;
+ struct wb_domain cgwb_domain;
+ struct memcg_cgwb_frn cgwb_frn[4];
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+ struct mem_cgroup_per_node *nodeinfo[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct anon_vma {
+ struct anon_vma *root;
+ struct rw_semaphore rwsem;
+ atomic_t refcount;
+ unsigned int degree;
+ struct anon_vma *parent;
+ struct rb_root_cached rb_root;
+};
+
+struct linux_binprm;
+
+struct coredump_params;
+
+struct linux_binfmt {
+ struct list_head lh;
+ struct module *module;
+ int (*load_binary)(struct linux_binprm *);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(struct coredump_params *);
+ long unsigned int min_coredump;
+};
+
+enum lru_list {
+ LRU_INACTIVE_ANON = 0,
+ LRU_ACTIVE_ANON = 1,
+ LRU_INACTIVE_FILE = 2,
+ LRU_ACTIVE_FILE = 3,
+ LRU_UNEVICTABLE = 4,
+ NR_LRU_LISTS = 5,
+};
+
+typedef void (*smp_call_func_t)(void *);
+
+struct __call_single_data {
+ struct llist_node llist;
+ unsigned int flags;
+ smp_call_func_t func;
+ void *info;
+};
+
+struct ctl_node {
+ struct rb_node node;
+ struct ctl_table_header *header;
+};
+
+struct ctl_table_root {
+ struct ctl_table_set default_set;
+ struct ctl_table_set * (*lookup)(struct ctl_table_root *);
+ void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *);
+ int (*permissions)(struct ctl_table_header *, struct ctl_table *);
+};
+
+enum umh_disable_depth {
+ UMH_ENABLED = 0,
+ UMH_FREEZING = 1,
+ UMH_DISABLED = 2,
+};
+
+struct va_alignment {
+ int flags;
+ long unsigned int mask;
+ long unsigned int bits;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef __u64 Elf64_Addr;
+
+typedef __u16 Elf64_Half;
+
+typedef __u32 Elf64_Word;
+
+typedef __u64 Elf64_Xword;
+
+typedef __s64 Elf64_Sxword;
+
+typedef struct {
+ Elf64_Sxword d_tag;
+ union {
+ Elf64_Xword d_val;
+ Elf64_Addr d_ptr;
+ } d_un;
+} Elf64_Dyn;
+
+struct elf64_sym {
+ Elf64_Word st_name;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf64_Half st_shndx;
+ Elf64_Addr st_value;
+ Elf64_Xword st_size;
+};
+
+typedef struct elf64_sym Elf64_Sym;
+
+struct seq_file {
+ char *buf;
+ size_t size;
+ size_t from;
+ size_t count;
+ size_t pad_until;
+ loff_t index;
+ loff_t read_pos;
+ struct mutex lock;
+ const struct seq_operations *op;
+ int poll_event;
+ const struct file *file;
+ void *private;
+};
+
+typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
+
+struct poll_table_struct {
+ poll_queue_proc _qproc;
+ __poll_t _key;
+};
+
+struct kernel_param;
+
+struct kernel_param_ops {
+ unsigned int flags;
+ int (*set)(const char *, const struct kernel_param *);
+ int (*get)(char *, const struct kernel_param *);
+ void (*free)(void *);
+};
+
+struct kparam_string;
+
+struct kparam_array;
+
+struct kernel_param {
+ const char *name;
+ struct module *mod;
+ const struct kernel_param_ops *ops;
+ const u16 perm;
+ s8 level;
+ u8 flags;
+ union {
+ void *arg;
+ const struct kparam_string *str;
+ const struct kparam_array *arr;
+ };
+};
+
+struct kparam_string {
+ unsigned int maxlen;
+ char *string;
+};
+
+struct kparam_array {
+ unsigned int max;
+ unsigned int elemsize;
+ unsigned int *num;
+ const struct kernel_param_ops *ops;
+ void *elem;
+};
+
+enum module_state {
+ MODULE_STATE_LIVE = 0,
+ MODULE_STATE_COMING = 1,
+ MODULE_STATE_GOING = 2,
+ MODULE_STATE_UNFORMED = 3,
+};
+
+struct module_param_attrs;
+
+struct module_kobject {
+ struct kobject kobj;
+ struct module *mod;
+ struct kobject *drivers_dir;
+ struct module_param_attrs *mp;
+ struct completion *kobj_completion;
+};
+
+struct latch_tree_node {
+ struct rb_node node[2];
+};
+
+struct mod_tree_node {
+ struct module *mod;
+ struct latch_tree_node node;
+};
+
+struct module_layout {
+ void *base;
+ unsigned int size;
+ unsigned int text_size;
+ unsigned int ro_size;
+ unsigned int ro_after_init_size;
+ struct mod_tree_node mtn;
+};
+
+struct mod_arch_specific {
+ unsigned int num_orcs;
+ int *orc_unwind_ip;
+ struct orc_entry *orc_unwind;
+};
+
+struct mod_kallsyms {
+ Elf64_Sym *symtab;
+ unsigned int num_symtab;
+ char *strtab;
+ char *typetab;
+};
+
+struct module_attribute;
+
+struct exception_table_entry;
+
+struct module_sect_attrs;
+
+struct module_notes_attrs;
+
+struct trace_eval_map;
+
+struct error_injection_entry;
+
+struct module {
+ enum module_state state;
+ struct list_head list;
+ char name[56];
+ struct module_kobject mkobj;
+ struct module_attribute *modinfo_attrs;
+ const char *version;
+ const char *srcversion;
+ struct kobject *holders_dir;
+ const struct kernel_symbol *syms;
+ const s32 *crcs;
+ unsigned int num_syms;
+ struct mutex param_lock;
+ struct kernel_param *kp;
+ unsigned int num_kp;
+ unsigned int num_gpl_syms;
+ const struct kernel_symbol *gpl_syms;
+ const s32 *gpl_crcs;
+ const struct kernel_symbol *unused_syms;
+ const s32 *unused_crcs;
+ unsigned int num_unused_syms;
+ unsigned int num_unused_gpl_syms;
+ const struct kernel_symbol *unused_gpl_syms;
+ const s32 *unused_gpl_crcs;
+ bool async_probe_requested;
+ const struct kernel_symbol *gpl_future_syms;
+ const s32 *gpl_future_crcs;
+ unsigned int num_gpl_future_syms;
+ unsigned int num_exentries;
+ struct exception_table_entry *extable;
+ int (*init)();
+ long: 64;
+ long: 64;
+ struct module_layout core_layout;
+ struct module_layout init_layout;
+ struct mod_arch_specific arch;
+ long unsigned int taints;
+ unsigned int num_bugs;
+ struct list_head bug_list;
+ struct bug_entry *bug_table;
+ struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms core_kallsyms;
+ struct module_sect_attrs *sect_attrs;
+ struct module_notes_attrs *notes_attrs;
+ char *args;
+ void *percpu;
+ unsigned int percpu_size;
+ void *noinstr_text_start;
+ unsigned int noinstr_text_size;
+ unsigned int num_tracepoints;
+ tracepoint_ptr_t *tracepoints_ptrs;
+ unsigned int num_srcu_structs;
+ struct srcu_struct **srcu_struct_ptrs;
+ unsigned int num_bpf_raw_events;
+ struct bpf_raw_event_map *bpf_raw_events;
+ unsigned int num_trace_bprintk_fmt;
+ const char **trace_bprintk_fmt_start;
+ struct trace_event_call **trace_events;
+ unsigned int num_trace_events;
+ struct trace_eval_map **trace_evals;
+ unsigned int num_trace_evals;
+ void *kprobes_text_start;
+ unsigned int kprobes_text_size;
+ long unsigned int *kprobe_blacklist;
+ unsigned int num_kprobe_blacklist;
+ struct list_head source_list;
+ struct list_head target_list;
+ void (*exit)();
+ atomic_t refcnt;
+ struct error_injection_entry *ei_funcs;
+ unsigned int num_ei_funcs;
+ long: 32;
+ long: 64;
+};
+
+struct error_injection_entry {
+ long unsigned int addr;
+ int etype;
+};
+
+struct module_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *);
+ ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t);
+ void (*setup)(struct module *, const char *);
+ int (*test)(struct module *);
+ void (*free)(struct module *);
+};
+
+struct exception_table_entry {
+ int insn;
+ int fixup;
+ int handler;
+};
+
+struct trace_event_functions;
+
+struct trace_event {
+ struct hlist_node node;
+ struct list_head list;
+ int type;
+ struct trace_event_functions *funcs;
+};
+
+struct trace_event_class;
+
+struct bpf_prog_array;
+
+struct trace_event_call {
+ struct list_head list;
+ struct trace_event_class *class;
+ union {
+ char *name;
+ struct tracepoint *tp;
+ };
+ struct trace_event event;
+ char *print_fmt;
+ struct event_filter *filter;
+ void *mod;
+ void *data;
+ int flags;
+ int perf_refcount;
+ struct hlist_head *perf_events;
+ struct bpf_prog_array *prog_array;
+ int (*perf_perm)(struct trace_event_call *, struct perf_event *);
+};
+
+struct trace_eval_map {
+ const char *system;
+ const char *eval_string;
+ long unsigned int eval_value;
+};
+
+struct pid_namespace {
+ struct kref kref;
+ struct idr idr;
+ struct callback_head rcu;
+ unsigned int pid_allocated;
+ struct task_struct *child_reaper;
+ struct kmem_cache *pid_cachep;
+ unsigned int level;
+ struct pid_namespace *parent;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ int reboot;
+ struct ns_common ns;
+};
+
+struct task_cputime {
+ u64 stime;
+ u64 utime;
+ long long unsigned int sum_exec_runtime;
+};
+
+struct uts_namespace;
+
+struct ipc_namespace;
+
+struct mnt_namespace;
+
+struct net;
+
+struct time_namespace;
+
+struct cgroup_namespace;
+
+struct nsproxy {
+ atomic_t count;
+ struct uts_namespace *uts_ns;
+ struct ipc_namespace *ipc_ns;
+ struct mnt_namespace *mnt_ns;
+ struct pid_namespace *pid_ns_for_children;
+ struct net *net_ns;
+ struct time_namespace *time_ns;
+ struct time_namespace *time_ns_for_children;
+ struct cgroup_namespace *cgroup_ns;
+};
+
+struct bio;
+
+struct bio_list {
+ struct bio *head;
+ struct bio *tail;
+};
+
+struct blk_plug {
+ struct list_head mq_list;
+ struct list_head cb_list;
+ short unsigned int rq_count;
+ bool multiple_queues;
+};
+
+struct reclaim_state {
+ long unsigned int reclaimed_slab;
+};
+
+typedef int congested_fn(void *, int);
+
+struct fprop_local_percpu {
+ struct percpu_counter events;
+ unsigned int period;
+ raw_spinlock_t lock;
+};
+
+enum wb_reason {
+ WB_REASON_BACKGROUND = 0,
+ WB_REASON_VMSCAN = 1,
+ WB_REASON_SYNC = 2,
+ WB_REASON_PERIODIC = 3,
+ WB_REASON_LAPTOP_TIMER = 4,
+ WB_REASON_FS_FREE_SPACE = 5,
+ WB_REASON_FORKER_THREAD = 6,
+ WB_REASON_FOREIGN_FLUSH = 7,
+ WB_REASON_MAX = 8,
+};
+
+struct bdi_writeback_congested;
+
+struct bdi_writeback {
+ struct backing_dev_info *bdi;
+ long unsigned int state;
+ long unsigned int last_old_flush;
+ struct list_head b_dirty;
+ struct list_head b_io;
+ struct list_head b_more_io;
+ struct list_head b_dirty_time;
+ spinlock_t list_lock;
+ struct percpu_counter stat[4];
+ struct bdi_writeback_congested *congested;
+ long unsigned int bw_time_stamp;
+ long unsigned int dirtied_stamp;
+ long unsigned int written_stamp;
+ long unsigned int write_bandwidth;
+ long unsigned int avg_write_bandwidth;
+ long unsigned int dirty_ratelimit;
+ long unsigned int balanced_dirty_ratelimit;
+ struct fprop_local_percpu completions;
+ int dirty_exceeded;
+ enum wb_reason start_all_reason;
+ spinlock_t work_lock;
+ struct list_head work_list;
+ struct delayed_work dwork;
+ long unsigned int dirty_sleep;
+ struct list_head bdi_node;
+ struct percpu_ref refcnt;
+ struct fprop_local_percpu memcg_completions;
+ struct cgroup_subsys_state *memcg_css;
+ struct cgroup_subsys_state *blkcg_css;
+ struct list_head memcg_node;
+ struct list_head blkcg_node;
+ union {
+ struct work_struct release_work;
+ struct callback_head rcu;
+ };
+};
+
+struct backing_dev_info {
+ u64 id;
+ struct rb_node rb_node;
+ struct list_head bdi_list;
+ long unsigned int ra_pages;
+ long unsigned int io_pages;
+ congested_fn *congested_fn;
+ void *congested_data;
+ struct kref refcnt;
+ unsigned int capabilities;
+ unsigned int min_ratio;
+ unsigned int max_ratio;
+ unsigned int max_prop_frac;
+ atomic_long_t tot_write_bandwidth;
+ struct bdi_writeback wb;
+ struct list_head wb_list;
+ struct xarray cgwb_tree;
+ struct rb_root cgwb_congested_tree;
+ struct mutex cgwb_release_mutex;
+ struct rw_semaphore wb_switch_rwsem;
+ wait_queue_head_t wb_waitq;
+ struct device *dev;
+ char dev_name[64];
+ struct device *owner;
+ struct timer_list laptop_mode_wb_timer;
+ struct dentry *debug_dir;
+};
+
+struct css_set {
+ struct cgroup_subsys_state *subsys[11];
+ refcount_t refcount;
+ struct css_set *dom_cset;
+ struct cgroup *dfl_cgrp;
+ int nr_tasks;
+ struct list_head tasks;
+ struct list_head mg_tasks;
+ struct list_head dying_tasks;
+ struct list_head task_iters;
+ struct list_head e_cset_node[11];
+ struct list_head threaded_csets;
+ struct list_head threaded_csets_node;
+ struct hlist_node hlist;
+ struct list_head cgrp_links;
+ struct list_head mg_preload_node;
+ struct list_head mg_node;
+ struct cgroup *mg_src_cgrp;
+ struct cgroup *mg_dst_cgrp;
+ struct css_set *mg_dst_cset;
+ bool dead;
+ struct callback_head callback_head;
+};
+
+struct perf_event_groups {
+ struct rb_root tree;
+ u64 index;
+};
+
+struct perf_event_context {
+ struct pmu *pmu;
+ raw_spinlock_t lock;
+ struct mutex mutex;
+ struct list_head active_ctx_list;
+ struct perf_event_groups pinned_groups;
+ struct perf_event_groups flexible_groups;
+ struct list_head event_list;
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+ int nr_events;
+ int nr_active;
+ int is_active;
+ int nr_stat;
+ int nr_freq;
+ int rotate_disable;
+ int rotate_necessary;
+ refcount_t refcount;
+ struct task_struct *task;
+ u64 time;
+ u64 timestamp;
+ struct perf_event_context *parent_ctx;
+ u64 parent_gen;
+ u64 generation;
+ int pin_count;
+ int nr_cgroups;
+ void *task_ctx_data;
+ struct callback_head callback_head;
+};
+
+typedef unsigned int blk_qc_t;
+
+typedef blk_qc_t make_request_fn(struct request_queue *, struct bio *);
+
+struct blk_rq_stat {
+ u64 mean;
+ u64 min;
+ u64 max;
+ u32 nr_samples;
+ u64 batch;
+};
+
+enum blk_zoned_model {
+ BLK_ZONED_NONE = 0,
+ BLK_ZONED_HA = 1,
+ BLK_ZONED_HM = 2,
+};
+
+struct queue_limits {
+ long unsigned int bounce_pfn;
+ long unsigned int seg_boundary_mask;
+ long unsigned int virt_boundary_mask;
+ unsigned int max_hw_sectors;
+ unsigned int max_dev_sectors;
+ unsigned int chunk_sectors;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int logical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;
+ unsigned int max_discard_sectors;
+ unsigned int max_hw_discard_sectors;
+ unsigned int max_write_same_sectors;
+ unsigned int max_write_zeroes_sectors;
+ unsigned int max_zone_append_sectors;
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
+ short unsigned int max_segments;
+ short unsigned int max_integrity_segments;
+ short unsigned int max_discard_segments;
+ unsigned char misaligned;
+ unsigned char discard_misaligned;
+ unsigned char raid_partial_stripes_expensive;
+ enum blk_zoned_model zoned;
+};
+
+struct bsg_ops;
+
+struct bsg_class_device {
+ struct device *class_dev;
+ int minor;
+ struct request_queue *queue;
+ const struct bsg_ops *ops;
+};
+
+typedef void *mempool_alloc_t(gfp_t, void *);
+
+typedef void mempool_free_t(void *, void *);
+
+struct mempool_s {
+ spinlock_t lock;
+ int min_nr;
+ int curr_nr;
+ void **elements;
+ void *pool_data;
+ mempool_alloc_t *alloc;
+ mempool_free_t *free;
+ wait_queue_head_t wait;
+};
+
+typedef struct mempool_s mempool_t;
+
+struct bio_set {
+ struct kmem_cache *bio_slab;
+ unsigned int front_pad;
+ mempool_t bio_pool;
+ mempool_t bvec_pool;
+ spinlock_t rescue_lock;
+ struct bio_list rescue_list;
+ struct work_struct rescue_work;
+ struct workqueue_struct *rescue_workqueue;
+};
+
+struct request;
+
+struct elevator_queue;
+
+struct blk_queue_stats;
+
+struct rq_qos;
+
+struct blk_mq_ops;
+
+struct blk_mq_ctx;
+
+struct blk_mq_hw_ctx;
+
+struct blk_stat_callback;
+
+struct blkcg_gq;
+
+struct blk_flush_queue;
+
+struct throtl_data;
+
+struct blk_mq_tag_set;
+
+struct request_queue {
+ struct request *last_merge;
+ struct elevator_queue *elevator;
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ make_request_fn *make_request_fn;
+ const struct blk_mq_ops *mq_ops;
+ struct blk_mq_ctx *queue_ctx;
+ unsigned int queue_depth;
+ struct blk_mq_hw_ctx **queue_hw_ctx;
+ unsigned int nr_hw_queues;
+ struct backing_dev_info *backing_dev_info;
+ void *queuedata;
+ long unsigned int queue_flags;
+ atomic_t pm_only;
+ int id;
+ gfp_t bounce_gfp;
+ spinlock_t queue_lock;
+ struct kobject kobj;
+ struct kobject *mq_kobj;
+ long unsigned int nr_requests;
+ unsigned int dma_pad_mask;
+ unsigned int dma_alignment;
+ unsigned int rq_timeout;
+ int poll_nsec;
+ struct blk_stat_callback *poll_cb;
+ struct blk_rq_stat poll_stat[16];
+ struct timer_list timeout;
+ struct work_struct timeout_work;
+ struct list_head icq_list;
+ long unsigned int blkcg_pols[1];
+ struct blkcg_gq *root_blkg;
+ struct list_head blkg_list;
+ struct queue_limits limits;
+ unsigned int required_elevator_features;
+ unsigned int sg_timeout;
+ unsigned int sg_reserved_size;
+ int node;
+ struct blk_flush_queue *fq;
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ struct delayed_work requeue_work;
+ struct mutex sysfs_lock;
+ struct mutex sysfs_dir_lock;
+ struct list_head unused_hctx_list;
+ spinlock_t unused_hctx_lock;
+ int mq_freeze_depth;
+ struct bsg_class_device bsg_dev;
+ struct throtl_data *td;
+ struct callback_head callback_head;
+ wait_queue_head_t mq_freeze_wq;
+ struct mutex mq_freeze_lock;
+ struct percpu_ref q_usage_counter;
+ struct blk_mq_tag_set *tag_set;
+ struct list_head tag_set_list;
+ struct bio_set bio_split;
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+ struct dentry *rqos_debugfs_dir;
+ bool mq_sysfs_init_done;
+ size_t cmd_size;
+ struct work_struct release_work;
+ u64 write_hints[5];
+};
+
+struct cgroup_base_stat {
+ struct task_cputime cputime;
+};
+
+struct psi_group {};
+
+struct cgroup_bpf {
+ struct bpf_prog_array *effective[34];
+ struct list_head progs[34];
+ u32 flags[34];
+ struct bpf_prog_array *inactive;
+ struct percpu_ref refcnt;
+ struct work_struct release_work;
+};
+
+struct cgroup_freezer_state {
+ bool freeze;
+ int e_freeze;
+ int nr_frozen_descendants;
+ int nr_frozen_tasks;
+};
+
+struct cgroup_root;
+
+struct cgroup_rstat_cpu;
+
+struct cgroup {
+ struct cgroup_subsys_state self;
+ long unsigned int flags;
+ int level;
+ int max_depth;
+ int nr_descendants;
+ int nr_dying_descendants;
+ int max_descendants;
+ int nr_populated_csets;
+ int nr_populated_domain_children;
+ int nr_populated_threaded_children;
+ int nr_threaded_children;
+ struct kernfs_node *kn;
+ struct cgroup_file procs_file;
+ struct cgroup_file events_file;
+ u16 subtree_control;
+ u16 subtree_ss_mask;
+ u16 old_subtree_control;
+ u16 old_subtree_ss_mask;
+ struct cgroup_subsys_state *subsys[11];
+ struct cgroup_root *root;
+ struct list_head cset_links;
+ struct list_head e_csets[11];
+ struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp;
+ struct cgroup_rstat_cpu *rstat_cpu;
+ struct list_head rstat_css_list;
+ struct cgroup_base_stat last_bstat;
+ struct cgroup_base_stat bstat;
+ struct prev_cputime prev_cputime;
+ struct list_head pidlists;
+ struct mutex pidlist_mutex;
+ wait_queue_head_t offline_waitq;
+ struct work_struct release_agent_work;
+ struct psi_group psi;
+ struct cgroup_bpf bpf;
+ atomic_t congestion_count;
+ struct cgroup_freezer_state freezer;
+ u64 ancestor_ids[0];
+};
+
+enum writeback_sync_modes {
+ WB_SYNC_NONE = 0,
+ WB_SYNC_ALL = 1,
+};
+
+struct writeback_control {
+ long int nr_to_write;
+ long int pages_skipped;
+ loff_t range_start;
+ loff_t range_end;
+ enum writeback_sync_modes sync_mode;
+ unsigned int for_kupdate: 1;
+ unsigned int for_background: 1;
+ unsigned int tagged_writepages: 1;
+ unsigned int for_reclaim: 1;
+ unsigned int range_cyclic: 1;
+ unsigned int for_sync: 1;
+ unsigned int no_cgroup_owner: 1;
+ unsigned int punt_to_cgroup: 1;
+ struct bdi_writeback *wb;
+ struct inode *inode;
+ int wb_id;
+ int wb_lcand_id;
+ int wb_tcand_id;
+ size_t wb_bytes;
+ size_t wb_lcand_bytes;
+ size_t wb_tcand_bytes;
+};
+
+struct readahead_control {
+ struct file *file;
+ struct address_space *mapping;
+ long unsigned int _index;
+ unsigned int _nr_pages;
+ unsigned int _batch_count;
+};
+
+struct swap_cluster_info {
+ spinlock_t lock;
+ unsigned int data: 24;
+ unsigned int flags: 8;
+};
+
+struct swap_cluster_list {
+ struct swap_cluster_info head;
+ struct swap_cluster_info tail;
+};
+
+struct percpu_cluster;
+
+struct swap_info_struct {
+ long unsigned int flags;
+ short int prio;
+ struct plist_node list;
+ signed char type;
+ unsigned int max;
+ unsigned char *swap_map;
+ struct swap_cluster_info *cluster_info;
+ struct swap_cluster_list free_clusters;
+ unsigned int lowest_bit;
+ unsigned int highest_bit;
+ unsigned int pages;
+ unsigned int inuse_pages;
+ unsigned int cluster_next;
+ unsigned int cluster_nr;
+ unsigned int *cluster_next_cpu;
+ struct percpu_cluster *percpu_cluster;
+ struct rb_root swap_extent_root;
+ struct block_device *bdev;
+ struct file *swap_file;
+ unsigned int old_block_size;
+ spinlock_t lock;
+ spinlock_t cont_lock;
+ struct work_struct discard_work;
+ struct swap_cluster_list discard_clusters;
+ struct plist_node avail_lists[0];
+};
+
+struct disk_stats;
+
+struct partition_meta_info;
+
+struct hd_struct {
+ sector_t start_sect;
+ sector_t nr_sects;
+ long unsigned int stamp;
+ struct disk_stats *dkstats;
+ struct percpu_ref ref;
+ sector_t alignment_offset;
+ unsigned int discard_alignment;
+ struct device __dev;
+ struct kobject *holder_dir;
+ int policy;
+ int partno;
+ struct partition_meta_info *info;
+ struct rcu_work rcu_work;
+};
+
+struct disk_part_tbl;
+
+struct block_device_operations;
+
+struct timer_rand_state;
+
+struct disk_events;
+
+struct badblocks;
+
+struct gendisk {
+ int major;
+ int first_minor;
+ int minors;
+ char disk_name[32];
+ short unsigned int events;
+ short unsigned int event_flags;
+ struct disk_part_tbl *part_tbl;
+ struct hd_struct part0;
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+ int flags;
+ struct rw_semaphore lookup_sem;
+ struct kobject *slave_dir;
+ struct timer_rand_state *random;
+ atomic_t sync_io;
+ struct disk_events *ev;
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+};
+
+struct cdev {
+ struct kobject kobj;
+ struct module *owner;
+ const struct file_operations *ops;
+ struct list_head list;
+ dev_t dev;
+ unsigned int count;
+};
+
+struct fc_log;
+
+struct p_log {
+ const char *prefix;
+ struct fc_log *log;
+};
+
+enum fs_context_purpose {
+ FS_CONTEXT_FOR_MOUNT = 0,
+ FS_CONTEXT_FOR_SUBMOUNT = 1,
+ FS_CONTEXT_FOR_RECONFIGURE = 2,
+};
+
+enum fs_context_phase {
+ FS_CONTEXT_CREATE_PARAMS = 0,
+ FS_CONTEXT_CREATING = 1,
+ FS_CONTEXT_AWAITING_MOUNT = 2,
+ FS_CONTEXT_AWAITING_RECONF = 3,
+ FS_CONTEXT_RECONF_PARAMS = 4,
+ FS_CONTEXT_RECONFIGURING = 5,
+ FS_CONTEXT_FAILED = 6,
+};
+
+struct fs_context_operations;
+
+struct fs_context {
+ const struct fs_context_operations *ops;
+ struct mutex uapi_mutex;
+ struct file_system_type *fs_type;
+ void *fs_private;
+ void *sget_key;
+ struct dentry *root;
+ struct user_namespace *user_ns;
+ struct net *net_ns;
+ const struct cred *cred;
+ struct p_log log;
+ const char *source;
+ void *security;
+ void *s_fs_info;
+ unsigned int sb_flags;
+ unsigned int sb_flags_mask;
+ unsigned int s_iflags;
+ unsigned int lsm_flags;
+ enum fs_context_purpose purpose: 8;
+ enum fs_context_phase phase: 8;
+ bool need_free: 1;
+ bool global: 1;
+};
+
+struct audit_names;
+
+struct filename {
+ const char *name;
+ const char *uptr;
+ int refcnt;
+ struct audit_names *aname;
+ const char iname[0];
+};
+
+typedef u8 blk_status_t;
+
+struct bvec_iter {
+ sector_t bi_sector;
+ unsigned int bi_size;
+ unsigned int bi_idx;
+ unsigned int bi_bvec_done;
+};
+
+typedef void bio_end_io_t(struct bio *);
+
+struct bio_issue {
+ u64 value;
+};
+
+struct bio_vec {
+ struct page *bv_page;
+ unsigned int bv_len;
+ unsigned int bv_offset;
+};
+
+struct bio {
+ struct bio *bi_next;
+ struct gendisk *bi_disk;
+ unsigned int bi_opf;
+ short unsigned int bi_flags;
+ short unsigned int bi_ioprio;
+ short unsigned int bi_write_hint;
+ blk_status_t bi_status;
+ u8 bi_partno;
+ atomic_t __bi_remaining;
+ struct bvec_iter bi_iter;
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+ struct blkcg_gq *bi_blkg;
+ struct bio_issue bi_issue;
+ union { };
+ short unsigned int bi_vcnt;
+ short unsigned int bi_max_vecs;
+ atomic_t __bi_cnt;
+ struct bio_vec *bi_io_vec;
+ struct bio_set *bi_pool;
+ struct bio_vec bi_inline_vecs[0];
+};
+
+struct linux_binprm {
+ struct vm_area_struct *vma;
+ long unsigned int vma_pages;
+ struct mm_struct *mm;
+ long unsigned int p;
+ long unsigned int argmin;
+ unsigned int have_execfd: 1;
+ unsigned int execfd_creds: 1;
+ unsigned int secureexec: 1;
+ unsigned int point_of_no_return: 1;
+ struct file *executable;
+ struct file *interpreter;
+ struct file *file;
+ struct cred *cred;
+ int unsafe;
+ unsigned int per_clear;
+ int argc;
+ int envc;
+ const char *filename;
+ const char *interp;
+ unsigned int interp_flags;
+ int execfd;
+ long unsigned int loader;
+ long unsigned int exec;
+ struct rlimit rlim_stack;
+ char buf[256];
+};
+
+struct coredump_params {
+ const kernel_siginfo_t *siginfo;
+ struct pt_regs *regs;
+ struct file *file;
+ long unsigned int limit;
+ long unsigned int mm_flags;
+ loff_t written;
+ loff_t pos;
+};
+
+struct ring_buffer_event {
+ u32 type_len: 5;
+ u32 time_delta: 27;
+ u32 array[0];
+};
+
+struct seq_buf {
+ char *buffer;
+ size_t size;
+ size_t len;
+ loff_t readpos;
+};
+
+struct trace_seq {
+ unsigned char buffer[4096];
+ struct seq_buf seq;
+ int full;
+};
+
+enum ctx_state {
+ CONTEXT_DISABLED = -1,
+ CONTEXT_KERNEL = 0,
+ CONTEXT_USER = 1,
+ CONTEXT_GUEST = 2,
+};
+
+struct context_tracking {
+ bool active;
+ int recursion;
+ enum ctx_state state;
+};
+
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
+ PERF_COUNT_SW_DUMMY = 9,
+ PERF_COUNT_SW_BPF_OUTPUT = 10,
+ PERF_COUNT_SW_MAX = 11,
+};
+
+union perf_mem_data_src {
+ __u64 val;
+ struct {
+ __u64 mem_op: 5;
+ __u64 mem_lvl: 14;
+ __u64 mem_snoop: 5;
+ __u64 mem_lock: 2;
+ __u64 mem_dtlb: 7;
+ __u64 mem_lvl_num: 4;
+ __u64 mem_remote: 1;
+ __u64 mem_snoopx: 2;
+ __u64 mem_rsvd: 24;
+ };
+};
+
+struct perf_branch_entry {
+ __u64 from;
+ __u64 to;
+ __u64 mispred: 1;
+ __u64 predicted: 1;
+ __u64 in_tx: 1;
+ __u64 abort: 1;
+ __u64 cycles: 16;
+ __u64 type: 4;
+ __u64 reserved: 40;
+};
+
+struct new_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+ char domainname[65];
+};
+
+struct uts_namespace {
+ struct kref kref;
+ struct new_utsname name;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct ns_common ns;
+};
+
+struct cgroup_namespace {
+ refcount_t count;
+ struct ns_common ns;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct css_set *root_cset;
+};
+
+struct nsset {
+ unsigned int flags;
+ struct nsproxy *nsproxy;
+ struct fs_struct *fs;
+ const struct cred *cred;
+};
+
+struct proc_ns_operations {
+ const char *name;
+ const char *real_ns_name;
+ int type;
+ struct ns_common * (*get)(struct task_struct *);
+ void (*put)(struct ns_common *);
+ int (*install)(struct nsset *, struct ns_common *);
+ struct user_namespace * (*owner)(struct ns_common *);
+ struct ns_common * (*get_parent)(struct ns_common *);
+};
+
+struct ucounts {
+ struct hlist_node node;
+ struct user_namespace *ns;
+ kuid_t uid;
+ int count;
+ atomic_t ucount[10];
+};
+
+struct perf_guest_info_callbacks {
+ int (*is_in_guest)();
+ int (*is_user_mode)();
+ long unsigned int (*get_guest_ip)();
+ void (*handle_intel_pt_intr)();
+};
+
+struct perf_cpu_context;
+
+struct perf_output_handle;
+
+struct pmu {
+ struct list_head entry;
+ struct module *module;
+ struct device *dev;
+ const struct attribute_group **attr_groups;
+ const struct attribute_group **attr_update;
+ const char *name;
+ int type;
+ int capabilities;
+ int *pmu_disable_count;
+ struct perf_cpu_context *pmu_cpu_context;
+ atomic_t exclusive_cnt;
+ int task_ctx_nr;
+ int hrtimer_interval_ms;
+ unsigned int nr_addr_filters;
+ void (*pmu_enable)(struct pmu *);
+ void (*pmu_disable)(struct pmu *);
+ int (*event_init)(struct perf_event *);
+ void (*event_mapped)(struct perf_event *, struct mm_struct *);
+ void (*event_unmapped)(struct perf_event *, struct mm_struct *);
+ int (*add)(struct perf_event *, int);
+ void (*del)(struct perf_event *, int);
+ void (*start)(struct perf_event *, int);
+ void (*stop)(struct perf_event *, int);
+ void (*read)(struct perf_event *);
+ void (*start_txn)(struct pmu *, unsigned int);
+ int (*commit_txn)(struct pmu *);
+ void (*cancel_txn)(struct pmu *);
+ int (*event_idx)(struct perf_event *);
+ void (*sched_task)(struct perf_event_context *, bool);
+ size_t task_ctx_size;
+ void (*swap_task_ctx)(struct perf_event_context *, struct perf_event_context *);
+ void * (*setup_aux)(struct perf_event *, void **, int, bool);
+ void (*free_aux)(void *);
+ long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int);
+ int (*addr_filters_validate)(struct list_head *);
+ void (*addr_filters_sync)(struct perf_event *);
+ int (*aux_output_match)(struct perf_event *);
+ int (*filter_match)(struct perf_event *);
+ int (*check_period)(struct perf_event *, u64);
+};
+
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED = 1,
+ DOMAIN_BUS_GENERIC_MSI = 2,
+ DOMAIN_BUS_PCI_MSI = 3,
+ DOMAIN_BUS_PLATFORM_MSI = 4,
+ DOMAIN_BUS_NEXUS = 5,
+ DOMAIN_BUS_IPI = 6,
+ DOMAIN_BUS_FSL_MC_MSI = 7,
+ DOMAIN_BUS_TI_SCI_INTA_MSI = 8,
+ DOMAIN_BUS_WAKEUP = 9,
+};
+
+struct irq_domain_ops;
+
+struct irq_domain_chip_generic;
+
+struct irq_domain {
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+ unsigned int mapcount;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
+ struct irq_domain_chip_generic *gc;
+ struct irq_domain *parent;
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_direct_max_irq;
+ unsigned int revmap_size;
+ struct xarray revmap_tree;
+ struct mutex revmap_tree_mutex;
+ unsigned int linear_revmap[0];
+};
+
+typedef u32 phandle;
+
+struct property;
+
+struct device_node {
+ const char *name;
+ phandle phandle;
+ const char *full_name;
+ struct fwnode_handle fwnode;
+ struct property *properties;
+ struct property *deadprops;
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ long unsigned int _flags;
+ void *data;
+};
+
+enum cpuhp_state {
+ CPUHP_INVALID = -1,
+ CPUHP_OFFLINE = 0,
+ CPUHP_CREATE_THREADS = 1,
+ CPUHP_PERF_PREPARE = 2,
+ CPUHP_PERF_X86_PREPARE = 3,
+ CPUHP_PERF_X86_AMD_UNCORE_PREP = 4,
+ CPUHP_PERF_POWER = 5,
+ CPUHP_PERF_SUPERH = 6,
+ CPUHP_X86_HPET_DEAD = 7,
+ CPUHP_X86_APB_DEAD = 8,
+ CPUHP_X86_MCE_DEAD = 9,
+ CPUHP_VIRT_NET_DEAD = 10,
+ CPUHP_SLUB_DEAD = 11,
+ CPUHP_MM_WRITEBACK_DEAD = 12,
+ CPUHP_MM_VMSTAT_DEAD = 13,
+ CPUHP_SOFTIRQ_DEAD = 14,
+ CPUHP_NET_MVNETA_DEAD = 15,
+ CPUHP_CPUIDLE_DEAD = 16,
+ CPUHP_ARM64_FPSIMD_DEAD = 17,
+ CPUHP_ARM_OMAP_WAKE_DEAD = 18,
+ CPUHP_IRQ_POLL_DEAD = 19,
+ CPUHP_BLOCK_SOFTIRQ_DEAD = 20,
+ CPUHP_ACPI_CPUDRV_DEAD = 21,
+ CPUHP_S390_PFAULT_DEAD = 22,
+ CPUHP_BLK_MQ_DEAD = 23,
+ CPUHP_FS_BUFF_DEAD = 24,
+ CPUHP_PRINTK_DEAD = 25,
+ CPUHP_MM_MEMCQ_DEAD = 26,
+ CPUHP_PERCPU_CNT_DEAD = 27,
+ CPUHP_RADIX_DEAD = 28,
+ CPUHP_PAGE_ALLOC_DEAD = 29,
+ CPUHP_NET_DEV_DEAD = 30,
+ CPUHP_PCI_XGENE_DEAD = 31,
+ CPUHP_IOMMU_INTEL_DEAD = 32,
+ CPUHP_LUSTRE_CFS_DEAD = 33,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 34,
+ CPUHP_PADATA_DEAD = 35,
+ CPUHP_WORKQUEUE_PREP = 36,
+ CPUHP_POWER_NUMA_PREPARE = 37,
+ CPUHP_HRTIMERS_PREPARE = 38,
+ CPUHP_PROFILE_PREPARE = 39,
+ CPUHP_X2APIC_PREPARE = 40,
+ CPUHP_SMPCFD_PREPARE = 41,
+ CPUHP_RELAY_PREPARE = 42,
+ CPUHP_SLAB_PREPARE = 43,
+ CPUHP_MD_RAID5_PREPARE = 44,
+ CPUHP_RCUTREE_PREP = 45,
+ CPUHP_CPUIDLE_COUPLED_PREPARE = 46,
+ CPUHP_POWERPC_PMAC_PREPARE = 47,
+ CPUHP_POWERPC_MMU_CTX_PREPARE = 48,
+ CPUHP_XEN_PREPARE = 49,
+ CPUHP_XEN_EVTCHN_PREPARE = 50,
+ CPUHP_ARM_SHMOBILE_SCU_PREPARE = 51,
+ CPUHP_SH_SH3X_PREPARE = 52,
+ CPUHP_NET_FLOW_PREPARE = 53,
+ CPUHP_TOPOLOGY_PREPARE = 54,
+ CPUHP_NET_IUCV_PREPARE = 55,
+ CPUHP_ARM_BL_PREPARE = 56,
+ CPUHP_TRACE_RB_PREPARE = 57,
+ CPUHP_MM_ZS_PREPARE = 58,
+ CPUHP_MM_ZSWP_MEM_PREPARE = 59,
+ CPUHP_MM_ZSWP_POOL_PREPARE = 60,
+ CPUHP_KVM_PPC_BOOK3S_PREPARE = 61,
+ CPUHP_ZCOMP_PREPARE = 62,
+ CPUHP_TIMERS_PREPARE = 63,
+ CPUHP_MIPS_SOC_PREPARE = 64,
+ CPUHP_BP_PREPARE_DYN = 65,
+ CPUHP_BP_PREPARE_DYN_END = 85,
+ CPUHP_BRINGUP_CPU = 86,
+ CPUHP_AP_IDLE_DEAD = 87,
+ CPUHP_AP_OFFLINE = 88,
+ CPUHP_AP_SCHED_STARTING = 89,
+ CPUHP_AP_RCUTREE_DYING = 90,
+ CPUHP_AP_CPU_PM_STARTING = 91,
+ CPUHP_AP_IRQ_GIC_STARTING = 92,
+ CPUHP_AP_IRQ_HIP04_STARTING = 93,
+ CPUHP_AP_IRQ_ARMADA_XP_STARTING = 94,
+ CPUHP_AP_IRQ_BCM2836_STARTING = 95,
+ CPUHP_AP_IRQ_MIPS_GIC_STARTING = 96,
+ CPUHP_AP_IRQ_RISCV_STARTING = 97,
+ CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 98,
+ CPUHP_AP_ARM_MVEBU_COHERENCY = 99,
+ CPUHP_AP_MICROCODE_LOADER = 100,
+ CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 101,
+ CPUHP_AP_PERF_X86_STARTING = 102,
+ CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 103,
+ CPUHP_AP_PERF_X86_CQM_STARTING = 104,
+ CPUHP_AP_PERF_X86_CSTATE_STARTING = 105,
+ CPUHP_AP_PERF_XTENSA_STARTING = 106,
+ CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 107,
+ CPUHP_AP_ARM_SDEI_STARTING = 108,
+ CPUHP_AP_ARM_VFP_STARTING = 109,
+ CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 110,
+ CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 111,
+ CPUHP_AP_PERF_ARM_ACPI_STARTING = 112,
+ CPUHP_AP_PERF_ARM_STARTING = 113,
+ CPUHP_AP_ARM_L2X0_STARTING = 114,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 115,
+ CPUHP_AP_ARM_ARCH_TIMER_STARTING = 116,
+ CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 117,
+ CPUHP_AP_JCORE_TIMER_STARTING = 118,
+ CPUHP_AP_ARM_TWD_STARTING = 119,
+ CPUHP_AP_QCOM_TIMER_STARTING = 120,
+ CPUHP_AP_TEGRA_TIMER_STARTING = 121,
+ CPUHP_AP_ARMADA_TIMER_STARTING = 122,
+ CPUHP_AP_MARCO_TIMER_STARTING = 123,
+ CPUHP_AP_MIPS_GIC_TIMER_STARTING = 124,
+ CPUHP_AP_ARC_TIMER_STARTING = 125,
+ CPUHP_AP_RISCV_TIMER_STARTING = 126,
+ CPUHP_AP_CSKY_TIMER_STARTING = 127,
+ CPUHP_AP_HYPERV_TIMER_STARTING = 128,
+ CPUHP_AP_KVM_STARTING = 129,
+ CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING = 130,
+ CPUHP_AP_KVM_ARM_VGIC_STARTING = 131,
+ CPUHP_AP_KVM_ARM_TIMER_STARTING = 132,
+ CPUHP_AP_DUMMY_TIMER_STARTING = 133,
+ CPUHP_AP_ARM_XEN_STARTING = 134,
+ CPUHP_AP_ARM_KVMPV_STARTING = 135,
+ CPUHP_AP_ARM_CORESIGHT_STARTING = 136,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 137,
+ CPUHP_AP_ARM64_ISNDEP_STARTING = 138,
+ CPUHP_AP_SMPCFD_DYING = 139,
+ CPUHP_AP_X86_TBOOT_DYING = 140,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 141,
+ CPUHP_AP_ONLINE = 142,
+ CPUHP_TEARDOWN_CPU = 143,
+ CPUHP_AP_ONLINE_IDLE = 144,
+ CPUHP_AP_SMPBOOT_THREADS = 145,
+ CPUHP_AP_X86_VDSO_VMA_ONLINE = 146,
+ CPUHP_AP_IRQ_AFFINITY_ONLINE = 147,
+ CPUHP_AP_BLK_MQ_ONLINE = 148,
+ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 149,
+ CPUHP_AP_X86_INTEL_EPB_ONLINE = 150,
+ CPUHP_AP_PERF_ONLINE = 151,
+ CPUHP_AP_PERF_X86_ONLINE = 152,
+ CPUHP_AP_PERF_X86_UNCORE_ONLINE = 153,
+ CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 154,
+ CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 155,
+ CPUHP_AP_PERF_X86_RAPL_ONLINE = 156,
+ CPUHP_AP_PERF_X86_CQM_ONLINE = 157,
+ CPUHP_AP_PERF_X86_CSTATE_ONLINE = 158,
+ CPUHP_AP_PERF_S390_CF_ONLINE = 159,
+ CPUHP_AP_PERF_S390_SF_ONLINE = 160,
+ CPUHP_AP_PERF_ARM_CCI_ONLINE = 161,
+ CPUHP_AP_PERF_ARM_CCN_ONLINE = 162,
+ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 163,
+ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 164,
+ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 165,
+ CPUHP_AP_PERF_ARM_L2X0_ONLINE = 166,
+ CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 167,
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 168,
+ CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 169,
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 170,
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 171,
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 172,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 173,
+ CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 174,
+ CPUHP_AP_WATCHDOG_ONLINE = 175,
+ CPUHP_AP_WORKQUEUE_ONLINE = 176,
+ CPUHP_AP_RCUTREE_ONLINE = 177,
+ CPUHP_AP_BASE_CACHEINFO_ONLINE = 178,
+ CPUHP_AP_ONLINE_DYN = 179,
+ CPUHP_AP_ONLINE_DYN_END = 209,
+ CPUHP_AP_X86_HPET_ONLINE = 210,
+ CPUHP_AP_X86_KVM_CLK_ONLINE = 211,
+ CPUHP_AP_ACTIVE = 212,
+ CPUHP_ONLINE = 213,
+};
+
+struct perf_regs {
+ __u64 abi;
+ struct pt_regs *regs;
+};
+
+struct kernel_cpustat {
+ u64 cpustat[10];
+};
+
+struct kernel_stat {
+ long unsigned int irqs_sum;
+ unsigned int softirqs[10];
+};
+
+struct u64_stats_sync {};
+
+struct bpf_insn {
+ __u8 code;
+ __u8 dst_reg: 4;
+ __u8 src_reg: 4;
+ __s16 off;
+ __s32 imm;
+};
+
+struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id;
+ __u32 attach_type;
+};
+
+enum bpf_map_type {
+ BPF_MAP_TYPE_UNSPEC = 0,
+ BPF_MAP_TYPE_HASH = 1,
+ BPF_MAP_TYPE_ARRAY = 2,
+ BPF_MAP_TYPE_PROG_ARRAY = 3,
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4,
+ BPF_MAP_TYPE_PERCPU_HASH = 5,
+ BPF_MAP_TYPE_PERCPU_ARRAY = 6,
+ BPF_MAP_TYPE_STACK_TRACE = 7,
+ BPF_MAP_TYPE_CGROUP_ARRAY = 8,
+ BPF_MAP_TYPE_LRU_HASH = 9,
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 10,
+ BPF_MAP_TYPE_LPM_TRIE = 11,
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 12,
+ BPF_MAP_TYPE_HASH_OF_MAPS = 13,
+ BPF_MAP_TYPE_DEVMAP = 14,
+ BPF_MAP_TYPE_SOCKMAP = 15,
+ BPF_MAP_TYPE_CPUMAP = 16,
+ BPF_MAP_TYPE_XSKMAP = 17,
+ BPF_MAP_TYPE_SOCKHASH = 18,
+ BPF_MAP_TYPE_CGROUP_STORAGE = 19,
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21,
+ BPF_MAP_TYPE_QUEUE = 22,
+ BPF_MAP_TYPE_STACK = 23,
+ BPF_MAP_TYPE_SK_STORAGE = 24,
+ BPF_MAP_TYPE_DEVMAP_HASH = 25,
+ BPF_MAP_TYPE_STRUCT_OPS = 26,
+ BPF_MAP_TYPE_RINGBUF = 27,
+};
+
+union bpf_attr {
+ struct {
+ __u32 map_type;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+ __u32 inner_map_fd;
+ __u32 numa_node;
+ char map_name[16];
+ __u32 map_ifindex;
+ __u32 btf_fd;
+ __u32 btf_key_type_id;
+ __u32 btf_value_type_id;
+ __u32 btf_vmlinux_value_type_id;
+ };
+ struct {
+ __u32 map_fd;
+ __u64 key;
+ union {
+ __u64 value;
+ __u64 next_key;
+ };
+ __u64 flags;
+ };
+ struct {
+ __u64 in_batch;
+ __u64 out_batch;
+ __u64 keys;
+ __u64 values;
+ __u32 count;
+ __u32 map_fd;
+ __u64 elem_flags;
+ __u64 flags;
+ } batch;
+ struct {
+ __u32 prog_type;
+ __u32 insn_cnt;
+ __u64 insns;
+ __u64 license;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+ __u32 kern_version;
+ __u32 prog_flags;
+ char prog_name[16];
+ __u32 prog_ifindex;
+ __u32 expected_attach_type;
+ __u32 prog_btf_fd;
+ __u32 func_info_rec_size;
+ __u64 func_info;
+ __u32 func_info_cnt;
+ __u32 line_info_rec_size;
+ __u64 line_info;
+ __u32 line_info_cnt;
+ __u32 attach_btf_id;
+ __u32 attach_prog_fd;
+ };
+ struct {
+ __u64 pathname;
+ __u32 bpf_fd;
+ __u32 file_flags;
+ };
+ struct {
+ __u32 target_fd;
+ __u32 attach_bpf_fd;
+ __u32 attach_type;
+ __u32 attach_flags;
+ __u32 replace_bpf_fd;
+ };
+ struct {
+ __u32 prog_fd;
+ __u32 retval;
+ __u32 data_size_in;
+ __u32 data_size_out;
+ __u64 data_in;
+ __u64 data_out;
+ __u32 repeat;
+ __u32 duration;
+ __u32 ctx_size_in;
+ __u32 ctx_size_out;
+ __u64 ctx_in;
+ __u64 ctx_out;
+ } test;
+ struct {
+ union {
+ __u32 start_id;
+ __u32 prog_id;
+ __u32 map_id;
+ __u32 btf_id;
+ __u32 link_id;
+ };
+ __u32 next_id;
+ __u32 open_flags;
+ };
+ struct {
+ __u32 bpf_fd;
+ __u32 info_len;
+ __u64 info;
+ } info;
+ struct {
+ __u32 target_fd;
+ __u32 attach_type;
+ __u32 query_flags;
+ __u32 attach_flags;
+ __u64 prog_ids;
+ __u32 prog_cnt;
+ } query;
+ struct {
+ __u64 name;
+ __u32 prog_fd;
+ } raw_tracepoint;
+ struct {
+ __u64 btf;
+ __u64 btf_log_buf;
+ __u32 btf_size;
+ __u32 btf_log_size;
+ __u32 btf_log_level;
+ };
+ struct {
+ __u32 pid;
+ __u32 fd;
+ __u32 flags;
+ __u32 buf_len;
+ __u64 buf;
+ __u32 prog_id;
+ __u32 fd_type;
+ __u64 probe_offset;
+ __u64 probe_addr;
+ } task_fd_query;
+ struct {
+ __u32 prog_fd;
+ __u32 target_fd;
+ __u32 attach_type;
+ __u32 flags;
+ } link_create;
+ struct {
+ __u32 link_fd;
+ __u32 new_prog_fd;
+ __u32 flags;
+ __u32 old_prog_fd;
+ } link_update;
+ struct {
+ __u32 type;
+ } enable_stats;
+ struct {
+ __u32 link_fd;
+ __u32 flags;
+ } iter_create;
+};
+
+enum bpf_func_id {
+ BPF_FUNC_unspec = 0,
+ BPF_FUNC_map_lookup_elem = 1,
+ BPF_FUNC_map_update_elem = 2,
+ BPF_FUNC_map_delete_elem = 3,
+ BPF_FUNC_probe_read = 4,
+ BPF_FUNC_ktime_get_ns = 5,
+ BPF_FUNC_trace_printk = 6,
+ BPF_FUNC_get_prandom_u32 = 7,
+ BPF_FUNC_get_smp_processor_id = 8,
+ BPF_FUNC_skb_store_bytes = 9,
+ BPF_FUNC_l3_csum_replace = 10,
+ BPF_FUNC_l4_csum_replace = 11,
+ BPF_FUNC_tail_call = 12,
+ BPF_FUNC_clone_redirect = 13,
+ BPF_FUNC_get_current_pid_tgid = 14,
+ BPF_FUNC_get_current_uid_gid = 15,
+ BPF_FUNC_get_current_comm = 16,
+ BPF_FUNC_get_cgroup_classid = 17,
+ BPF_FUNC_skb_vlan_push = 18,
+ BPF_FUNC_skb_vlan_pop = 19,
+ BPF_FUNC_skb_get_tunnel_key = 20,
+ BPF_FUNC_skb_set_tunnel_key = 21,
+ BPF_FUNC_perf_event_read = 22,
+ BPF_FUNC_redirect = 23,
+ BPF_FUNC_get_route_realm = 24,
+ BPF_FUNC_perf_event_output = 25,
+ BPF_FUNC_skb_load_bytes = 26,
+ BPF_FUNC_get_stackid = 27,
+ BPF_FUNC_csum_diff = 28,
+ BPF_FUNC_skb_get_tunnel_opt = 29,
+ BPF_FUNC_skb_set_tunnel_opt = 30,
+ BPF_FUNC_skb_change_proto = 31,
+ BPF_FUNC_skb_change_type = 32,
+ BPF_FUNC_skb_under_cgroup = 33,
+ BPF_FUNC_get_hash_recalc = 34,
+ BPF_FUNC_get_current_task = 35,
+ BPF_FUNC_probe_write_user = 36,
+ BPF_FUNC_current_task_under_cgroup = 37,
+ BPF_FUNC_skb_change_tail = 38,
+ BPF_FUNC_skb_pull_data = 39,
+ BPF_FUNC_csum_update = 40,
+ BPF_FUNC_set_hash_invalid = 41,
+ BPF_FUNC_get_numa_node_id = 42,
+ BPF_FUNC_skb_change_head = 43,
+ BPF_FUNC_xdp_adjust_head = 44,
+ BPF_FUNC_probe_read_str = 45,
+ BPF_FUNC_get_socket_cookie = 46,
+ BPF_FUNC_get_socket_uid = 47,
+ BPF_FUNC_set_hash = 48,
+ BPF_FUNC_setsockopt = 49,
+ BPF_FUNC_skb_adjust_room = 50,
+ BPF_FUNC_redirect_map = 51,
+ BPF_FUNC_sk_redirect_map = 52,
+ BPF_FUNC_sock_map_update = 53,
+ BPF_FUNC_xdp_adjust_meta = 54,
+ BPF_FUNC_perf_event_read_value = 55,
+ BPF_FUNC_perf_prog_read_value = 56,
+ BPF_FUNC_getsockopt = 57,
+ BPF_FUNC_override_return = 58,
+ BPF_FUNC_sock_ops_cb_flags_set = 59,
+ BPF_FUNC_msg_redirect_map = 60,
+ BPF_FUNC_msg_apply_bytes = 61,
+ BPF_FUNC_msg_cork_bytes = 62,
+ BPF_FUNC_msg_pull_data = 63,
+ BPF_FUNC_bind = 64,
+ BPF_FUNC_xdp_adjust_tail = 65,
+ BPF_FUNC_skb_get_xfrm_state = 66,
+ BPF_FUNC_get_stack = 67,
+ BPF_FUNC_skb_load_bytes_relative = 68,
+ BPF_FUNC_fib_lookup = 69,
+ BPF_FUNC_sock_hash_update = 70,
+ BPF_FUNC_msg_redirect_hash = 71,
+ BPF_FUNC_sk_redirect_hash = 72,
+ BPF_FUNC_lwt_push_encap = 73,
+ BPF_FUNC_lwt_seg6_store_bytes = 74,
+ BPF_FUNC_lwt_seg6_adjust_srh = 75,
+ BPF_FUNC_lwt_seg6_action = 76,
+ BPF_FUNC_rc_repeat = 77,
+ BPF_FUNC_rc_keydown = 78,
+ BPF_FUNC_skb_cgroup_id = 79,
+ BPF_FUNC_get_current_cgroup_id = 80,
+ BPF_FUNC_get_local_storage = 81,
+ BPF_FUNC_sk_select_reuseport = 82,
+ BPF_FUNC_skb_ancestor_cgroup_id = 83,
+ BPF_FUNC_sk_lookup_tcp = 84,
+ BPF_FUNC_sk_lookup_udp = 85,
+ BPF_FUNC_sk_release = 86,
+ BPF_FUNC_map_push_elem = 87,
+ BPF_FUNC_map_pop_elem = 88,
+ BPF_FUNC_map_peek_elem = 89,
+ BPF_FUNC_msg_push_data = 90,
+ BPF_FUNC_msg_pop_data = 91,
+ BPF_FUNC_rc_pointer_rel = 92,
+ BPF_FUNC_spin_lock = 93,
+ BPF_FUNC_spin_unlock = 94,
+ BPF_FUNC_sk_fullsock = 95,
+ BPF_FUNC_tcp_sock = 96,
+ BPF_FUNC_skb_ecn_set_ce = 97,
+ BPF_FUNC_get_listener_sock = 98,
+ BPF_FUNC_skc_lookup_tcp = 99,
+ BPF_FUNC_tcp_check_syncookie = 100,
+ BPF_FUNC_sysctl_get_name = 101,
+ BPF_FUNC_sysctl_get_current_value = 102,
+ BPF_FUNC_sysctl_get_new_value = 103,
+ BPF_FUNC_sysctl_set_new_value = 104,
+ BPF_FUNC_strtol = 105,
+ BPF_FUNC_strtoul = 106,
+ BPF_FUNC_sk_storage_get = 107,
+ BPF_FUNC_sk_storage_delete = 108,
+ BPF_FUNC_send_signal = 109,
+ BPF_FUNC_tcp_gen_syncookie = 110,
+ BPF_FUNC_skb_output = 111,
+ BPF_FUNC_probe_read_user = 112,
+ BPF_FUNC_probe_read_kernel = 113,
+ BPF_FUNC_probe_read_user_str = 114,
+ BPF_FUNC_probe_read_kernel_str = 115,
+ BPF_FUNC_tcp_send_ack = 116,
+ BPF_FUNC_send_signal_thread = 117,
+ BPF_FUNC_jiffies64 = 118,
+ BPF_FUNC_read_branch_records = 119,
+ BPF_FUNC_get_ns_current_pid_tgid = 120,
+ BPF_FUNC_xdp_output = 121,
+ BPF_FUNC_get_netns_cookie = 122,
+ BPF_FUNC_get_current_ancestor_cgroup_id = 123,
+ BPF_FUNC_sk_assign = 124,
+ BPF_FUNC_ktime_get_boot_ns = 125,
+ BPF_FUNC_seq_printf = 126,
+ BPF_FUNC_seq_write = 127,
+ BPF_FUNC_sk_cgroup_id = 128,
+ BPF_FUNC_sk_ancestor_cgroup_id = 129,
+ BPF_FUNC_ringbuf_output = 130,
+ BPF_FUNC_ringbuf_reserve = 131,
+ BPF_FUNC_ringbuf_submit = 132,
+ BPF_FUNC_ringbuf_discard = 133,
+ BPF_FUNC_ringbuf_query = 134,
+ BPF_FUNC_csum_level = 135,
+ BPF_FUNC_skc_to_tcp6_sock = 136,
+ BPF_FUNC_skc_to_tcp_sock = 137,
+ BPF_FUNC_skc_to_tcp_timewait_sock = 138,
+ BPF_FUNC_skc_to_tcp_request_sock = 139,
+ BPF_FUNC_skc_to_udp6_sock = 140,
+ BPF_FUNC_get_task_stack = 141,
+ BPF_FUNC_load_hdr_opt = 142,
+ BPF_FUNC_store_hdr_opt = 143,
+ BPF_FUNC_reserve_hdr_opt = 144,
+ BPF_FUNC_inode_storage_get = 145,
+ BPF_FUNC_inode_storage_delete = 146,
+ BPF_FUNC_d_path = 147,
+ BPF_FUNC_copy_from_user = 148,
+ BPF_FUNC_snprintf_btf = 149,
+ BPF_FUNC_seq_printf_btf = 150,
+ BPF_FUNC_skb_cgroup_classid = 151,
+ BPF_FUNC_redirect_neigh = 152,
+ BPF_FUNC_per_cpu_ptr = 153,
+ BPF_FUNC_this_cpu_ptr = 154,
+ BPF_FUNC_redirect_peer = 155,
+ BPF_FUNC_task_storage_get = 156,
+ BPF_FUNC_task_storage_delete = 157,
+ BPF_FUNC_get_current_task_btf = 158,
+ BPF_FUNC_bprm_opts_set = 159,
+ BPF_FUNC_ktime_get_coarse_ns = 160,
+ BPF_FUNC_ima_inode_hash = 161,
+ BPF_FUNC_sock_from_file = 162,
+ BPF_FUNC_check_mtu = 163,
+ BPF_FUNC_for_each_map_elem = 164,
+ BPF_FUNC_snprintf = 165,
+ BPF_FUNC_sys_bpf = 166,
+ BPF_FUNC_btf_find_by_name_kind = 167,
+ BPF_FUNC_sys_close = 168,
+ BPF_FUNC_timer_init = 169,
+ BPF_FUNC_timer_set_callback = 170,
+ BPF_FUNC_timer_start = 171,
+ BPF_FUNC_timer_cancel = 172,
+ BPF_FUNC_get_func_ip = 173,
+ BPF_FUNC_get_attach_cookie = 174,
+ BPF_FUNC_task_pt_regs = 175,
+ BPF_FUNC_get_branch_snapshot = 176,
+ BPF_FUNC_trace_vprintk = 177,
+ BPF_FUNC_skc_to_unix_sock = 178,
+ BPF_FUNC_kallsyms_lookup_name = 179,
+ BPF_FUNC_find_vma = 180,
+ BPF_FUNC_loop = 181,
+ BPF_FUNC_strncmp = 182,
+ BPF_FUNC_get_func_arg = 183,
+ BPF_FUNC_get_func_ret = 184,
+ BPF_FUNC_get_func_arg_cnt = 185,
+ BPF_FUNC_get_retval = 186,
+ BPF_FUNC_set_retval = 187,
+ BPF_FUNC_xdp_get_buff_len = 188,
+ BPF_FUNC_xdp_load_bytes = 189,
+ BPF_FUNC_xdp_store_bytes = 190,
+ BPF_FUNC_copy_from_user_task = 191,
+ BPF_FUNC_skb_set_tstamp = 192,
+ BPF_FUNC_ima_file_hash = 193,
+ BPF_FUNC_kptr_xchg = 194,
+ BPF_FUNC_map_lookup_percpu_elem = 195,
+ BPF_FUNC_skc_to_mptcp_sock = 196,
+ BPF_FUNC_dynptr_from_mem = 197,
+ BPF_FUNC_ringbuf_reserve_dynptr = 198,
+ BPF_FUNC_ringbuf_submit_dynptr = 199,
+ BPF_FUNC_ringbuf_discard_dynptr = 200,
+ BPF_FUNC_dynptr_read = 201,
+ BPF_FUNC_dynptr_write = 202,
+ BPF_FUNC_dynptr_data = 203,
+ BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204,
+ BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205,
+ BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206,
+ BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207,
+ BPF_FUNC_ktime_get_tai_ns = 208,
+ BPF_FUNC_user_ringbuf_drain = 209,
+ BPF_FUNC_cgrp_storage_get = 210,
+ BPF_FUNC_cgrp_storage_delete = 211,
+ __BPF_FUNC_MAX_ID = 212,
+};
+
+struct bpf_func_info {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+struct bpf_line_info {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
+struct bpf_map;
+
+struct btf;
+
+struct btf_type;
+
+struct bpf_prog_aux;
+
+struct bpf_map_ops {
+ int (*map_alloc_check)(union bpf_attr *);
+ struct bpf_map * (*map_alloc)(union bpf_attr *);
+ void (*map_release)(struct bpf_map *, struct file *);
+ void (*map_free)(struct bpf_map *);
+ int (*map_get_next_key)(struct bpf_map *, void *, void *);
+ void (*map_release_uref)(struct bpf_map *);
+ void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *);
+ int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *);
+ int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *);
+ int (*map_update_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *);
+ int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *);
+ void * (*map_lookup_elem)(struct bpf_map *, void *);
+ int (*map_update_elem)(struct bpf_map *, void *, void *, u64);
+ int (*map_delete_elem)(struct bpf_map *, void *);
+ int (*map_push_elem)(struct bpf_map *, void *, u64);
+ int (*map_pop_elem)(struct bpf_map *, void *);
+ int (*map_peek_elem)(struct bpf_map *, void *);
+ void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int);
+ void (*map_fd_put_ptr)(void *);
+ u32 (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *);
+ u32 (*map_fd_sys_lookup_elem)(void *);
+ void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *);
+ int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *);
+ int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *);
+ void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *);
+ void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *);
+ int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32);
+ int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *);
+ int (*map_mmap)(struct bpf_map *, struct vm_area_struct *);
+ __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *);
+ const char * const map_btf_name;
+ int *map_btf_id;
+};
+
+struct bpf_map_memory {
+ u32 pages;
+ struct user_struct *user;
+};
+
+struct bpf_map {
+ const struct bpf_map_ops *ops;
+ struct bpf_map *inner_map_meta;
+ void *security;
+ enum bpf_map_type map_type;
+ u32 key_size;
+ u32 value_size;
+ u32 max_entries;
+ u32 map_flags;
+ int spin_lock_off;
+ u32 id;
+ int numa_node;
+ u32 btf_key_type_id;
+ u32 btf_value_type_id;
+ struct btf *btf;
+ struct bpf_map_memory memory;
+ char name[16];
+ u32 btf_vmlinux_value_type_id;
+ bool bypass_spec_v1;
+ bool frozen;
+ long: 16;
+ long: 64;
+ long: 64;
+ atomic64_t refcnt;
+ atomic64_t usercnt;
+ struct work_struct work;
+ struct mutex freeze_mutex;
+ u64 writecnt;
+ long: 64;
+ long: 64;
+};
+
+struct btf_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+ __u32 type_off;
+ __u32 type_len;
+ __u32 str_off;
+ __u32 str_len;
+};
+
+struct btf {
+ void *data;
+ struct btf_type **types;
+ u32 *resolved_ids;
+ u32 *resolved_sizes;
+ const char *strings;
+ void *nohdr_data;
+ struct btf_header hdr;
+ u32 nr_types;
+ u32 types_size;
+ u32 data_size;
+ refcount_t refcnt;
+ u32 id;
+ struct callback_head rcu;
+};
+
+struct btf_type {
+ __u32 name_off;
+ __u32 info;
+ union {
+ __u32 size;
+ __u32 type;
+ };
+};
+
+enum bpf_tramp_prog_type {
+ BPF_TRAMP_FENTRY = 0,
+ BPF_TRAMP_FEXIT = 1,
+ BPF_TRAMP_MODIFY_RETURN = 2,
+ BPF_TRAMP_MAX = 3,
+ BPF_TRAMP_REPLACE = 4,
+};
+
+struct bpf_ksym {
+ long unsigned int start;
+ long unsigned int end;
+ char name[128];
+ struct list_head lnode;
+ struct latch_tree_node tnode;
+ bool prog;
+};
+
+struct bpf_ctx_arg_aux;
+
+struct bpf_trampoline;
+
+struct bpf_jit_poke_descriptor;
+
+struct bpf_prog_ops;
+
+struct bpf_prog_offload;
+
+struct bpf_func_info_aux;
+
+struct bpf_prog_stats;
+
+struct bpf_prog_aux {
+ atomic64_t refcnt;
+ u32 used_map_cnt;
+ u32 max_ctx_offset;
+ u32 max_pkt_offset;
+ u32 max_tp_access;
+ u32 stack_depth;
+ u32 id;
+ u32 func_cnt;
+ u32 func_idx;
+ u32 attach_btf_id;
+ u32 ctx_arg_info_size;
+ const struct bpf_ctx_arg_aux *ctx_arg_info;
+ struct bpf_prog *linked_prog;
+ bool verifier_zext;
+ bool offload_requested;
+ bool attach_btf_trace;
+ bool func_proto_unreliable;
+ enum bpf_tramp_prog_type trampoline_prog_type;
+ struct bpf_trampoline *trampoline;
+ struct hlist_node tramp_hlist;
+ const struct btf_type *attach_func_proto;
+ const char *attach_func_name;
+ struct bpf_prog **func;
+ void *jit_data;
+ struct bpf_jit_poke_descriptor *poke_tab;
+ u32 size_poke_tab;
+ struct bpf_ksym ksym;
+ const struct bpf_prog_ops *ops;
+ struct bpf_map **used_maps;
+ struct bpf_prog *prog;
+ struct user_struct *user;
+ u64 load_time;
+ struct bpf_map *cgroup_storage[2];
+ char name[16];
+ void *security;
+ struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ struct bpf_func_info_aux *func_info_aux;
+ struct bpf_line_info *linfo;
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ u32 linfo_idx;
+ u32 num_exentries;
+ struct exception_table_entry *extable;
+ struct bpf_prog_stats *stats;
+ union {
+ struct work_struct work;
+ struct callback_head rcu;
+ };
+};
+
+enum bpf_prog_type {
+ BPF_PROG_TYPE_UNSPEC = 0,
+ BPF_PROG_TYPE_SOCKET_FILTER = 1,
+ BPF_PROG_TYPE_KPROBE = 2,
+ BPF_PROG_TYPE_SCHED_CLS = 3,
+ BPF_PROG_TYPE_SCHED_ACT = 4,
+ BPF_PROG_TYPE_TRACEPOINT = 5,
+ BPF_PROG_TYPE_XDP = 6,
+ BPF_PROG_TYPE_PERF_EVENT = 7,
+ BPF_PROG_TYPE_CGROUP_SKB = 8,
+ BPF_PROG_TYPE_CGROUP_SOCK = 9,
+ BPF_PROG_TYPE_LWT_IN = 10,
+ BPF_PROG_TYPE_LWT_OUT = 11,
+ BPF_PROG_TYPE_LWT_XMIT = 12,
+ BPF_PROG_TYPE_SOCK_OPS = 13,
+ BPF_PROG_TYPE_SK_SKB = 14,
+ BPF_PROG_TYPE_CGROUP_DEVICE = 15,
+ BPF_PROG_TYPE_SK_MSG = 16,
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 17,
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18,
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 19,
+ BPF_PROG_TYPE_LIRC_MODE2 = 20,
+ BPF_PROG_TYPE_SK_REUSEPORT = 21,
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 22,
+ BPF_PROG_TYPE_CGROUP_SYSCTL = 23,
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24,
+ BPF_PROG_TYPE_CGROUP_SOCKOPT = 25,
+ BPF_PROG_TYPE_TRACING = 26,
+ BPF_PROG_TYPE_STRUCT_OPS = 27,
+ BPF_PROG_TYPE_EXT = 28,
+ BPF_PROG_TYPE_LSM = 29,
+};
+
+enum bpf_attach_type {
+ BPF_CGROUP_INET_INGRESS = 0,
+ BPF_CGROUP_INET_EGRESS = 1,
+ BPF_CGROUP_INET_SOCK_CREATE = 2,
+ BPF_CGROUP_SOCK_OPS = 3,
+ BPF_SK_SKB_STREAM_PARSER = 4,
+ BPF_SK_SKB_STREAM_VERDICT = 5,
+ BPF_CGROUP_DEVICE = 6,
+ BPF_SK_MSG_VERDICT = 7,
+ BPF_CGROUP_INET4_BIND = 8,
+ BPF_CGROUP_INET6_BIND = 9,
+ BPF_CGROUP_INET4_CONNECT = 10,
+ BPF_CGROUP_INET6_CONNECT = 11,
+ BPF_CGROUP_INET4_POST_BIND = 12,
+ BPF_CGROUP_INET6_POST_BIND = 13,
+ BPF_CGROUP_UDP4_SENDMSG = 14,
+ BPF_CGROUP_UDP6_SENDMSG = 15,
+ BPF_LIRC_MODE2 = 16,
+ BPF_FLOW_DISSECTOR = 17,
+ BPF_CGROUP_SYSCTL = 18,
+ BPF_CGROUP_UDP4_RECVMSG = 19,
+ BPF_CGROUP_UDP6_RECVMSG = 20,
+ BPF_CGROUP_GETSOCKOPT = 21,
+ BPF_CGROUP_SETSOCKOPT = 22,
+ BPF_TRACE_RAW_TP = 23,
+ BPF_TRACE_FENTRY = 24,
+ BPF_TRACE_FEXIT = 25,
+ BPF_MODIFY_RETURN = 26,
+ BPF_LSM_MAC = 27,
+ BPF_TRACE_ITER = 28,
+ BPF_CGROUP_INET4_GETPEERNAME = 29,
+ BPF_CGROUP_INET6_GETPEERNAME = 30,
+ BPF_CGROUP_INET4_GETSOCKNAME = 31,
+ BPF_CGROUP_INET6_GETSOCKNAME = 32,
+ BPF_XDP_DEVMAP = 33,
+ __MAX_BPF_ATTACH_TYPE = 34,
+};
+
+struct sock_filter {
+ __u16 code;
+ __u8 jt;
+ __u8 jf;
+ __u32 k;
+};
+
+struct sock_fprog_kern;
+
+struct bpf_prog {
+ u16 pages;
+ u16 jited: 1;
+ u16 jit_requested: 1;
+ u16 gpl_compatible: 1;
+ u16 cb_access: 1;
+ u16 dst_needed: 1;
+ u16 blinded: 1;
+ u16 is_func: 1;
+ u16 kprobe_override: 1;
+ u16 has_callchain_buf: 1;
+ u16 enforce_expected_attach_type: 1;
+ enum bpf_prog_type type;
+ enum bpf_attach_type expected_attach_type;
+ u32 len;
+ u32 jited_len;
+ u8 tag[8];
+ struct bpf_prog_aux *aux;
+ struct sock_fprog_kern *orig_prog;
+ unsigned int (*bpf_func)(const void *, const struct bpf_insn *);
+ struct sock_filter insns[0];
+ struct bpf_insn insnsi[0];
+};
+
+enum bpf_arg_type {
+ ARG_DONTCARE = 0,
+ ARG_CONST_MAP_PTR = 1,
+ ARG_PTR_TO_MAP_KEY = 2,
+ ARG_PTR_TO_MAP_VALUE = 3,
+ ARG_PTR_TO_UNINIT_MAP_VALUE = 4,
+ ARG_PTR_TO_MAP_VALUE_OR_NULL = 5,
+ ARG_PTR_TO_MEM = 6,
+ ARG_PTR_TO_MEM_OR_NULL = 7,
+ ARG_PTR_TO_UNINIT_MEM = 8,
+ ARG_CONST_SIZE = 9,
+ ARG_CONST_SIZE_OR_ZERO = 10,
+ ARG_PTR_TO_CTX = 11,
+ ARG_PTR_TO_CTX_OR_NULL = 12,
+ ARG_ANYTHING = 13,
+ ARG_PTR_TO_SPIN_LOCK = 14,
+ ARG_PTR_TO_SOCK_COMMON = 15,
+ ARG_PTR_TO_INT = 16,
+ ARG_PTR_TO_LONG = 17,
+ ARG_PTR_TO_SOCKET = 18,
+ ARG_PTR_TO_BTF_ID = 19,
+ ARG_PTR_TO_ALLOC_MEM = 20,
+ ARG_PTR_TO_ALLOC_MEM_OR_NULL = 21,
+ ARG_CONST_ALLOC_SIZE_OR_ZERO = 22,
+};
+
+enum bpf_return_type {
+ RET_INTEGER = 0,
+ RET_VOID = 1,
+ RET_PTR_TO_MAP_VALUE = 2,
+ RET_PTR_TO_MAP_VALUE_OR_NULL = 3,
+ RET_PTR_TO_SOCKET_OR_NULL = 4,
+ RET_PTR_TO_TCP_SOCK_OR_NULL = 5,
+ RET_PTR_TO_SOCK_COMMON_OR_NULL = 6,
+ RET_PTR_TO_ALLOC_MEM_OR_NULL = 7,
+};
+
+struct bpf_func_proto {
+ u64 (*func)(u64, u64, u64, u64, u64);
+ bool gpl_only;
+ bool pkt_access;
+ enum bpf_return_type ret_type;
+ union {
+ struct {
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+ };
+ enum bpf_arg_type arg_type[5];
+ };
+ int *btf_id;
+};
+
+enum bpf_access_type {
+ BPF_READ = 1,
+ BPF_WRITE = 2,
+};
+
+enum bpf_reg_type {
+ NOT_INIT = 0,
+ SCALAR_VALUE = 1,
+ PTR_TO_CTX = 2,
+ CONST_PTR_TO_MAP = 3,
+ PTR_TO_MAP_VALUE = 4,
+ PTR_TO_MAP_VALUE_OR_NULL = 5,
+ PTR_TO_STACK = 6,
+ PTR_TO_PACKET_META = 7,
+ PTR_TO_PACKET = 8,
+ PTR_TO_PACKET_END = 9,
+ PTR_TO_FLOW_KEYS = 10,
+ PTR_TO_SOCKET = 11,
+ PTR_TO_SOCKET_OR_NULL = 12,
+ PTR_TO_SOCK_COMMON = 13,
+ PTR_TO_SOCK_COMMON_OR_NULL = 14,
+ PTR_TO_TCP_SOCK = 15,
+ PTR_TO_TCP_SOCK_OR_NULL = 16,
+ PTR_TO_TP_BUFFER = 17,
+ PTR_TO_XDP_SOCK = 18,
+ PTR_TO_BTF_ID = 19,
+ PTR_TO_BTF_ID_OR_NULL = 20,
+ PTR_TO_MEM = 21,
+ PTR_TO_MEM_OR_NULL = 22,
+};
+
+struct bpf_verifier_log;
+
+struct bpf_insn_access_aux {
+ enum bpf_reg_type reg_type;
+ union {
+ int ctx_field_size;
+ u32 btf_id;
+ };
+ struct bpf_verifier_log *log;
+};
+
+struct bpf_prog_ops {
+ int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *);
+};
+
+struct bpf_verifier_ops {
+ const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *);
+ bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *);
+ int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *);
+ int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *);
+ u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *);
+ int (*btf_struct_access)(struct bpf_verifier_log *, const struct btf_type *, int, int, enum bpf_access_type, u32 *);
+};
+
+struct net_device;
+
+struct bpf_offload_dev;
+
+struct bpf_prog_offload {
+ struct bpf_prog *prog;
+ struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
+ void *dev_priv;
+ struct list_head offloads;
+ bool dev_state;
+ bool opt_failed;
+ void *jited_image;
+ u32 jited_len;
+};
+
+struct bpf_prog_stats {
+ u64 cnt;
+ u64 nsecs;
+ struct u64_stats_sync syncp;
+};
+
+struct btf_func_model {
+ u8 ret_size;
+ u8 nr_args;
+ u8 arg_size[12];
+};
+
+struct bpf_trampoline {
+ struct hlist_node hlist;
+ struct mutex mutex;
+ refcount_t refcnt;
+ u64 key;
+ struct {
+ struct btf_func_model model;
+ void *addr;
+ bool ftrace_managed;
+ } func;
+ struct bpf_prog *extension_prog;
+ struct hlist_head progs_hlist[3];
+ int progs_cnt[3];
+ void *image;
+ u64 selector;
+ struct bpf_ksym ksym;
+};
+
+struct bpf_func_info_aux {
+ u16 linkage;
+ bool unreliable;
+};
+
+struct bpf_jit_poke_descriptor {
+ void *ip;
+ union {
+ struct {
+ struct bpf_map *map;
+ u32 key;
+ } tail_call;
+ };
+ bool ip_stable;
+ u8 adj_off;
+ u16 reason;
+};
+
+struct bpf_ctx_arg_aux {
+ u32 offset;
+ enum bpf_reg_type reg_type;
+};
+
+struct bpf_cgroup_storage;
+
+struct bpf_prog_array_item {
+ struct bpf_prog *prog;
+ struct bpf_cgroup_storage *cgroup_storage[2];
+};
+
+struct bpf_storage_buffer;
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_cgroup_storage {
+ union {
+ struct bpf_storage_buffer *buf;
+ void *percpu_buf;
+ };
+ struct bpf_cgroup_storage_map *map;
+ struct bpf_cgroup_storage_key key;
+ struct list_head list;
+ struct rb_node node;
+ struct callback_head rcu;
+};
+
+struct bpf_prog_array {
+ struct callback_head rcu;
+ struct bpf_prog_array_item items[0];
+};
+
+struct bpf_storage_buffer {
+ struct callback_head rcu;
+ char data[0];
+};
+
+struct cgroup_taskset;
+
+struct cftype;
+
+struct cgroup_subsys {
+ struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);
+ int (*css_online)(struct cgroup_subsys_state *);
+ void (*css_offline)(struct cgroup_subsys_state *);
+ void (*css_released)(struct cgroup_subsys_state *);
+ void (*css_free)(struct cgroup_subsys_state *);
+ void (*css_reset)(struct cgroup_subsys_state *);
+ void (*css_rstat_flush)(struct cgroup_subsys_state *, int);
+ int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *);
+ int (*can_attach)(struct cgroup_taskset *);
+ void (*cancel_attach)(struct cgroup_taskset *);
+ void (*attach)(struct cgroup_taskset *);
+ void (*post_attach)();
+ int (*can_fork)(struct task_struct *, struct css_set *);
+ void (*cancel_fork)(struct task_struct *, struct css_set *);
+ void (*fork)(struct task_struct *);
+ void (*exit)(struct task_struct *);
+ void (*release)(struct task_struct *);
+ void (*bind)(struct cgroup_subsys_state *);
+ bool early_init: 1;
+ bool implicit_on_dfl: 1;
+ bool threaded: 1;
+ bool broken_hierarchy: 1;
+ bool warned_broken_hierarchy: 1;
+ int id;
+ const char *name;
+ const char *legacy_name;
+ struct cgroup_root *root;
+ struct idr css_idr;
+ struct list_head cfts;
+ struct cftype *dfl_cftypes;
+ struct cftype *legacy_cftypes;
+ unsigned int depends_on;
+};
+
+struct cgroup_rstat_cpu {
+ struct u64_stats_sync bsync;
+ struct cgroup_base_stat bstat;
+ struct cgroup_base_stat last_bstat;
+ struct cgroup *updated_children;
+ struct cgroup *updated_next;
+};
+
+struct cgroup_root {
+ struct kernfs_root *kf_root;
+ unsigned int subsys_mask;
+ int hierarchy_id;
+ struct cgroup cgrp;
+ u64 cgrp_ancestor_id_storage;
+ atomic_t nr_cgrps;
+ struct list_head root_list;
+ unsigned int flags;
+ char release_agent_path[4096];
+ char name[64];
+};
+
+struct cftype {
+ char name[64];
+ long unsigned int private;
+ size_t max_write_len;
+ unsigned int flags;
+ unsigned int file_offset;
+ struct cgroup_subsys *ss;
+ struct list_head node;
+ struct kernfs_ops *kf_ops;
+ int (*open)(struct kernfs_open_file *);
+ void (*release)(struct kernfs_open_file *);
+ u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *);
+ s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *);
+ int (*seq_show)(struct seq_file *, void *);
+ void * (*seq_start)(struct seq_file *, loff_t *);
+ void * (*seq_next)(struct seq_file *, void *, loff_t *);
+ void (*seq_stop)(struct seq_file *, void *);
+ int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64);
+ int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64);
+ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t);
+ __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *);
+ struct lock_class_key lockdep_key;
+};
+
+enum kmalloc_cache_type {
+ KMALLOC_NORMAL = 0,
+ KMALLOC_RECLAIM = 1,
+ KMALLOC_DMA = 2,
+ NR_KMALLOC_TYPES = 3,
+};
+
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[0];
+};
+
+typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int);
+
+struct perf_raw_frag {
+ union {
+ struct perf_raw_frag *next;
+ long unsigned int pad;
+ };
+ perf_copy_f copy;
+ void *data;
+ u32 size;
+} __attribute__((packed));
+
+struct perf_raw_record {
+ struct perf_raw_frag frag;
+ u32 size;
+};
+
+struct perf_branch_stack {
+ __u64 nr;
+ __u64 hw_idx;
+ struct perf_branch_entry entries[0];
+};
+
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int active_oncpu;
+ int exclusive;
+ raw_spinlock_t hrtimer_lock;
+ struct hrtimer hrtimer;
+ ktime_t hrtimer_interval;
+ unsigned int hrtimer_active;
+ struct perf_cgroup *cgrp;
+ struct list_head cgrp_cpuctx_entry;
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
+ int online;
+ int heap_size;
+ struct perf_event **heap;
+ struct perf_event *heap_default[2];
+};
+
+struct perf_output_handle {
+ struct perf_event *event;
+ struct perf_buffer *rb;
+ long unsigned int wakeup;
+ long unsigned int size;
+ u64 aux_flags;
+ union {
+ void *addr;
+ long unsigned int head;
+ };
+ int page;
+};
+
+struct perf_addr_filter_range {
+ long unsigned int start;
+ long unsigned int size;
+};
+
+struct perf_sample_data {
+ u64 addr;
+ struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
+ u64 period;
+ u64 weight;
+ u64 txn;
+ union perf_mem_data_src data_src;
+ u64 type;
+ u64 ip;
+ struct {
+ u32 pid;
+ u32 tid;
+ } tid_entry;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+ struct {
+ u32 cpu;
+ u32 reserved;
+ } cpu_entry;
+ struct perf_callchain_entry *callchain;
+ u64 aux_size;
+ struct perf_regs regs_user;
+ struct pt_regs regs_user_copy;
+ struct perf_regs regs_intr;
+ u64 stack_user_size;
+ u64 phys_addr;
+ u64 cgroup;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct perf_cgroup_info;
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info *info;
+};
+
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct trace_entry {
+ short unsigned int type;
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+};
+
+struct trace_array;
+
+struct tracer;
+
+struct array_buffer;
+
+struct ring_buffer_iter;
+
+struct trace_iterator {
+ struct trace_array *tr;
+ struct tracer *trace;
+ struct array_buffer *array_buffer;
+ void *private;
+ int cpu_file;
+ struct mutex mutex;
+ struct ring_buffer_iter **buffer_iter;
+ long unsigned int iter_flags;
+ void *temp;
+ unsigned int temp_size;
+ struct trace_seq tmp_seq;
+ cpumask_var_t started;
+ bool snapshot;
+ struct trace_seq seq;
+ struct trace_entry *ent;
+ long unsigned int lost_events;
+ int leftover;
+ int ent_size;
+ int cpu;
+ u64 ts;
+ loff_t pos;
+ long int idx;
+};
+
+enum print_line_t {
+ TRACE_TYPE_PARTIAL_LINE = 0,
+ TRACE_TYPE_HANDLED = 1,
+ TRACE_TYPE_UNHANDLED = 2,
+ TRACE_TYPE_NO_CONSUME = 3,
+};
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *);
+
+struct trace_event_functions {
+ trace_print_func trace;
+ trace_print_func raw;
+ trace_print_func hex;
+ trace_print_func binary;
+};
+
+enum trace_reg {
+ TRACE_REG_REGISTER = 0,
+ TRACE_REG_UNREGISTER = 1,
+ TRACE_REG_PERF_REGISTER = 2,
+ TRACE_REG_PERF_UNREGISTER = 3,
+ TRACE_REG_PERF_OPEN = 4,
+ TRACE_REG_PERF_CLOSE = 5,
+ TRACE_REG_PERF_ADD = 6,
+ TRACE_REG_PERF_DEL = 7,
+};
+
+struct trace_event_fields {
+ const char *type;
+ union {
+ struct {
+ const char *name;
+ const int size;
+ const int align;
+ const int is_signed;
+ const int filter_type;
+ };
+ int (*define_fields)(struct trace_event_call *);
+ };
+};
+
+struct trace_event_class {
+ const char *system;
+ void *probe;
+ void *perf_probe;
+ int (*reg)(struct trace_event_call *, enum trace_reg, void *);
+ struct trace_event_fields *fields_array;
+ struct list_head * (*get_fields)(struct trace_event_call *);
+ struct list_head fields;
+ int (*raw_init)(struct trace_event_call *);
+};
+
+struct trace_buffer;
+
+struct trace_event_file;
+
+struct trace_event_buffer {
+ struct trace_buffer *buffer;
+ struct ring_buffer_event *event;
+ struct trace_event_file *trace_file;
+ void *entry;
+ long unsigned int flags;
+ int pc;
+ struct pt_regs *regs;
+};
+
+struct trace_subsystem_dir;
+
+struct trace_event_file {
+ struct list_head list;
+ struct trace_event_call *event_call;
+ struct event_filter *filter;
+ struct dentry *dir;
+ struct trace_array *tr;
+ struct trace_subsystem_dir *system;
+ struct list_head triggers;
+ long unsigned int flags;
+ atomic_t sm_ref;
+ atomic_t tm_ref;
+};
+
+enum {
+ TRACE_EVENT_FL_FILTERED_BIT = 0,
+ TRACE_EVENT_FL_CAP_ANY_BIT = 1,
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT = 2,
+ TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 3,
+ TRACE_EVENT_FL_TRACEPOINT_BIT = 4,
+ TRACE_EVENT_FL_KPROBE_BIT = 5,
+ TRACE_EVENT_FL_UPROBE_BIT = 6,
+};
+
+enum {
+ TRACE_EVENT_FL_FILTERED = 1,
+ TRACE_EVENT_FL_CAP_ANY = 2,
+ TRACE_EVENT_FL_NO_SET_FILTER = 4,
+ TRACE_EVENT_FL_IGNORE_ENABLE = 8,
+ TRACE_EVENT_FL_TRACEPOINT = 16,
+ TRACE_EVENT_FL_KPROBE = 32,
+ TRACE_EVENT_FL_UPROBE = 64,
+};
+
+enum {
+ EVENT_FILE_FL_ENABLED_BIT = 0,
+ EVENT_FILE_FL_RECORDED_CMD_BIT = 1,
+ EVENT_FILE_FL_RECORDED_TGID_BIT = 2,
+ EVENT_FILE_FL_FILTERED_BIT = 3,
+ EVENT_FILE_FL_NO_SET_FILTER_BIT = 4,
+ EVENT_FILE_FL_SOFT_MODE_BIT = 5,
+ EVENT_FILE_FL_SOFT_DISABLED_BIT = 6,
+ EVENT_FILE_FL_TRIGGER_MODE_BIT = 7,
+ EVENT_FILE_FL_TRIGGER_COND_BIT = 8,
+ EVENT_FILE_FL_PID_FILTER_BIT = 9,
+ EVENT_FILE_FL_WAS_ENABLED_BIT = 10,
+};
+
+enum {
+ EVENT_FILE_FL_ENABLED = 1,
+ EVENT_FILE_FL_RECORDED_CMD = 2,
+ EVENT_FILE_FL_RECORDED_TGID = 4,
+ EVENT_FILE_FL_FILTERED = 8,
+ EVENT_FILE_FL_NO_SET_FILTER = 16,
+ EVENT_FILE_FL_SOFT_MODE = 32,
+ EVENT_FILE_FL_SOFT_DISABLED = 64,
+ EVENT_FILE_FL_TRIGGER_MODE = 128,
+ EVENT_FILE_FL_TRIGGER_COND = 256,
+ EVENT_FILE_FL_PID_FILTER = 512,
+ EVENT_FILE_FL_WAS_ENABLED = 1024,
+};
+
+enum {
+ FILTER_OTHER = 0,
+ FILTER_STATIC_STRING = 1,
+ FILTER_DYN_STRING = 2,
+ FILTER_PTR_STRING = 3,
+ FILTER_TRACE_FN = 4,
+ FILTER_COMM = 5,
+ FILTER_CPU = 6,
+};
+
+struct property {
+ char *name;
+ int length;
+ void *value;
+ struct property *next;
+};
+
+struct irq_fwspec {
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[16];
+};
+
+struct irq_data;
+
+struct irq_domain_ops {
+ int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token);
+ int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token);
+ int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t);
+ void (*unmap)(struct irq_domain *, unsigned int);
+ int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *);
+ int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *);
+ void (*free)(struct irq_domain *, unsigned int, unsigned int);
+ int (*activate)(struct irq_domain *, struct irq_data *, bool);
+ void (*deactivate)(struct irq_domain *, struct irq_data *);
+ int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *);
+};
+
+struct acpi_table_header {
+ char signature[4];
+ u32 length;
+ u8 revision;
+ u8 checksum;
+ char oem_id[6];
+ char oem_table_id[8];
+ u32 oem_revision;
+ char asl_compiler_id[4];
+ u32 asl_compiler_revision;
+};
+
+struct acpi_generic_address {
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_width;
+ u64 address;
+} __attribute__((packed));
+
+struct acpi_table_fadt {
+ struct acpi_table_header header;
+ u32 facs;
+ u32 dsdt;
+ u8 model;
+ u8 preferred_profile;
+ u16 sci_interrupt;
+ u32 smi_command;
+ u8 acpi_enable;
+ u8 acpi_disable;
+ u8 s4_bios_request;
+ u8 pstate_control;
+ u32 pm1a_event_block;
+ u32 pm1b_event_block;
+ u32 pm1a_control_block;
+ u32 pm1b_control_block;
+ u32 pm2_control_block;
+ u32 pm_timer_block;
+ u32 gpe0_block;
+ u32 gpe1_block;
+ u8 pm1_event_length;
+ u8 pm1_control_length;
+ u8 pm2_control_length;
+ u8 pm_timer_length;
+ u8 gpe0_block_length;
+ u8 gpe1_block_length;
+ u8 gpe1_base;
+ u8 cst_control;
+ u16 c2_latency;
+ u16 c3_latency;
+ u16 flush_size;
+ u16 flush_stride;
+ u8 duty_offset;
+ u8 duty_width;
+ u8 day_alarm;
+ u8 month_alarm;
+ u8 century;
+ u16 boot_flags;
+ u8 reserved;
+ u32 flags;
+ struct acpi_generic_address reset_register;
+ u8 reset_value;
+ u16 arm_boot_flags;
+ u8 minor_revision;
+ u64 Xfacs;
+ u64 Xdsdt;
+ struct acpi_generic_address xpm1a_event_block;
+ struct acpi_generic_address xpm1b_event_block;
+ struct acpi_generic_address xpm1a_control_block;
+ struct acpi_generic_address xpm1b_control_block;
+ struct acpi_generic_address xpm2_control_block;
+ struct acpi_generic_address xpm_timer_block;
+ struct acpi_generic_address xgpe0_block;
+ struct acpi_generic_address xgpe1_block;
+ struct acpi_generic_address sleep_control;
+ struct acpi_generic_address sleep_status;
+ u64 hypervisor_id;
+} __attribute__((packed));
+
+enum acpi_irq_model_id {
+ ACPI_IRQ_MODEL_PIC = 0,
+ ACPI_IRQ_MODEL_IOAPIC = 1,
+ ACPI_IRQ_MODEL_IOSAPIC = 2,
+ ACPI_IRQ_MODEL_PLATFORM = 3,
+ ACPI_IRQ_MODEL_GIC = 4,
+ ACPI_IRQ_MODEL_COUNT = 5,
+};
+
+enum con_scroll {
+ SM_UP = 0,
+ SM_DOWN = 1,
+};
+
+struct vc_data;
+
+struct console_font;
+
+struct consw {
+ struct module *owner;
+ const char * (*con_startup)();
+ void (*con_init)(struct vc_data *, int);
+ void (*con_deinit)(struct vc_data *);
+ void (*con_clear)(struct vc_data *, int, int, int, int);
+ void (*con_putc)(struct vc_data *, int, int, int);
+ void (*con_putcs)(struct vc_data *, const short unsigned int *, int, int, int);
+ void (*con_cursor)(struct vc_data *, int);
+ bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int);
+ int (*con_switch)(struct vc_data *);
+ int (*con_blank)(struct vc_data *, int, int);
+ int (*con_font_set)(struct vc_data *, struct console_font *, unsigned int);
+ int (*con_font_get)(struct vc_data *, struct console_font *);
+ int (*con_font_default)(struct vc_data *, struct console_font *, char *);
+ int (*con_font_copy)(struct vc_data *, int);
+ int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int);
+ void (*con_set_palette)(struct vc_data *, const unsigned char *);
+ void (*con_scrolldelta)(struct vc_data *, int);
+ int (*con_set_origin)(struct vc_data *);
+ void (*con_save_screen)(struct vc_data *);
+ u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
+ void (*con_invert_region)(struct vc_data *, u16 *, int);
+ u16 * (*con_screen_pos)(struct vc_data *, int);
+ long unsigned int (*con_getxy)(struct vc_data *, long unsigned int, int *, int *);
+ void (*con_flush_scrollback)(struct vc_data *);
+ int (*con_debug_enter)(struct vc_data *);
+ int (*con_debug_leave)(struct vc_data *);
+};
+
+struct tty_driver;
+
+struct console {
+ char name[16];
+ void (*write)(struct console *, const char *, unsigned int);
+ int (*read)(struct console *, char *, unsigned int);
+ struct tty_driver * (*device)(struct console *, int *);
+ void (*unblank)();
+ int (*setup)(struct console *, char *);
+ int (*exit)(struct console *);
+ int (*match)(struct console *, char *, int, char *);
+ short int flags;
+ short int index;
+ int cflag;
+ void *data;
+ struct console *next;
+};
+
+enum wb_stat_item {
+ WB_RECLAIMABLE = 0,
+ WB_WRITEBACK = 1,
+ WB_DIRTIED = 2,
+ WB_WRITTEN = 3,
+ NR_WB_STAT_ITEMS = 4,
+};
+
+struct bdi_writeback_congested {
+ long unsigned int state;
+ refcount_t refcnt;
+ struct backing_dev_info *__bdi;
+ int blkcg_id;
+ struct rb_node rb_node;
+};
+
+struct blkg_iostat {
+ u64 bytes[3];
+ u64 ios[3];
+};
+
+struct blkg_iostat_set {
+ struct u64_stats_sync sync;
+ struct blkg_iostat cur;
+ struct blkg_iostat last;
+};
+
+struct blkcg;
+
+struct blkg_policy_data;
+
+struct blkcg_gq {
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+ struct bdi_writeback_congested *wb_congested;
+ struct blkcg_gq *parent;
+ struct percpu_ref refcnt;
+ bool online;
+ struct blkg_iostat_set *iostat_cpu;
+ struct blkg_iostat_set iostat;
+ struct blkg_policy_data *pd[5];
+ spinlock_t async_bio_lock;
+ struct bio_list async_bios;
+ struct work_struct async_bio_work;
+ atomic_t use_delay;
+ atomic64_t delay_nsec;
+ atomic64_t delay_start;
+ u64 last_delay;
+ int last_use;
+ struct callback_head callback_head;
+};
+
+struct partition_meta_info {
+ char uuid[37];
+ u8 volname[64];
+};
+
+struct disk_part_tbl {
+ struct callback_head callback_head;
+ int len;
+ struct hd_struct *last_lookup;
+ struct hd_struct *part[0];
+};
+
+struct blk_zone;
+
+typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *);
+
+struct hd_geometry;
+
+struct pr_ops;
+
+struct block_device_operations {
+ int (*open)(struct block_device *, fmode_t);
+ void (*release)(struct gendisk *, fmode_t);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
+ int (*ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int);
+ int (*compat_ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int);
+ unsigned int (*check_events)(struct gendisk *, unsigned int);
+ int (*media_changed)(struct gendisk *);
+ void (*unlock_native_capacity)(struct gendisk *);
+ int (*revalidate_disk)(struct gendisk *);
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
+ void (*swap_slot_free_notify)(struct block_device *, long unsigned int);
+ int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *);
+ char * (*devnode)(struct gendisk *, umode_t *);
+ struct module *owner;
+ const struct pr_ops *pr_ops;
+};
+
+struct sg_io_v4 {
+ __s32 guard;
+ __u32 protocol;
+ __u32 subprotocol;
+ __u32 request_len;
+ __u64 request;
+ __u64 request_tag;
+ __u32 request_attr;
+ __u32 request_priority;
+ __u32 request_extra;
+ __u32 max_response_len;
+ __u64 response;
+ __u32 dout_iovec_count;
+ __u32 dout_xfer_len;
+ __u32 din_iovec_count;
+ __u32 din_xfer_len;
+ __u64 dout_xferp;
+ __u64 din_xferp;
+ __u32 timeout;
+ __u32 flags;
+ __u64 usr_ptr;
+ __u32 spare_in;
+ __u32 driver_status;
+ __u32 transport_status;
+ __u32 device_status;
+ __u32 retry_delay;
+ __u32 info;
+ __u32 duration;
+ __u32 response_len;
+ __s32 din_resid;
+ __s32 dout_resid;
+ __u64 generated_tag;
+ __u32 spare_out;
+ __u32 padding;
+};
+
+struct bsg_ops {
+ int (*check_proto)(struct sg_io_v4 *);
+ int (*fill_hdr)(struct request *, struct sg_io_v4 *, fmode_t);
+ int (*complete_rq)(struct request *, struct sg_io_v4 *);
+ void (*free_rq)(struct request *);
+};
+
+typedef __u32 req_flags_t;
+
+typedef void rq_end_io_fn(struct request *, blk_status_t);
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+ unsigned int cmd_flags;
+ req_flags_t rq_flags;
+ int tag;
+ int internal_tag;
+ unsigned int __data_len;
+ sector_t __sector;
+ struct bio *bio;
+ struct bio *biotail;
+ struct list_head queuelist;
+ union {
+ struct hlist_node hash;
+ struct list_head ipi_list;
+ };
+ union {
+ struct rb_node rb_node;
+ struct bio_vec special_vec;
+ void *completion_data;
+ int error_count;
+ };
+ union {
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+ };
+ struct gendisk *rq_disk;
+ struct hd_struct *part;
+ u64 start_time_ns;
+ u64 io_start_time_ns;
+ short unsigned int stats_sectors;
+ short unsigned int nr_phys_segments;
+ short unsigned int write_hint;
+ short unsigned int ioprio;
+ enum mq_rq_state state;
+ refcount_t ref;
+ unsigned int timeout;
+ long unsigned int deadline;
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+struct blk_zone {
+ __u64 start;
+ __u64 len;
+ __u64 wp;
+ __u8 type;
+ __u8 cond;
+ __u8 non_seq;
+ __u8 reset;
+ __u8 reserved[36];
+};
+
+enum elv_merge {
+ ELEVATOR_NO_MERGE = 0,
+ ELEVATOR_FRONT_MERGE = 1,
+ ELEVATOR_BACK_MERGE = 2,
+ ELEVATOR_DISCARD_MERGE = 3,
+};
+
+struct elevator_type;
+
+struct blk_mq_alloc_data;
+
+struct elevator_mq_ops {
+ int (*init_sched)(struct request_queue *, struct elevator_type *);
+ void (*exit_sched)(struct elevator_queue *);
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*depth_updated)(struct blk_mq_hw_ctx *);
+ bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+ bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
+ int (*request_merge)(struct request_queue *, struct request **, struct bio *);
+ void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
+ void (*requests_merged)(struct request_queue *, struct request *, struct request *);
+ void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
+ void (*prepare_request)(struct request *);
+ void (*finish_request)(struct request *);
+ void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
+ struct request * (*dispatch_request)(struct blk_mq_hw_ctx *);
+ bool (*has_work)(struct blk_mq_hw_ctx *);
+ void (*completed_request)(struct request *, u64);
+ void (*requeue_request)(struct request *);
+ struct request * (*former_request)(struct request_queue *, struct request *);
+ struct request * (*next_request)(struct request_queue *, struct request *);
+ void (*init_icq)(struct io_cq *);
+ void (*exit_icq)(struct io_cq *);
+};
+
+struct elv_fs_entry;
+
+struct blk_mq_debugfs_attr;
+
+struct elevator_type {
+ struct kmem_cache *icq_cache;
+ struct elevator_mq_ops ops;
+ size_t icq_size;
+ size_t icq_align;
+ struct elv_fs_entry *elevator_attrs;
+ const char *elevator_name;
+ const char *elevator_alias;
+ const unsigned int elevator_features;
+ struct module *elevator_owner;
+ const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
+ const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
+ char icq_cache_name[22];
+ struct list_head list;
+};
+
+struct elevator_queue {
+ struct elevator_type *type;
+ void *elevator_data;
+ struct kobject kobj;
+ struct mutex sysfs_lock;
+ unsigned int registered: 1;
+ struct hlist_head hash[64];
+};
+
+struct elv_fs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct elevator_queue *, char *);
+ ssize_t (*store)(struct elevator_queue *, const char *, size_t);
+};
+
+struct blk_mq_debugfs_attr {
+ const char *name;
+ umode_t mode;
+ int (*show)(void *, struct seq_file *);
+ ssize_t (*write)(void *, const char *, size_t, loff_t *);
+ const struct seq_operations *seq_ops;
+};
+
+struct blk_mq_queue_data;
+
+typedef blk_status_t queue_rq_fn(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+
+typedef void commit_rqs_fn(struct blk_mq_hw_ctx *);
+
+typedef bool get_budget_fn(struct blk_mq_hw_ctx *);
+
+typedef void put_budget_fn(struct blk_mq_hw_ctx *);
+
+enum blk_eh_timer_return {
+ BLK_EH_DONE = 0,
+ BLK_EH_RESET_TIMER = 1,
+};
+
+typedef enum blk_eh_timer_return timeout_fn(struct request *, bool);
+
+typedef int poll_fn(struct blk_mq_hw_ctx *);
+
+typedef void complete_fn(struct request *);
+
+typedef int init_hctx_fn(struct blk_mq_hw_ctx *, void *, unsigned int);
+
+typedef void exit_hctx_fn(struct blk_mq_hw_ctx *, unsigned int);
+
+typedef int init_request_fn(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int);
+
+typedef void exit_request_fn(struct blk_mq_tag_set *, struct request *, unsigned int);
+
+typedef void cleanup_rq_fn(struct request *);
+
+typedef bool busy_fn(struct request_queue *);
+
+typedef int map_queues_fn(struct blk_mq_tag_set *);
+
+struct blk_mq_ops {
+ queue_rq_fn *queue_rq;
+ commit_rqs_fn *commit_rqs;
+ get_budget_fn *get_budget;
+ put_budget_fn *put_budget;
+ timeout_fn *timeout;
+ poll_fn *poll;
+ complete_fn *complete;
+ init_hctx_fn *init_hctx;
+ exit_hctx_fn *exit_hctx;
+ init_request_fn *init_request;
+ exit_request_fn *exit_request;
+ void (*initialize_rq_fn)(struct request *);
+ cleanup_rq_fn *cleanup_rq;
+ busy_fn *busy;
+ map_queues_fn *map_queues;
+ void (*show_rq)(struct seq_file *, struct request *);
+};
+
+enum pr_type {
+ PR_WRITE_EXCLUSIVE = 1,
+ PR_EXCLUSIVE_ACCESS = 2,
+ PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
+ PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
+ PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
+ PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
+};
+
+struct pr_ops {
+ int (*pr_register)(struct block_device *, u64, u64, u32);
+ int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32);
+ int (*pr_release)(struct block_device *, u64, enum pr_type);
+ int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool);
+ int (*pr_clear)(struct block_device *, u64);
+};
+
+enum blkg_iostat_type {
+ BLKG_IOSTAT_READ = 0,
+ BLKG_IOSTAT_WRITE = 1,
+ BLKG_IOSTAT_DISCARD = 2,
+ BLKG_IOSTAT_NR = 3,
+};
+
+struct blkcg_policy_data;
+
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+ refcount_t online_pin;
+ struct xarray blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+ struct blkcg_policy_data *cpd[5];
+ struct list_head all_blkcgs_node;
+ struct list_head cgwb_list;
+};
+
+struct blkcg_policy_data {
+ struct blkcg *blkcg;
+ int plid;
+};
+
+struct blkg_policy_data {
+ struct blkcg_gq *blkg;
+ int plid;
+};
+
+enum reboot_mode {
+ REBOOT_UNDEFINED = -1,
+ REBOOT_COLD = 0,
+ REBOOT_WARM = 1,
+ REBOOT_HARD = 2,
+ REBOOT_SOFT = 3,
+ REBOOT_GPIO = 4,
+};
+
+enum reboot_type {
+ BOOT_TRIPLE = 116,
+ BOOT_KBD = 107,
+ BOOT_BIOS = 98,
+ BOOT_ACPI = 97,
+ BOOT_EFI = 101,
+ BOOT_CF9_FORCE = 112,
+ BOOT_CF9_SAFE = 113,
+};
+
+typedef long unsigned int efi_status_t;
+
+typedef u8 efi_bool_t;
+
+typedef u16 efi_char16_t;
+
+typedef guid_t efi_guid_t;
+
+typedef struct {
+ u64 signature;
+ u32 revision;
+ u32 headersize;
+ u32 crc32;
+ u32 reserved;
+} efi_table_hdr_t;
+
+typedef struct {
+ u32 type;
+ u32 pad;
+ u64 phys_addr;
+ u64 virt_addr;
+ u64 num_pages;
+ u64 attribute;
+} efi_memory_desc_t;
+
+typedef struct {
+ efi_guid_t guid;
+ u32 headersize;
+ u32 flags;
+ u32 imagesize;
+} efi_capsule_header_t;
+
+typedef struct {
+ u16 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 pad1;
+ u32 nanosecond;
+ s16 timezone;
+ u8 daylight;
+ u8 pad2;
+} efi_time_t;
+
+typedef struct {
+ u32 resolution;
+ u32 accuracy;
+ u8 sets_to_zero;
+} efi_time_cap_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 get_time;
+ u32 set_time;
+ u32 get_wakeup_time;
+ u32 set_wakeup_time;
+ u32 set_virtual_address_map;
+ u32 convert_pointer;
+ u32 get_variable;
+ u32 get_next_variable;
+ u32 set_variable;
+ u32 get_next_high_mono_count;
+ u32 reset_system;
+ u32 update_capsule;
+ u32 query_capsule_caps;
+ u32 query_variable_info;
+} efi_runtime_services_32_t;
+
+typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *);
+
+typedef efi_status_t efi_set_time_t(efi_time_t *);
+
+typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *);
+
+typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *);
+
+typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *);
+
+typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *);
+
+typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *);
+
+typedef efi_status_t efi_get_next_high_mono_count_t(u32 *);
+
+typedef void efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *);
+
+typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *);
+
+typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long unsigned int);
+
+typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *);
+
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ efi_status_t (*get_time)(efi_time_t *, efi_time_cap_t *);
+ efi_status_t (*set_time)(efi_time_t *);
+ efi_status_t (*get_wakeup_time)(efi_bool_t *, efi_bool_t *, efi_time_t *);
+ efi_status_t (*set_wakeup_time)(efi_bool_t, efi_time_t *);
+ efi_status_t (*set_virtual_address_map)(long unsigned int, long unsigned int, u32, efi_memory_desc_t *);
+ void *convert_pointer;
+ efi_status_t (*get_variable)(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *);
+ efi_status_t (*get_next_variable)(long unsigned int *, efi_char16_t *, efi_guid_t *);
+ efi_status_t (*set_variable)(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *);
+ efi_status_t (*get_next_high_mono_count)(u32 *);
+ void (*reset_system)(int, efi_status_t, long unsigned int, efi_char16_t *);
+ efi_status_t (*update_capsule)(efi_capsule_header_t **, long unsigned int, long unsigned int);
+ efi_status_t (*query_capsule_caps)(efi_capsule_header_t **, long unsigned int, u64 *, int *);
+ efi_status_t (*query_variable_info)(u32, u64 *, u64 *, u64 *);
+ };
+ efi_runtime_services_32_t mixed_mode;
+} efi_runtime_services_t;
+
+struct efi_memory_map {
+ phys_addr_t phys_map;
+ void *map;
+ void *map_end;
+ int nr_map;
+ long unsigned int desc_version;
+ long unsigned int desc_size;
+ long unsigned int flags;
+};
+
+struct efi {
+ const efi_runtime_services_t *runtime;
+ unsigned int runtime_version;
+ unsigned int runtime_supported_mask;
+ long unsigned int acpi;
+ long unsigned int acpi20;
+ long unsigned int smbios;
+ long unsigned int smbios3;
+ long unsigned int esrt;
+ long unsigned int tpm_log;
+ long unsigned int tpm_final_log;
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_t *set_variable_nonblocking;
+ efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+ struct efi_memory_map memmap;
+ long unsigned int flags;
+};
+
+enum efi_rts_ids {
+ EFI_NONE = 0,
+ EFI_GET_TIME = 1,
+ EFI_SET_TIME = 2,
+ EFI_GET_WAKEUP_TIME = 3,
+ EFI_SET_WAKEUP_TIME = 4,
+ EFI_GET_VARIABLE = 5,
+ EFI_GET_NEXT_VARIABLE = 6,
+ EFI_SET_VARIABLE = 7,
+ EFI_QUERY_VARIABLE_INFO = 8,
+ EFI_GET_NEXT_HIGH_MONO_COUNT = 9,
+ EFI_RESET_SYSTEM = 10,
+ EFI_UPDATE_CAPSULE = 11,
+ EFI_QUERY_CAPSULE_CAPS = 12,
+};
+
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+enum memcg_stat_item {
+ MEMCG_SWAP = 33,
+ MEMCG_SOCK = 34,
+ MEMCG_KERNEL_STACK_KB = 35,
+ MEMCG_NR_STAT = 36,
+};
+
+enum memcg_memory_event {
+ MEMCG_LOW = 0,
+ MEMCG_HIGH = 1,
+ MEMCG_MAX = 2,
+ MEMCG_OOM = 3,
+ MEMCG_OOM_KILL = 4,
+ MEMCG_SWAP_HIGH = 5,
+ MEMCG_SWAP_MAX = 6,
+ MEMCG_SWAP_FAIL = 7,
+ MEMCG_NR_MEMORY_EVENTS = 8,
+};
+
+enum mem_cgroup_events_target {
+ MEM_CGROUP_TARGET_THRESH = 0,
+ MEM_CGROUP_TARGET_SOFTLIMIT = 1,
+ MEM_CGROUP_NTARGETS = 2,
+};
+
+struct memcg_vmstats_percpu {
+ long int stat[36];
+ long unsigned int events[65];
+ long unsigned int nr_page_events;
+ long unsigned int targets[2];
+};
+
+struct mem_cgroup_reclaim_iter {
+ struct mem_cgroup *position;
+ unsigned int generation;
+};
+
+struct lruvec_stat {
+ long int count[33];
+};
+
+struct memcg_shrinker_map {
+ struct callback_head rcu;
+ long unsigned int map[0];
+};
+
+struct mem_cgroup_per_node {
+ struct lruvec lruvec;
+ struct lruvec_stat *lruvec_stat_local;
+ struct lruvec_stat *lruvec_stat_cpu;
+ atomic_long_t lruvec_stat[33];
+ long unsigned int lru_zone_size[25];
+ struct mem_cgroup_reclaim_iter iter;
+ struct memcg_shrinker_map *shrinker_map;
+ struct rb_node tree_node;
+ long unsigned int usage_in_excess;
+ bool on_tree;
+ struct mem_cgroup *memcg;
+};
+
+struct eventfd_ctx;
+
+struct mem_cgroup_threshold {
+ struct eventfd_ctx *eventfd;
+ long unsigned int threshold;
+};
+
+struct mem_cgroup_threshold_ary {
+ int current_threshold;
+ unsigned int size;
+ struct mem_cgroup_threshold entries[0];
+};
+
+struct percpu_cluster {
+ struct swap_cluster_info index;
+ unsigned int next;
+};
+
+enum fs_value_type {
+ fs_value_is_undefined = 0,
+ fs_value_is_flag = 1,
+ fs_value_is_string = 2,
+ fs_value_is_blob = 3,
+ fs_value_is_filename = 4,
+ fs_value_is_file = 5,
+};
+
+struct fs_parameter {
+ const char *key;
+ enum fs_value_type type: 8;
+ union {
+ char *string;
+ void *blob;
+ struct filename *name;
+ struct file *file;
+ };
+ size_t size;
+ int dirfd;
+};
+
+struct fc_log {
+ refcount_t usage;
+ u8 head;
+ u8 tail;
+ u8 need_free;
+ struct module *owner;
+ char *buffer[8];
+};
+
+struct fs_context_operations {
+ void (*free)(struct fs_context *);
+ int (*dup)(struct fs_context *, struct fs_context *);
+ int (*parse_param)(struct fs_context *, struct fs_parameter *);
+ int (*parse_monolithic)(struct fs_context *, void *);
+ int (*get_tree)(struct fs_context *);
+ int (*reconfigure)(struct fs_context *);
+};
+
+struct fs_parse_result {
+ bool negated;
+ union {
+ bool boolean;
+ int int_32;
+ unsigned int uint_32;
+ u64 uint_64;
+ };
+};
+
+struct trace_event_raw_initcall_level {
+ struct trace_entry ent;
+ u32 __data_loc_level;
+ char __data[0];
+};
+
+struct trace_event_raw_initcall_start {
+ struct trace_entry ent;
+ initcall_t func;
+ char __data[0];
+};
+
+struct trace_event_raw_initcall_finish {
+ struct trace_entry ent;
+ initcall_t func;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_initcall_level {
+ u32 level;
+};
+
+struct trace_event_data_offsets_initcall_start {};
+
+struct trace_event_data_offsets_initcall_finish {};
+
+typedef void (*btf_trace_initcall_level)(void *, const char *);
+
+typedef void (*btf_trace_initcall_start)(void *, initcall_t);
+
+typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int);
+
+struct blacklist_entry {
+ struct list_head next;
+ char *buf;
+};
+
+typedef int pao_T__;
+
+typedef __u32 Elf32_Word;
+
+struct elf32_note {
+ Elf32_Word n_namesz;
+ Elf32_Word n_descsz;
+ Elf32_Word n_type;
+};
+
+enum {
+ UNAME26 = 131072,
+ ADDR_NO_RANDOMIZE = 262144,
+ FDPIC_FUNCPTRS = 524288,
+ MMAP_PAGE_ZERO = 1048576,
+ ADDR_COMPAT_LAYOUT = 2097152,
+ READ_IMPLIES_EXEC = 4194304,
+ ADDR_LIMIT_32BIT = 8388608,
+ SHORT_INODE = 16777216,
+ WHOLE_SECONDS = 33554432,
+ STICKY_TIMEOUTS = 67108864,
+ ADDR_LIMIT_3GB = 134217728,
+};
+
+enum tlb_infos {
+ ENTRIES = 0,
+ NR_INFO = 1,
+};
+
+enum {
+ MM_FILEPAGES = 0,
+ MM_ANONPAGES = 1,
+ MM_SWAPENTS = 2,
+ MM_SHMEMPAGES = 3,
+ NR_MM_COUNTERS = 4,
+};
+
+enum hrtimer_base_type {
+ HRTIMER_BASE_MONOTONIC = 0,
+ HRTIMER_BASE_REALTIME = 1,
+ HRTIMER_BASE_BOOTTIME = 2,
+ HRTIMER_BASE_TAI = 3,
+ HRTIMER_BASE_MONOTONIC_SOFT = 4,
+ HRTIMER_BASE_REALTIME_SOFT = 5,
+ HRTIMER_BASE_BOOTTIME_SOFT = 6,
+ HRTIMER_BASE_TAI_SOFT = 7,
+ HRTIMER_MAX_CLOCK_BASES = 8,
+};
+
+enum rseq_cs_flags_bit {
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
+};
+
+enum perf_event_task_context {
+ perf_invalid_context = -1,
+ perf_hw_context = 0,
+ perf_sw_context = 1,
+ perf_nr_task_contexts = 2,
+};
+
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = 0,
+ RSEQ_EVENT_SIGNAL_BIT = 1,
+ RSEQ_EVENT_MIGRATE_BIT = 2,
+};
+
+enum {
+ PROC_ROOT_INO = 1,
+ PROC_IPC_INIT_INO = -268435457,
+ PROC_UTS_INIT_INO = -268435458,
+ PROC_USER_INIT_INO = -268435459,
+ PROC_PID_INIT_INO = -268435460,
+ PROC_CGROUP_INIT_INO = -268435461,
+ PROC_TIME_INIT_INO = -268435462,
+};
+
+typedef __u16 __le16;
+
+typedef __u16 __be16;
+
+typedef __u32 __be32;
+
+typedef __u64 __be64;
+
+typedef __u32 __wsum;
+
+typedef u64 uint64_t;
+
+typedef unsigned int slab_flags_t;
+
+struct __va_list_tag {
+ unsigned int gp_offset;
+ unsigned int fp_offset;
+ void *overflow_arg_area;
+ void *reg_save_area;
+};
+
+typedef __builtin_va_list __gnuc_va_list;
+
+typedef __gnuc_va_list va_list;
+
+struct raw_notifier_head {
+ struct notifier_block *head;
+};
+
+struct llist_head {
+ struct llist_node *first;
+};
+
+typedef struct __call_single_data call_single_data_t;
+
+struct ida {
+ struct xarray xa;
+};
+
+typedef __u64 __addrpair;
+
+typedef __u32 __portpair;
+
+typedef struct {
+ struct net *net;
+} possible_net_t;
+
+struct in6_addr {
+ union {
+ __u8 u6_addr8[16];
+ __be16 u6_addr16[8];
+ __be32 u6_addr32[4];
+ } in6_u;
+};
+
+struct hlist_nulls_node {
+ struct hlist_nulls_node *next;
+ struct hlist_nulls_node **pprev;
+};
+
+struct proto;
+
+struct inet_timewait_death_row;
+
+struct sock_common {
+ union {
+ __addrpair skc_addrpair;
+ struct {
+ __be32 skc_daddr;
+ __be32 skc_rcv_saddr;
+ };
+ };
+ union {
+ unsigned int skc_hash;
+ __u16 skc_u16hashes[2];
+ };
+ union {
+ __portpair skc_portpair;
+ struct {
+ __be16 skc_dport;
+ __u16 skc_num;
+ };
+ };
+ short unsigned int skc_family;
+ volatile unsigned char skc_state;
+ unsigned char skc_reuse: 4;
+ unsigned char skc_reuseport: 1;
+ unsigned char skc_ipv6only: 1;
+ unsigned char skc_net_refcnt: 1;
+ int skc_bound_dev_if;
+ union {
+ struct hlist_node skc_bind_node;
+ struct hlist_node skc_portaddr_node;
+ };
+ struct proto *skc_prot;
+ possible_net_t skc_net;
+ struct in6_addr skc_v6_daddr;
+ struct in6_addr skc_v6_rcv_saddr;
+ atomic64_t skc_cookie;
+ union {
+ long unsigned int skc_flags;
+ struct sock *skc_listener;
+ struct inet_timewait_death_row *skc_tw_dr;
+ };
+ int skc_dontcopy_begin[0];
+ union {
+ struct hlist_node skc_node;
+ struct hlist_nulls_node skc_nulls_node;
+ };
+ short unsigned int skc_tx_queue_mapping;
+ short unsigned int skc_rx_queue_mapping;
+ union {
+ int skc_incoming_cpu;
+ u32 skc_rcv_wnd;
+ u32 skc_tw_rcv_nxt;
+ };
+ refcount_t skc_refcnt;
+ int skc_dontcopy_end[0];
+ union {
+ u32 skc_rxhash;
+ u32 skc_window_clamp;
+ u32 skc_tw_snd_nxt;
+ };
+};
+
+typedef struct {
+ spinlock_t slock;
+ int owned;
+ wait_queue_head_t wq;
+ struct lockdep_map dep_map;
+} socket_lock_t;
+
+struct sk_buff;
+
+struct sk_buff_head {
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+typedef u64 netdev_features_t;
+
+struct sock_cgroup_data {
+ union {
+ struct {
+ u8 is_data;
+ u8 padding;
+ u16 prioidx;
+ u32 classid;
+ };
+ u64 val;
+ };
+};
+
+struct sk_filter;
+
+struct socket_wq;
+
+struct xfrm_policy;
+
+struct dst_entry;
+
+struct socket;
+
+struct sock_reuseport;
+
+struct bpf_sk_storage;
+
+struct sock {
+ struct sock_common __sk_common;
+ socket_lock_t sk_lock;
+ atomic_t sk_drops;
+ int sk_rcvlowat;
+ struct sk_buff_head sk_error_queue;
+ struct sk_buff *sk_rx_skb_cache;
+ struct sk_buff_head sk_receive_queue;
+ struct {
+ atomic_t rmem_alloc;
+ int len;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ } sk_backlog;
+ int sk_forward_alloc;
+ unsigned int sk_ll_usec;
+ unsigned int sk_napi_id;
+ int sk_rcvbuf;
+ struct sk_filter *sk_filter;
+ union {
+ struct socket_wq *sk_wq;
+ struct socket_wq *sk_wq_raw;
+ };
+ struct xfrm_policy *sk_policy[2];
+ struct dst_entry *sk_rx_dst;
+ struct dst_entry *sk_dst_cache;
+ atomic_t sk_omem_alloc;
+ int sk_sndbuf;
+ int sk_wmem_queued;
+ refcount_t sk_wmem_alloc;
+ long unsigned int sk_tsq_flags;
+ union {
+ struct sk_buff *sk_send_head;
+ struct rb_root tcp_rtx_queue;
+ };
+ struct sk_buff *sk_tx_skb_cache;
+ struct sk_buff_head sk_write_queue;
+ __s32 sk_peek_off;
+ int sk_write_pending;
+ __u32 sk_dst_pending_confirm;
+ u32 sk_pacing_status;
+ long int sk_sndtimeo;
+ struct timer_list sk_timer;
+ __u32 sk_priority;
+ __u32 sk_mark;
+ long unsigned int sk_pacing_rate;
+ long unsigned int sk_max_pacing_rate;
+ struct page_frag sk_frag;
+ netdev_features_t sk_route_caps;
+ netdev_features_t sk_route_nocaps;
+ netdev_features_t sk_route_forced_caps;
+ int sk_gso_type;
+ unsigned int sk_gso_max_size;
+ gfp_t sk_allocation;
+ __u32 sk_txhash;
+ u8 sk_padding: 1;
+ u8 sk_kern_sock: 1;
+ u8 sk_no_check_tx: 1;
+ u8 sk_no_check_rx: 1;
+ u8 sk_userlocks: 4;
+ u8 sk_pacing_shift;
+ u16 sk_type;
+ u16 sk_protocol;
+ u16 sk_gso_max_segs;
+ long unsigned int sk_lingertime;
+ struct proto *sk_prot_creator;
+ rwlock_t sk_callback_lock;
+ int sk_err;
+ int sk_err_soft;
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
+ kuid_t sk_uid;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
+ long int sk_rcvtimeo;
+ ktime_t sk_stamp;
+ u16 sk_tsflags;
+ u8 sk_shutdown;
+ u32 sk_tskey;
+ atomic_t sk_zckey;
+ u8 sk_clockid;
+ u8 sk_txtime_deadline_mode: 1;
+ u8 sk_txtime_report_errors: 1;
+ u8 sk_txtime_unused: 6;
+ struct socket *sk_socket;
+ void *sk_user_data;
+ void *sk_security;
+ struct sock_cgroup_data sk_cgrp_data;
+ struct mem_cgroup *sk_memcg;
+ void (*sk_state_change)(struct sock *);
+ void (*sk_data_ready)(struct sock *);
+ void (*sk_write_space)(struct sock *);
+ void (*sk_error_report)(struct sock *);
+ int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
+ void (*sk_destruct)(struct sock *);
+ struct sock_reuseport *sk_reuseport_cb;
+ struct bpf_sk_storage *sk_bpf_storage;
+ struct callback_head sk_rcu;
+};
+
+struct rhash_head {
+ struct rhash_head *next;
+};
+
+struct rhashtable;
+
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *, u32, u32);
+
+typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32);
+
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *);
+
+struct rhashtable_params {
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
+ unsigned int max_size;
+ u16 min_size;
+ bool automatic_shrinking;
+ rht_hashfn_t hashfn;
+ rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
+};
+
+struct bucket_table;
+
+struct rhashtable {
+ struct bucket_table *tbl;
+ unsigned int key_len;
+ unsigned int max_elems;
+ struct rhashtable_params p;
+ bool rhlist;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+ atomic_t nelems;
+};
+
+struct rhash_lock_head;
+
+struct bucket_table {
+ unsigned int size;
+ unsigned int nest;
+ u32 hash_rnd;
+ struct list_head walkers;
+ struct callback_head rcu;
+ struct bucket_table *future_tbl;
+ struct lockdep_map dep_map;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct rhash_lock_head *buckets[0];
+};
+
+struct fs_struct {
+ int users;
+ spinlock_t lock;
+ seqcount_t seq;
+ int umask;
+ int in_exec;
+ struct path root;
+ struct path pwd;
+};
+
+struct pipe_buffer;
+
+struct pipe_inode_info {
+ struct mutex mutex;
+ wait_queue_head_t rd_wait;
+ wait_queue_head_t wr_wait;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int max_usage;
+ unsigned int ring_size;
+ unsigned int nr_accounted;
+ unsigned int readers;
+ unsigned int writers;
+ unsigned int files;
+ unsigned int r_counter;
+ unsigned int w_counter;
+ struct page *tmp_page;
+ struct fasync_struct *fasync_readers;
+ struct fasync_struct *fasync_writers;
+ struct pipe_buffer *bufs;
+ struct user_struct *user;
+};
+
+struct vfsmount {
+ struct dentry *mnt_root;
+ struct super_block *mnt_sb;
+ int mnt_flags;
+};
+
+struct ld_semaphore {
+ atomic_long_t count;
+ raw_spinlock_t wait_lock;
+ unsigned int wait_readers;
+ struct list_head read_wait;
+ struct list_head write_wait;
+ struct lockdep_map dep_map;
+};
+
+typedef unsigned int tcflag_t;
+
+typedef unsigned char cc_t;
+
+typedef unsigned int speed_t;
+
+struct ktermios {
+ tcflag_t c_iflag;
+ tcflag_t c_oflag;
+ tcflag_t c_cflag;
+ tcflag_t c_lflag;
+ cc_t c_line;
+ cc_t c_cc[19];
+ speed_t c_ispeed;
+ speed_t c_ospeed;
+};
+
+struct winsize {
+ short unsigned int ws_row;
+ short unsigned int ws_col;
+ short unsigned int ws_xpixel;
+ short unsigned int ws_ypixel;
+};
+
+struct tty_operations;
+
+struct tty_ldisc;
+
+struct termiox;
+
+struct tty_port;
+
+struct tty_struct {
+ int magic;
+ struct kref kref;
+ struct device *dev;
+ struct tty_driver *driver;
+ const struct tty_operations *ops;
+ int index;
+ struct ld_semaphore ldisc_sem;
+ struct tty_ldisc *ldisc;
+ struct mutex atomic_write_lock;
+ struct mutex legacy_mutex;
+ struct mutex throttle_mutex;
+ struct rw_semaphore termios_rwsem;
+ struct mutex winsize_mutex;
+ spinlock_t ctrl_lock;
+ spinlock_t flow_lock;
+ struct ktermios termios;
+ struct ktermios termios_locked;
+ struct termiox *termiox;
+ char name[64];
+ struct pid *pgrp;
+ struct pid *session;
+ long unsigned int flags;
+ int count;
+ struct winsize winsize;
+ long unsigned int stopped: 1;
+ long unsigned int flow_stopped: 1;
+ int: 30;
+ long unsigned int unused: 62;
+ int hw_stopped;
+ long unsigned int ctrl_status: 8;
+ long unsigned int packet: 1;
+ int: 23;
+ long unsigned int unused_ctrl: 55;
+ unsigned int receive_room;
+ int flow_change;
+ struct tty_struct *link;
+ struct fasync_struct *fasync;
+ wait_queue_head_t write_wait;
+ wait_queue_head_t read_wait;
+ struct work_struct hangup_work;
+ void *disc_data;
+ void *driver_data;
+ spinlock_t files_lock;
+ struct list_head tty_files;
+ int closing;
+ unsigned char *write_buf;
+ int write_cnt;
+ struct work_struct SAK_work;
+ struct tty_port *port;
+};
+
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
+struct iovec;
+
+struct kvec;
+
+struct iov_iter {
+ u8 iter_type;
+ bool nofault;
+ bool data_source;
+ bool user_backed;
+ union {
+ size_t iov_offset;
+ int last_offset;
+ };
+ size_t count;
+ union {
+ const struct iovec *iov;
+ const struct kvec *kvec;
+ const struct bio_vec *bvec;
+ struct xarray *xarray;
+ struct pipe_inode_info *pipe;
+ void *ubuf;
+ };
+ union {
+ unsigned long nr_segs;
+ struct {
+ unsigned int head;
+ unsigned int start_head;
+ };
+ loff_t xarray_start;
+ };
+};
+
+struct posix_acl_entry {
+ short int e_tag;
+ short unsigned int e_perm;
+ union {
+ kuid_t e_uid;
+ kgid_t e_gid;
+ };
+};
+
+struct posix_acl {
+ refcount_t a_refcount;
+ struct callback_head a_rcu;
+ unsigned int a_count;
+ struct posix_acl_entry a_entries[0];
+};
+
+struct termios {
+ tcflag_t c_iflag;
+ tcflag_t c_oflag;
+ tcflag_t c_cflag;
+ tcflag_t c_lflag;
+ cc_t c_line;
+ cc_t c_cc[19];
+};
+
+struct termiox {
+ __u16 x_hflag;
+ __u16 x_cflag;
+ __u16 x_rflag[5];
+ __u16 x_sflag;
+};
+
+struct serial_icounter_struct;
+
+struct serial_struct;
+
+struct tty_operations {
+ struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int);
+ int (*install)(struct tty_driver *, struct tty_struct *);
+ void (*remove)(struct tty_driver *, struct tty_struct *);
+ int (*open)(struct tty_struct *, struct file *);
+ void (*close)(struct tty_struct *, struct file *);
+ void (*shutdown)(struct tty_struct *);
+ void (*cleanup)(struct tty_struct *);
+ int (*write)(struct tty_struct *, const unsigned char *, int);
+ int (*put_char)(struct tty_struct *, unsigned char);
+ void (*flush_chars)(struct tty_struct *);
+ int (*write_room)(struct tty_struct *);
+ int (*chars_in_buffer)(struct tty_struct *);
+ int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int);
+ long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int);
+ void (*set_termios)(struct tty_struct *, struct ktermios *);
+ void (*throttle)(struct tty_struct *);
+ void (*unthrottle)(struct tty_struct *);
+ void (*stop)(struct tty_struct *);
+ void (*start)(struct tty_struct *);
+ void (*hangup)(struct tty_struct *);
+ int (*break_ctl)(struct tty_struct *, int);
+ void (*flush_buffer)(struct tty_struct *);
+ void (*set_ldisc)(struct tty_struct *);
+ void (*wait_until_sent)(struct tty_struct *, int);
+ void (*send_xchar)(struct tty_struct *, char);
+ int (*tiocmget)(struct tty_struct *);
+ int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int);
+ int (*resize)(struct tty_struct *, struct winsize *);
+ int (*set_termiox)(struct tty_struct *, struct termiox *);
+ int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *);
+ int (*get_serial)(struct tty_struct *, struct serial_struct *);
+ int (*set_serial)(struct tty_struct *, struct serial_struct *);
+ void (*show_fdinfo)(struct tty_struct *, struct seq_file *);
+ int (*proc_show)(struct seq_file *, void *);
+};
+
+struct proc_dir_entry;
+
+struct tty_driver {
+ int magic;
+ struct kref kref;
+ struct cdev **cdevs;
+ struct module *owner;
+ const char *driver_name;
+ const char *name;
+ int name_base;
+ int major;
+ int minor_start;
+ unsigned int num;
+ short int type;
+ short int subtype;
+ struct ktermios init_termios;
+ long unsigned int flags;
+ struct proc_dir_entry *proc_entry;
+ struct tty_driver *other;
+ struct tty_struct **ttys;
+ struct tty_port **ports;
+ struct ktermios **termios;
+ void *driver_state;
+ const struct tty_operations *ops;
+ struct list_head tty_drivers;
+};
+
+struct tty_buffer {
+ union {
+ struct tty_buffer *next;
+ struct llist_node free;
+ };
+ int used;
+ int size;
+ int commit;
+ int read;
+ int flags;
+ long unsigned int data[0];
+};
+
+struct tty_bufhead {
+ struct tty_buffer *head;
+ struct work_struct work;
+ struct mutex lock;
+ atomic_t priority;
+ struct tty_buffer sentinel;
+ struct llist_head free;
+ atomic_t mem_used;
+ int mem_limit;
+ struct tty_buffer *tail;
+};
+
+struct tty_port_operations;
+
+struct tty_port_client_operations;
+
+struct tty_port {
+ struct tty_bufhead buf;
+ struct tty_struct *tty;
+ struct tty_struct *itty;
+ const struct tty_port_operations *ops;
+ const struct tty_port_client_operations *client_ops;
+ spinlock_t lock;
+ int blocked_open;
+ int count;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t delta_msr_wait;
+ long unsigned int flags;
+ long unsigned int iflags;
+ unsigned char console: 1;
+ unsigned char low_latency: 1;
+ struct mutex mutex;
+ struct mutex buf_mutex;
+ unsigned char *xmit_buf;
+ unsigned int close_delay;
+ unsigned int closing_wait;
+ int drain_delay;
+ struct kref kref;
+ void *client_data;
+};
+
+struct tty_ldisc_ops {
+ int magic;
+ char *name;
+ int num;
+ int flags;
+ int (*open)(struct tty_struct *);
+ void (*close)(struct tty_struct *);
+ void (*flush_buffer)(struct tty_struct *);
+ ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t);
+ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t);
+ int (*ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int);
+ int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int);
+ void (*set_termios)(struct tty_struct *, struct ktermios *);
+ __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *);
+ int (*hangup)(struct tty_struct *);
+ void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int);
+ void (*write_wakeup)(struct tty_struct *);
+ void (*dcd_change)(struct tty_struct *, unsigned int);
+ int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int);
+ struct module *owner;
+ int refcount;
+};
+
+struct tty_ldisc {
+ struct tty_ldisc_ops *ops;
+ struct tty_struct *tty;
+};
+
+struct tty_port_operations {
+ int (*carrier_raised)(struct tty_port *);
+ void (*dtr_rts)(struct tty_port *, int);
+ void (*shutdown)(struct tty_port *);
+ int (*activate)(struct tty_port *, struct tty_struct *);
+ void (*destruct)(struct tty_port *);
+};
+
+struct tty_port_client_operations {
+ int (*receive_buf)(struct tty_port *, const unsigned char *, const unsigned char *, size_t);
+ void (*write_wakeup)(struct tty_port *);
+};
+
+struct prot_inuse;
+
+struct netns_core {
+ struct ctl_table_header *sysctl_hdr;
+ int sysctl_somaxconn;
+ int *sock_inuse;
+ struct prot_inuse *prot_inuse;
+};
+
+struct tcp_mib;
+
+struct ipstats_mib;
+
+struct linux_mib;
+
+struct udp_mib;
+
+struct icmp_mib;
+
+struct icmpmsg_mib;
+
+struct icmpv6_mib;
+
+struct icmpv6msg_mib;
+
+struct linux_tls_mib;
+
+struct netns_mib {
+ struct tcp_mib *tcp_statistics;
+ struct ipstats_mib *ip_statistics;
+ struct linux_mib *net_statistics;
+ struct udp_mib *udp_statistics;
+ struct udp_mib *udplite_statistics;
+ struct icmp_mib *icmp_statistics;
+ struct icmpmsg_mib *icmpmsg_statistics;
+ struct proc_dir_entry *proc_net_devsnmp6;
+ struct udp_mib *udp_stats_in6;
+ struct udp_mib *udplite_stats_in6;
+ struct ipstats_mib *ipv6_statistics;
+ struct icmpv6_mib *icmpv6_statistics;
+ struct icmpv6msg_mib *icmpv6msg_statistics;
+ struct linux_tls_mib *tls_statistics;
+};
+
+struct netns_packet {
+ struct mutex sklist_lock;
+ struct hlist_head sklist;
+};
+
+struct netns_unix {
+ int sysctl_max_dgram_qlen;
+ struct ctl_table_header *ctl;
+};
+
+struct netns_nexthop {
+ struct rb_root rb_root;
+ struct hlist_head *devhash;
+ unsigned int seq;
+ u32 last_id_allocated;
+ struct atomic_notifier_head notifier_chain;
+};
+
+struct local_ports {
+ seqlock_t lock;
+ int range[2];
+ bool warned;
+};
+
+struct inet_hashinfo;
+
+struct inet_timewait_death_row {
+ atomic_t tw_count;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct inet_hashinfo *hashinfo;
+ int sysctl_max_tw_buckets;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct ping_group_range {
+ seqlock_t lock;
+ kgid_t range[2];
+};
+
+typedef struct {
+ u64 key[2];
+} siphash_key_t;
+
+struct ipv4_devconf;
+
+struct ip_ra_chain;
+
+struct inet_peer_base;
+
+struct fqdir;
+
+struct xt_table;
+
+struct tcp_congestion_ops;
+
+struct tcp_fastopen_context;
+
+struct fib_notifier_ops;
+
+struct netns_ipv4 {
+ struct ctl_table_header *forw_hdr;
+ struct ctl_table_header *frags_hdr;
+ struct ctl_table_header *ipv4_hdr;
+ struct ctl_table_header *route_hdr;
+ struct ctl_table_header *xfrm4_hdr;
+ struct ipv4_devconf *devconf_all;
+ struct ipv4_devconf *devconf_dflt;
+ struct ip_ra_chain *ra_chain;
+ struct mutex ra_mutex;
+ bool fib_has_custom_local_routes;
+ int fib_num_tclassid_users;
+ struct hlist_head *fib_table_hash;
+ bool fib_offload_disabled;
+ struct sock *fibnl;
+ struct sock **icmp_sk;
+ struct sock *mc_autojoin_sk;
+ struct inet_peer_base *peers;
+ struct sock **tcp_sk;
+ struct fqdir *fqdir;
+ struct xt_table *iptable_filter;
+ struct xt_table *iptable_mangle;
+ struct xt_table *iptable_raw;
+ struct xt_table *arptable_filter;
+ struct xt_table *iptable_security;
+ struct xt_table *nat_table;
+ int sysctl_icmp_echo_ignore_all;
+ int sysctl_icmp_echo_ignore_broadcasts;
+ int sysctl_icmp_ignore_bogus_error_responses;
+ int sysctl_icmp_ratelimit;
+ int sysctl_icmp_ratemask;
+ int sysctl_icmp_errors_use_inbound_ifaddr;
+ struct local_ports ip_local_ports;
+ int sysctl_tcp_ecn;
+ int sysctl_tcp_ecn_fallback;
+ int sysctl_ip_default_ttl;
+ int sysctl_ip_no_pmtu_disc;
+ int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_fwd_update_priority;
+ int sysctl_ip_nonlocal_bind;
+ int sysctl_ip_autobind_reuse;
+ int sysctl_ip_dynaddr;
+ int sysctl_ip_early_demux;
+ int sysctl_tcp_early_demux;
+ int sysctl_udp_early_demux;
+ int sysctl_nexthop_compat_mode;
+ int sysctl_fwmark_reflect;
+ int sysctl_tcp_fwmark_accept;
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_mtu_probe_floor;
+ int sysctl_tcp_base_mss;
+ int sysctl_tcp_min_snd_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
+ int sysctl_tcp_keepalive_time;
+ int sysctl_tcp_keepalive_probes;
+ int sysctl_tcp_keepalive_intvl;
+ int sysctl_tcp_syn_retries;
+ int sysctl_tcp_synack_retries;
+ int sysctl_tcp_syncookies;
+ int sysctl_tcp_reordering;
+ int sysctl_tcp_retries1;
+ int sysctl_tcp_retries2;
+ int sysctl_tcp_orphan_retries;
+ int sysctl_tcp_fin_timeout;
+ unsigned int sysctl_tcp_notsent_lowat;
+ int sysctl_tcp_tw_reuse;
+ int sysctl_tcp_sack;
+ int sysctl_tcp_window_scaling;
+ int sysctl_tcp_timestamps;
+ int sysctl_tcp_early_retrans;
+ int sysctl_tcp_recovery;
+ int sysctl_tcp_thin_linear_timeouts;
+ int sysctl_tcp_slow_start_after_idle;
+ int sysctl_tcp_retrans_collapse;
+ int sysctl_tcp_stdurg;
+ int sysctl_tcp_rfc1337;
+ int sysctl_tcp_abort_on_overflow;
+ int sysctl_tcp_fack;
+ int sysctl_tcp_max_reordering;
+ int sysctl_tcp_dsack;
+ int sysctl_tcp_app_win;
+ int sysctl_tcp_adv_win_scale;
+ int sysctl_tcp_frto;
+ int sysctl_tcp_nometrics_save;
+ int sysctl_tcp_no_ssthresh_metrics_save;
+ int sysctl_tcp_moderate_rcvbuf;
+ int sysctl_tcp_tso_win_divisor;
+ int sysctl_tcp_workaround_signed_windows;
+ int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_challenge_ack_limit;
+ int sysctl_tcp_min_tso_segs;
+ int sysctl_tcp_min_rtt_wlen;
+ int sysctl_tcp_autocorking;
+ int sysctl_tcp_invalid_ratelimit;
+ int sysctl_tcp_pacing_ss_ratio;
+ int sysctl_tcp_pacing_ca_ratio;
+ int sysctl_tcp_wmem[3];
+ int sysctl_tcp_rmem[3];
+ int sysctl_tcp_comp_sack_nr;
+ long unsigned int sysctl_tcp_comp_sack_delay_ns;
+ long unsigned int sysctl_tcp_comp_sack_slack_ns;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct inet_timewait_death_row tcp_death_row;
+ int sysctl_max_syn_backlog;
+ int sysctl_tcp_fastopen;
+ const struct tcp_congestion_ops *tcp_congestion_control;
+ struct tcp_fastopen_context *tcp_fastopen_ctx;
+ spinlock_t tcp_fastopen_ctx_lock;
+ unsigned int sysctl_tcp_fastopen_blackhole_timeout;
+ atomic_t tfo_active_disable_times;
+ long unsigned int tfo_active_disable_stamp;
+ int sysctl_udp_wmem_min;
+ int sysctl_udp_rmem_min;
+ int sysctl_igmp_max_memberships;
+ int sysctl_igmp_max_msf;
+ int sysctl_igmp_llm_reports;
+ int sysctl_igmp_qrv;
+ struct ping_group_range ping_group_range;
+ atomic_t dev_addr_genid;
+ long unsigned int *sysctl_local_reserved_ports;
+ int sysctl_ip_prot_sock;
+ struct fib_notifier_ops *notifier_ops;
+ unsigned int fib_seq;
+ struct fib_notifier_ops *ipmr_notifier_ops;
+ unsigned int ipmr_seq;
+ atomic_t rt_genid;
+ siphash_key_t ip_id_key;
+};
+
+struct netns_sysctl_ipv6 {
+ struct ctl_table_header *hdr;
+ struct ctl_table_header *route_hdr;
+ struct ctl_table_header *icmp_hdr;
+ struct ctl_table_header *frags_hdr;
+ struct ctl_table_header *xfrm6_hdr;
+ int bindv6only;
+ int flush_delay;
+ int ip6_rt_max_size;
+ int ip6_rt_gc_min_interval;
+ int ip6_rt_gc_timeout;
+ int ip6_rt_gc_interval;
+ int ip6_rt_gc_elasticity;
+ int ip6_rt_mtu_expires;
+ int ip6_rt_min_advmss;
+ int multipath_hash_policy;
+ int flowlabel_consistency;
+ int auto_flowlabels;
+ int icmpv6_time;
+ int icmpv6_echo_ignore_all;
+ int icmpv6_echo_ignore_multicast;
+ int icmpv6_echo_ignore_anycast;
+ long unsigned int icmpv6_ratemask[4];
+ long unsigned int *icmpv6_ratemask_ptr;
+ int anycast_src_echo_reply;
+ int ip_nonlocal_bind;
+ int fwmark_reflect;
+ int idgen_retries;
+ int idgen_delay;
+ int flowlabel_state_ranges;
+ int flowlabel_reflect;
+ int max_dst_opts_cnt;
+ int max_hbh_opts_cnt;
+ int max_dst_opts_len;
+ int max_hbh_opts_len;
+ int seg6_flowlabel;
+ bool skip_notify_on_dev_down;
+};
+
+struct neighbour;
+
+struct dst_ops {
+ short unsigned int family;
+ unsigned int gc_thresh;
+ int (*gc)(struct dst_ops *);
+ struct dst_entry * (*check)(struct dst_entry *, __u32);
+ unsigned int (*default_advmss)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
+ u32 * (*cow_metrics)(struct dst_entry *, long unsigned int);
+ void (*destroy)(struct dst_entry *);
+ void (*ifdown)(struct dst_entry *, struct net_device *, int);
+ struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*link_failure)(struct sk_buff *);
+ void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool);
+ void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);
+ int (*local_out)(struct net *, struct sock *, struct sk_buff *);
+ struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);
+ void (*confirm_neigh)(const struct dst_entry *, const void *);
+ struct kmem_cache *kmem_cachep;
+ struct percpu_counter pcpuc_entries;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct ipv6_devconf;
+
+struct fib6_info;
+
+struct rt6_info;
+
+struct rt6_statistics;
+
+struct fib6_table;
+
+struct seg6_pernet_data;
+
+struct netns_ipv6 {
+ struct netns_sysctl_ipv6 sysctl;
+ struct ipv6_devconf *devconf_all;
+ struct ipv6_devconf *devconf_dflt;
+ struct inet_peer_base *peers;
+ struct fqdir *fqdir;
+ struct xt_table *ip6table_filter;
+ struct xt_table *ip6table_mangle;
+ struct xt_table *ip6table_raw;
+ struct xt_table *ip6table_security;
+ struct xt_table *ip6table_nat;
+ struct fib6_info *fib6_null_entry;
+ struct rt6_info *ip6_null_entry;
+ struct rt6_statistics *rt6_stats;
+ struct timer_list ip6_fib_timer;
+ struct hlist_head *fib_table_hash;
+ struct fib6_table *fib6_main_tbl;
+ struct list_head fib6_walkers;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct dst_ops ip6_dst_ops;
+ rwlock_t fib6_walker_lock;
+ spinlock_t fib6_gc_lock;
+ unsigned int ip6_rt_gc_expire;
+ long unsigned int ip6_rt_last_gc;
+ struct sock **icmp_sk;
+ struct sock *ndisc_sk;
+ struct sock *tcp_sk;
+ struct sock *igmp_sk;
+ struct sock *mc_autojoin_sk;
+ atomic_t dev_addr_genid;
+ atomic_t fib6_sernum;
+ struct seg6_pernet_data *seg6_data;
+ struct fib_notifier_ops *notifier_ops;
+ struct fib_notifier_ops *ip6mr_notifier_ops;
+ unsigned int ipmr_seq;
+ struct {
+ struct hlist_head head;
+ spinlock_t lock;
+ u32 seq;
+ } ip6addrlbl_table;
+ long: 64;
+ long: 64;
+};
+
+struct nf_queue_handler;
+
+struct nf_logger;
+
+struct nf_hook_entries;
+
+struct netns_nf {
+ struct proc_dir_entry *proc_netfilter;
+ const struct nf_queue_handler *queue_handler;
+ const struct nf_logger *nf_loggers[13];
+ struct ctl_table_header *nf_log_dir_header;
+ struct nf_hook_entries *hooks_ipv4[5];
+ struct nf_hook_entries *hooks_ipv6[5];
+ struct nf_hook_entries *hooks_arp[3];
+ bool defrag_ipv4;
+ bool defrag_ipv6;
+};
+
+struct netns_xt {
+ struct list_head tables[13];
+ bool notrack_deprecated_warning;
+ bool clusterip_deprecated_warning;
+};
+
+struct nf_generic_net {
+ unsigned int timeout;
+};
+
+struct nf_tcp_net {
+ unsigned int timeouts[14];
+ int tcp_loose;
+ int tcp_be_liberal;
+ int tcp_max_retrans;
+};
+
+struct nf_udp_net {
+ unsigned int timeouts[2];
+};
+
+struct nf_icmp_net {
+ unsigned int timeout;
+};
+
+struct nf_dccp_net {
+ int dccp_loose;
+ unsigned int dccp_timeout[10];
+};
+
+struct nf_sctp_net {
+ unsigned int timeouts[10];
+};
+
+struct nf_gre_net {
+ struct list_head keymap_list;
+ unsigned int timeouts[2];
+};
+
+struct nf_ip_net {
+ struct nf_generic_net generic;
+ struct nf_tcp_net tcp;
+ struct nf_udp_net udp;
+ struct nf_icmp_net icmp;
+ struct nf_icmp_net icmpv6;
+ struct nf_dccp_net dccp;
+ struct nf_sctp_net sctp;
+ struct nf_gre_net gre;
+};
+
+struct ct_pcpu;
+
+struct ip_conntrack_stat;
+
+struct nf_ct_event_notifier;
+
+struct nf_exp_event_notifier;
+
+struct netns_ct {
+ atomic_t count;
+ unsigned int expect_count;
+ struct delayed_work ecache_dwork;
+ bool ecache_dwork_pending;
+ bool auto_assign_helper_warned;
+ struct ctl_table_header *sysctl_header;
+ unsigned int sysctl_log_invalid;
+ int sysctl_events;
+ int sysctl_acct;
+ int sysctl_auto_assign_helper;
+ int sysctl_tstamp;
+ int sysctl_checksum;
+ struct ct_pcpu *pcpu_lists;
+ struct ip_conntrack_stat *stat;
+ struct nf_ct_event_notifier *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier *nf_expect_event_cb;
+ struct nf_ip_net nf_ct_proto;
+ unsigned int labels_used;
+};
+
+struct netns_nf_frag {
+ struct fqdir *fqdir;
+};
+
+struct bpf_link;
+
+struct netns_bpf {
+ struct bpf_prog *progs[1];
+ struct bpf_link *links[1];
+};
+
+struct xfrm_policy_hash {
+ struct hlist_head *table;
+ unsigned int hmask;
+ u8 dbits4;
+ u8 sbits4;
+ u8 dbits6;
+ u8 sbits6;
+};
+
+struct xfrm_policy_hthresh {
+ struct work_struct work;
+ seqlock_t lock;
+ u8 lbits4;
+ u8 rbits4;
+ u8 lbits6;
+ u8 rbits6;
+};
+
+struct netns_xfrm {
+ struct list_head state_all;
+ struct hlist_head *state_bydst;
+ struct hlist_head *state_bysrc;
+ struct hlist_head *state_byspi;
+ unsigned int state_hmask;
+ unsigned int state_num;
+ struct work_struct state_hash_work;
+ struct list_head policy_all;
+ struct hlist_head *policy_byidx;
+ unsigned int policy_idx_hmask;
+ struct hlist_head policy_inexact[3];
+ struct xfrm_policy_hash policy_bydst[3];
+ unsigned int policy_count[6];
+ struct work_struct policy_hash_work;
+ struct xfrm_policy_hthresh policy_hthresh;
+ struct list_head inexact_bins;
+ struct sock *nlsk;
+ struct sock *nlsk_stash;
+ u32 sysctl_aevent_etime;
+ u32 sysctl_aevent_rseqth;
+ int sysctl_larval_drop;
+ u32 sysctl_acq_expires;
+ struct ctl_table_header *sysctl_hdr;
+ long: 64;
+ struct dst_ops xfrm4_dst_ops;
+ struct dst_ops xfrm6_dst_ops;
+ spinlock_t xfrm_state_lock;
+ spinlock_t xfrm_policy_lock;
+ struct mutex xfrm_cfg_mutex;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct netns_xdp {
+ struct mutex lock;
+ struct hlist_head list;
+};
+
+struct uevent_sock;
+
+struct net_generic;
+
+struct netns_ipvs;
+
+struct net {
+ refcount_t passive;
+ refcount_t count;
+ spinlock_t rules_mod_lock;
+ unsigned int dev_unreg_count;
+ unsigned int dev_base_seq;
+ int ifindex;
+ spinlock_t nsid_lock;
+ atomic_t fnhe_genid;
+ struct list_head list;
+ struct list_head exit_list;
+ struct llist_node cleanup_list;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct idr netns_ids;
+ struct ns_common ns;
+ struct list_head dev_base_head;
+ struct proc_dir_entry *proc_net;
+ struct proc_dir_entry *proc_net_stat;
+ struct ctl_table_set sysctls;
+ struct sock *rtnl;
+ struct sock *genl_sock;
+ struct uevent_sock *uevent_sock;
+ struct hlist_head *dev_name_head;
+ struct hlist_head *dev_index_head;
+ struct raw_notifier_head netdev_chain;
+ u32 hash_mix;
+ struct net_device *loopback_dev;
+ struct list_head rules_ops;
+ struct netns_core core;
+ struct netns_mib mib;
+ struct netns_packet packet;
+ struct netns_unix unx;
+ struct netns_nexthop nexthop;
+ long: 64;
+ long: 64;
+ struct netns_ipv4 ipv4;
+ struct netns_ipv6 ipv6;
+ struct netns_nf nf;
+ struct netns_xt xt;
+ struct netns_ct ct;
+ struct netns_nf_frag nf_frag;
+ struct ctl_table_header *nf_frag_frags_hdr;
+ struct sock *nfnl;
+ struct sock *nfnl_stash;
+ struct list_head nfnl_acct_list;
+ struct list_head nfct_timeout_list;
+ struct net_generic *gen;
+ struct netns_bpf bpf;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct netns_xfrm xfrm;
+ atomic64_t net_cookie;
+ struct netns_ipvs *ipvs;
+ struct netns_xdp xdp;
+ struct sock *diag_nlsk;
+ long: 64;
+ long: 64;
+};
+
+typedef struct {
+ local64_t v;
+} u64_stats_t;
+
+enum bpf_link_type {
+ BPF_LINK_TYPE_UNSPEC = 0,
+ BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
+ BPF_LINK_TYPE_TRACING = 2,
+ BPF_LINK_TYPE_CGROUP = 3,
+ BPF_LINK_TYPE_ITER = 4,
+ BPF_LINK_TYPE_NETNS = 5,
+ MAX_BPF_LINK_TYPE = 6,
+};
+
+struct bpf_link_info {
+ __u32 type;
+ __u32 id;
+ __u32 prog_id;
+ union {
+ struct {
+ __u64 tp_name;
+ __u32 tp_name_len;
+ } raw_tracepoint;
+ struct {
+ __u32 attach_type;
+ } tracing;
+ struct {
+ __u64 cgroup_id;
+ __u32 attach_type;
+ } cgroup;
+ struct {
+ __u32 netns_ino;
+ __u32 attach_type;
+ } netns;
+ };
+};
+
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+ int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *);
+ int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *);
+ int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64);
+ int (*map_delete_elem)(struct bpf_offloaded_map *, void *);
+};
+
+struct bpf_offloaded_map {
+ struct bpf_map map;
+ struct net_device *netdev;
+ const struct bpf_map_dev_ops *dev_ops;
+ void *dev_priv;
+ struct list_head offloads;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct net_device_stats {
+ long unsigned int rx_packets;
+ long unsigned int tx_packets;
+ long unsigned int rx_bytes;
+ long unsigned int tx_bytes;
+ long unsigned int rx_errors;
+ long unsigned int tx_errors;
+ long unsigned int rx_dropped;
+ long unsigned int tx_dropped;
+ long unsigned int multicast;
+ long unsigned int collisions;
+ long unsigned int rx_length_errors;
+ long unsigned int rx_over_errors;
+ long unsigned int rx_crc_errors;
+ long unsigned int rx_frame_errors;
+ long unsigned int rx_fifo_errors;
+ long unsigned int rx_missed_errors;
+ long unsigned int tx_aborted_errors;
+ long unsigned int tx_carrier_errors;
+ long unsigned int tx_fifo_errors;
+ long unsigned int tx_heartbeat_errors;
+ long unsigned int tx_window_errors;
+ long unsigned int rx_compressed;
+ long unsigned int tx_compressed;
+};
+
+struct netdev_hw_addr_list {
+ struct list_head list;
+ int count;
+};
+
+struct wireless_dev;
+
+enum rx_handler_result {
+ RX_HANDLER_CONSUMED = 0,
+ RX_HANDLER_ANOTHER = 1,
+ RX_HANDLER_EXACT = 2,
+ RX_HANDLER_PASS = 3,
+};
+
+typedef enum rx_handler_result rx_handler_result_t;
+
+typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
+
+struct pcpu_dstats;
+
+struct netdev_tc_txq {
+ u16 count;
+ u16 offset;
+};
+
+struct sfp_bus;
+
+struct netdev_name_node;
+
+struct dev_ifalias;
+
+struct net_device_ops;
+
+struct ethtool_ops;
+
+struct ndisc_ops;
+
+struct header_ops;
+
+struct in_device;
+
+struct inet6_dev;
+
+struct wpan_dev;
+
+struct netdev_rx_queue;
+
+struct netdev_queue;
+
+struct cpu_rmap;
+
+struct Qdisc;
+
+struct xdp_dev_bulk_queue;
+
+struct xps_dev_maps;
+
+struct pcpu_lstats;
+
+struct pcpu_sw_netstats;
+
+struct rtnl_link_ops;
+
+struct netprio_map;
+
+struct phy_device;
+
+struct net_device {
+ char name[16];
+ struct netdev_name_node *name_node;
+ struct dev_ifalias *ifalias;
+ long unsigned int mem_end;
+ long unsigned int mem_start;
+ long unsigned int base_addr;
+ int irq;
+ long unsigned int state;
+ struct list_head dev_list;
+ struct list_head napi_list;
+ struct list_head unreg_list;
+ struct list_head close_list;
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } adj_list;
+ netdev_features_t features;
+ netdev_features_t hw_features;
+ netdev_features_t wanted_features;
+ netdev_features_t vlan_features;
+ netdev_features_t hw_enc_features;
+ netdev_features_t mpls_features;
+ netdev_features_t gso_partial_features;
+ int ifindex;
+ int group;
+ struct net_device_stats stats;
+ atomic_long_t rx_dropped;
+ atomic_long_t tx_dropped;
+ atomic_long_t rx_nohandler;
+ atomic_t carrier_up_count;
+ atomic_t carrier_down_count;
+ const struct net_device_ops *netdev_ops;
+ const struct ethtool_ops *ethtool_ops;
+ const struct ndisc_ops *ndisc_ops;
+ const struct header_ops *header_ops;
+ unsigned int flags;
+ unsigned int priv_flags;
+ short unsigned int gflags;
+ short unsigned int padded;
+ unsigned char operstate;
+ unsigned char link_mode;
+ unsigned char if_port;
+ unsigned char dma;
+ unsigned int mtu;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
+ short unsigned int type;
+ short unsigned int hard_header_len;
+ unsigned char min_header_len;
+ short unsigned int needed_headroom;
+ short unsigned int needed_tailroom;
+ unsigned char perm_addr[32];
+ unsigned char addr_assign_type;
+ unsigned char addr_len;
+ unsigned char upper_level;
+ unsigned char lower_level;
+ short unsigned int neigh_priv_len;
+ short unsigned int dev_id;
+ short unsigned int dev_port;
+ spinlock_t addr_list_lock;
+ unsigned char name_assign_type;
+ bool uc_promisc;
+ struct netdev_hw_addr_list uc;
+ struct netdev_hw_addr_list mc;
+ struct netdev_hw_addr_list dev_addrs;
+ struct kset *queues_kset;
+ unsigned int promiscuity;
+ unsigned int allmulti;
+ struct in_device *ip_ptr;
+ struct inet6_dev *ip6_ptr;
+ struct wireless_dev *ieee80211_ptr;
+ struct wpan_dev *ieee802154_ptr;
+ unsigned char *dev_addr;
+ struct netdev_rx_queue *_rx;
+ unsigned int num_rx_queues;
+ unsigned int real_num_rx_queues;
+ struct bpf_prog *xdp_prog;
+ long unsigned int gro_flush_timeout;
+ int napi_defer_hard_irqs;
+ rx_handler_func_t *rx_handler;
+ void *rx_handler_data;
+ struct netdev_queue *ingress_queue;
+ struct nf_hook_entries *nf_hooks_ingress;
+ unsigned char broadcast[32];
+ struct cpu_rmap *rx_cpu_rmap;
+ struct hlist_node index_hlist;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct netdev_queue *_tx;
+ unsigned int num_tx_queues;
+ unsigned int real_num_tx_queues;
+ struct Qdisc *qdisc;
+ unsigned int tx_queue_len;
+ spinlock_t tx_global_lock;
+ struct xdp_dev_bulk_queue *xdp_bulkq;
+ struct xps_dev_maps *xps_cpus_map;
+ struct xps_dev_maps *xps_rxqs_map;
+ struct hlist_head qdisc_hash[16];
+ struct timer_list watchdog_timer;
+ int watchdog_timeo;
+ struct list_head todo_list;
+ int *pcpu_refcnt;
+ struct list_head link_watch_list;
+ enum {
+ NETREG_UNINITIALIZED = 0,
+ NETREG_REGISTERED = 1,
+ NETREG_UNREGISTERING = 2,
+ NETREG_UNREGISTERED = 3,
+ NETREG_RELEASED = 4,
+ NETREG_DUMMY = 5,
+ } reg_state: 8;
+ bool dismantle;
+ enum {
+ RTNL_LINK_INITIALIZED = 0,
+ RTNL_LINK_INITIALIZING = 1,
+ } rtnl_link_state: 16;
+ bool needs_free_netdev;
+ void (*priv_destructor)(struct net_device *);
+ possible_net_t nd_net;
+ union {
+ void *ml_priv;
+ struct pcpu_lstats *lstats;
+ struct pcpu_sw_netstats *tstats;
+ struct pcpu_dstats *dstats;
+ };
+ struct device dev;
+ const struct attribute_group *sysfs_groups[4];
+ const struct attribute_group *sysfs_rx_queue_group;
+ const struct rtnl_link_ops *rtnl_link_ops;
+ unsigned int gso_max_size;
+ u16 gso_max_segs;
+ s16 num_tc;
+ struct netdev_tc_txq tc_to_txq[16];
+ u8 prio_tc_map[16];
+ struct netprio_map *priomap;
+ struct phy_device *phydev;
+ struct sfp_bus *sfp_bus;
+ struct lock_class_key *qdisc_tx_busylock;
+ struct lock_class_key *qdisc_running_key;
+ bool proto_down;
+ unsigned int wol_enabled: 1;
+ struct list_head net_notifier_list;
+ long: 64;
+ long: 64;
+};
+
+struct bpf_dispatcher_prog {
+ struct bpf_prog *prog;
+ refcount_t users;
+};
+
+struct bpf_dispatcher {
+ struct mutex mutex;
+ void *func;
+ struct bpf_dispatcher_prog progs[48];
+ int num_progs;
+ void *image;
+ u32 image_off;
+ struct bpf_ksym ksym;
+};
+
+struct bpf_link_ops;
+
+struct bpf_link {
+ atomic64_t refcnt;
+ u32 id;
+ enum bpf_link_type type;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+struct bpf_link_ops {
+ void (*release)(struct bpf_link *);
+ void (*dealloc)(struct bpf_link *);
+ int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *);
+ void (*show_fdinfo)(const struct bpf_link *, struct seq_file *);
+ int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *);
+};
+
+typedef unsigned int sk_buff_data_t;
+
+struct skb_ext;
+
+struct sk_buff {
+ union {
+ struct {
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ union {
+ struct net_device *dev;
+ long unsigned int dev_scratch;
+ };
+ };
+ struct rb_node rbnode;
+ struct list_head list;
+ };
+ union {
+ struct sock *sk;
+ int ip_defrag_offset;
+ };
+ union {
+ ktime_t tstamp;
+ u64 skb_mstamp_ns;
+ };
+ char cb[48];
+ union {
+ struct {
+ long unsigned int _skb_refdst;
+ void (*destructor)(struct sk_buff *);
+ };
+ struct list_head tcp_tsorted_anchor;
+ };
+ long unsigned int _nfct;
+ unsigned int len;
+ unsigned int data_len;
+ __u16 mac_len;
+ __u16 hdr_len;
+ __u16 queue_mapping;
+ __u8 __cloned_offset[0];
+ __u8 cloned: 1;
+ __u8 nohdr: 1;
+ __u8 fclone: 2;
+ __u8 peeked: 1;
+ __u8 head_frag: 1;
+ __u8 pfmemalloc: 1;
+ __u8 active_extensions;
+ __u32 headers_start[0];
+ __u8 __pkt_type_offset[0];
+ __u8 pkt_type: 3;
+ __u8 ignore_df: 1;
+ __u8 nf_trace: 1;
+ __u8 ip_summed: 2;
+ __u8 ooo_okay: 1;
+ __u8 l4_hash: 1;
+ __u8 sw_hash: 1;
+ __u8 wifi_acked_valid: 1;
+ __u8 wifi_acked: 1;
+ __u8 no_fcs: 1;
+ __u8 encapsulation: 1;
+ __u8 encap_hdr_csum: 1;
+ __u8 csum_valid: 1;
+ __u8 __pkt_vlan_present_offset[0];
+ __u8 vlan_present: 1;
+ __u8 csum_complete_sw: 1;
+ __u8 csum_level: 2;
+ __u8 csum_not_inet: 1;
+ __u8 dst_pending_confirm: 1;
+ __u8 ndisc_nodetype: 2;
+ __u8 ipvs_property: 1;
+ __u8 inner_protocol_type: 1;
+ __u8 remcsum_offload: 1;
+ __u8 offload_fwd_mark: 1;
+ __u8 offload_l3_fwd_mark: 1;
+ __u16 tc_index;
+ union {
+ __wsum csum;
+ struct {
+ __u16 csum_start;
+ __u16 csum_offset;
+ };
+ };
+ __u32 priority;
+ int skb_iif;
+ __u32 hash;
+ __be16 vlan_proto;
+ __u16 vlan_tci;
+ union {
+ unsigned int napi_id;
+ unsigned int sender_cpu;
+ };
+ union {
+ __u32 mark;
+ __u32 reserved_tailroom;
+ };
+ union {
+ __be16 inner_protocol;
+ __u8 inner_ipproto;
+ };
+ __u16 inner_transport_header;
+ __u16 inner_network_header;
+ __u16 inner_mac_header;
+ __be16 protocol;
+ __u16 transport_header;
+ __u16 network_header;
+ __u16 mac_header;
+ __u32 headers_end[0];
+ sk_buff_data_t tail;
+ sk_buff_data_t end;
+ unsigned char *head;
+ unsigned char *data;
+ unsigned int truesize;
+ refcount_t users;
+ struct skb_ext *extensions;
+};
+
+struct scatterlist {
+ long unsigned int page_link;
+ unsigned int offset;
+ unsigned int length;
+ dma_addr_t dma_address;
+ unsigned int dma_length;
+};
+
+struct sg_table {
+ struct scatterlist *sgl;
+ unsigned int nents;
+ unsigned int orig_nents;
+};
+
+enum suspend_stat_step {
+ SUSPEND_FREEZE = 1,
+ SUSPEND_PREPARE = 2,
+ SUSPEND_SUSPEND = 3,
+ SUSPEND_SUSPEND_LATE = 4,
+ SUSPEND_SUSPEND_NOIRQ = 5,
+ SUSPEND_RESUME_NOIRQ = 6,
+ SUSPEND_RESUME_EARLY = 7,
+ SUSPEND_RESUME = 8,
+};
+
+struct suspend_stats {
+ int success;
+ int fail;
+ int failed_freeze;
+ int failed_prepare;
+ int failed_suspend;
+ int failed_suspend_late;
+ int failed_suspend_noirq;
+ int failed_resume;
+ int failed_resume_early;
+ int failed_resume_noirq;
+ int last_failed_dev;
+ char failed_devs[80];
+ int last_failed_errno;
+ int errno[2];
+ int last_failed_step;
+ enum suspend_stat_step failed_steps[2];
+};
+
+enum {
+ Root_NFS = 255,
+ Root_CIFS = 254,
+ Root_RAM0 = 1048576,
+ Root_RAM1 = 1048577,
+ Root_FD0 = 2097152,
+ Root_HDA1 = 3145729,
+ Root_HDA2 = 3145730,
+ Root_SDA1 = 8388609,
+ Root_SDA2 = 8388610,
+ Root_HDC1 = 23068673,
+ Root_SR0 = 11534336,
+};
+
+struct iovec {
+ void *iov_base;
+ __kernel_size_t iov_len;
+};
+
+struct kvec {
+ void *iov_base;
+ size_t iov_len;
+};
+
+typedef short unsigned int __kernel_sa_family_t;
+
+struct __kernel_sockaddr_storage {
+ union {
+ struct {
+ __kernel_sa_family_t ss_family;
+ char __data[126];
+ };
+ void *__align;
+ };
+};
+
+typedef __kernel_sa_family_t sa_family_t;
+
+struct sockaddr {
+ sa_family_t sa_family;
+ char sa_data[14];
+};
+
+struct msghdr {
+ void *msg_name;
+ int msg_namelen;
+ struct iov_iter msg_iter;
+ union {
+ void *msg_control;
+ void *msg_control_user;
+ };
+ bool msg_control_is_user: 1;
+ __kernel_size_t msg_controllen;
+ unsigned int msg_flags;
+ struct kiocb *msg_iocb;
+};
+
+struct xdr_buf {
+ struct kvec head[1];
+ struct kvec tail[1];
+ struct bio_vec *bvec;
+ struct page **pages;
+ unsigned int page_base;
+ unsigned int page_len;
+ unsigned int flags;
+ unsigned int buflen;
+ unsigned int len;
+};
+
+struct rpc_rqst;
+
+struct xdr_stream {
+ __be32 *p;
+ struct xdr_buf *buf;
+ __be32 *end;
+ struct kvec *iov;
+ struct kvec scratch;
+ struct page **page_ptr;
+ unsigned int nwords;
+ struct rpc_rqst *rqst;
+};
+
+struct rpc_xprt;
+
+struct rpc_task;
+
+struct rpc_cred;
+
+struct rpc_rqst {
+ struct rpc_xprt *rq_xprt;
+ struct xdr_buf rq_snd_buf;
+ struct xdr_buf rq_rcv_buf;
+ struct rpc_task *rq_task;
+ struct rpc_cred *rq_cred;
+ __be32 rq_xid;
+ int rq_cong;
+ u32 rq_seqno;
+ int rq_enc_pages_num;
+ struct page **rq_enc_pages;
+ void (*rq_release_snd_buf)(struct rpc_rqst *);
+ union {
+ struct list_head rq_list;
+ struct rb_node rq_recv;
+ };
+ struct list_head rq_xmit;
+ struct list_head rq_xmit2;
+ void *rq_buffer;
+ size_t rq_callsize;
+ void *rq_rbuffer;
+ size_t rq_rcvsize;
+ size_t rq_xmit_bytes_sent;
+ size_t rq_reply_bytes_recvd;
+ struct xdr_buf rq_private_buf;
+ long unsigned int rq_majortimeo;
+ long unsigned int rq_timeout;
+ ktime_t rq_rtt;
+ unsigned int rq_retries;
+ unsigned int rq_connect_cookie;
+ atomic_t rq_pin;
+ u32 rq_bytes_sent;
+ ktime_t rq_xtime;
+ int rq_ntrans;
+};
+
+typedef void (*kxdreproc_t)(struct rpc_rqst *, struct xdr_stream *, const void *);
+
+typedef int (*kxdrdproc_t)(struct rpc_rqst *, struct xdr_stream *, void *);
+
+struct rpc_procinfo;
+
+struct rpc_message {
+ const struct rpc_procinfo *rpc_proc;
+ void *rpc_argp;
+ void *rpc_resp;
+ const struct cred *rpc_cred;
+};
+
+struct rpc_procinfo {
+ u32 p_proc;
+ kxdreproc_t p_encode;
+ kxdrdproc_t p_decode;
+ unsigned int p_arglen;
+ unsigned int p_replen;
+ unsigned int p_timer;
+ u32 p_statidx;
+ const char *p_name;
+};
+
+struct rpc_wait {
+ struct list_head list;
+ struct list_head links;
+ struct list_head timer_list;
+};
+
+struct rpc_wait_queue;
+
+struct rpc_call_ops;
+
+struct rpc_clnt;
+
+struct rpc_task {
+ atomic_t tk_count;
+ int tk_status;
+ struct list_head tk_task;
+ void (*tk_callback)(struct rpc_task *);
+ void (*tk_action)(struct rpc_task *);
+ long unsigned int tk_timeout;
+ long unsigned int tk_runstate;
+ struct rpc_wait_queue *tk_waitqueue;
+ union {
+ struct work_struct tk_work;
+ struct rpc_wait tk_wait;
+ } u;
+ int tk_rpc_status;
+ struct rpc_message tk_msg;
+ void *tk_calldata;
+ const struct rpc_call_ops *tk_ops;
+ struct rpc_clnt *tk_client;
+ struct rpc_xprt *tk_xprt;
+ struct rpc_cred *tk_op_cred;
+ struct rpc_rqst *tk_rqstp;
+ struct workqueue_struct *tk_workqueue;
+ ktime_t tk_start;
+ pid_t tk_owner;
+ short unsigned int tk_flags;
+ short unsigned int tk_timeouts;
+ short unsigned int tk_pid;
+ unsigned char tk_priority: 2;
+ unsigned char tk_garb_retry: 2;
+ unsigned char tk_cred_retry: 2;
+ unsigned char tk_rebind_retry: 2;
+};
+
+struct rpc_timer {
+ struct list_head list;
+ long unsigned int expires;
+ struct delayed_work dwork;
+};
+
+struct rpc_wait_queue {
+ spinlock_t lock;
+ struct list_head tasks[4];
+ unsigned char maxpriority;
+ unsigned char priority;
+ unsigned char nr;
+ short unsigned int qlen;
+ struct rpc_timer timer_list;
+ const char *name;
+};
+
+struct rpc_call_ops {
+ void (*rpc_call_prepare)(struct rpc_task *, void *);
+ void (*rpc_call_done)(struct rpc_task *, void *);
+ void (*rpc_count_stats)(struct rpc_task *, void *);
+ void (*rpc_release)(void *);
+};
+
+struct rpc_iostats;
+
+struct rpc_pipe_dir_head {
+ struct list_head pdh_entries;
+ struct dentry *pdh_dentry;
+};
+
+struct rpc_rtt {
+ long unsigned int timeo;
+ long unsigned int srtt[5];
+ long unsigned int sdrtt[5];
+ int ntimeouts[5];
+};
+
+struct rpc_timeout {
+ long unsigned int to_initval;
+ long unsigned int to_maxval;
+ long unsigned int to_increment;
+ unsigned int to_retries;
+ unsigned char to_exponential;
+};
+
+struct rpc_xprt_switch;
+
+struct rpc_xprt_iter_ops;
+
+struct rpc_xprt_iter {
+ struct rpc_xprt_switch *xpi_xpswitch;
+ struct rpc_xprt *xpi_cursor;
+ const struct rpc_xprt_iter_ops *xpi_ops;
+};
+
+struct rpc_auth;
+
+struct rpc_stat;
+
+struct rpc_program;
+
+struct rpc_clnt {
+ atomic_t cl_count;
+ unsigned int cl_clid;
+ struct list_head cl_clients;
+ struct list_head cl_tasks;
+ spinlock_t cl_lock;
+ struct rpc_xprt *cl_xprt;
+ const struct rpc_procinfo *cl_procinfo;
+ u32 cl_prog;
+ u32 cl_vers;
+ u32 cl_maxproc;
+ struct rpc_auth *cl_auth;
+ struct rpc_stat *cl_stats;
+ struct rpc_iostats *cl_metrics;
+ unsigned int cl_softrtry: 1;
+ unsigned int cl_softerr: 1;
+ unsigned int cl_discrtry: 1;
+ unsigned int cl_noretranstimeo: 1;
+ unsigned int cl_autobind: 1;
+ unsigned int cl_chatty: 1;
+ struct rpc_rtt *cl_rtt;
+ const struct rpc_timeout *cl_timeout;
+ atomic_t cl_swapper;
+ int cl_nodelen;
+ char cl_nodename[65];
+ struct rpc_pipe_dir_head cl_pipedir_objects;
+ struct rpc_clnt *cl_parent;
+ struct rpc_rtt cl_rtt_default;
+ struct rpc_timeout cl_timeout_default;
+ const struct rpc_program *cl_program;
+ const char *cl_principal;
+ union {
+ struct rpc_xprt_iter cl_xpi;
+ struct work_struct cl_work;
+ };
+ const struct cred *cl_cred;
+};
+
+struct svc_xprt;
+
+struct rpc_xprt_ops;
+
+struct rpc_xprt {
+ struct kref kref;
+ const struct rpc_xprt_ops *ops;
+ const struct rpc_timeout *timeout;
+ struct __kernel_sockaddr_storage addr;
+ size_t addrlen;
+ int prot;
+ long unsigned int cong;
+ long unsigned int cwnd;
+ size_t max_payload;
+ struct rpc_wait_queue binding;
+ struct rpc_wait_queue sending;
+ struct rpc_wait_queue pending;
+ struct rpc_wait_queue backlog;
+ struct list_head free;
+ unsigned int max_reqs;
+ unsigned int min_reqs;
+ unsigned int num_reqs;
+ long unsigned int state;
+ unsigned char resvport: 1;
+ unsigned char reuseport: 1;
+ atomic_t swapper;
+ unsigned int bind_index;
+ struct list_head xprt_switch;
+ long unsigned int bind_timeout;
+ long unsigned int reestablish_timeout;
+ unsigned int connect_cookie;
+ struct work_struct task_cleanup;
+ struct timer_list timer;
+ long unsigned int last_used;
+ long unsigned int idle_timeout;
+ long unsigned int connect_timeout;
+ long unsigned int max_reconnect_timeout;
+ atomic_long_t queuelen;
+ spinlock_t transport_lock;
+ spinlock_t reserve_lock;
+ spinlock_t queue_lock;
+ u32 xid;
+ struct rpc_task *snd_task;
+ struct list_head xmit_queue;
+ struct svc_xprt *bc_xprt;
+ struct rb_root recv_queue;
+ struct {
+ long unsigned int bind_count;
+ long unsigned int connect_count;
+ long unsigned int connect_start;
+ long unsigned int connect_time;
+ long unsigned int sends;
+ long unsigned int recvs;
+ long unsigned int bad_xids;
+ long unsigned int max_slots;
+ long long unsigned int req_u;
+ long long unsigned int bklog_u;
+ long long unsigned int sending_u;
+ long long unsigned int pending_u;
+ } stat;
+ struct net *xprt_net;
+ const char *servername;
+ const char *address_strings[6];
+ struct callback_head rcu;
+};
+
+struct rpc_credops;
+
+struct rpc_cred {
+ struct hlist_node cr_hash;
+ struct list_head cr_lru;
+ struct callback_head cr_rcu;
+ struct rpc_auth *cr_auth;
+ const struct rpc_credops *cr_ops;
+ long unsigned int cr_expire;
+ long unsigned int cr_flags;
+ refcount_t cr_count;
+ const struct cred *cr_cred;
+};
+
+typedef u32 rpc_authflavor_t;
+
+struct ethhdr {
+ unsigned char h_dest[6];
+ unsigned char h_source[6];
+ __be16 h_proto;
+};
+
+struct flow_dissector {
+ unsigned int used_keys;
+ short unsigned int offset[27];
+};
+
+struct flowi_tunnel {
+ __be64 tun_id;
+};
+
+struct flowi_common {
+ int flowic_oif;
+ int flowic_iif;
+ __u32 flowic_mark;
+ __u8 flowic_tos;
+ __u8 flowic_scope;
+ __u8 flowic_proto;
+ __u8 flowic_flags;
+ __u32 flowic_secid;
+ kuid_t flowic_uid;
+ struct flowi_tunnel flowic_tun_key;
+ __u32 flowic_multipath_hash;
+};
+
+union flowi_uli {
+ struct {
+ __be16 dport;
+ __be16 sport;
+ } ports;
+ struct {
+ __u8 type;
+ __u8 code;
+ } icmpt;
+ struct {
+ __le16 dport;
+ __le16 sport;
+ } dnports;
+ __be32 spi;
+ __be32 gre_key;
+ struct {
+ __u8 type;
+ } mht;
+};
+
+struct flowi6 {
+ struct flowi_common __fl_common;
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ __be32 flowlabel;
+ union flowi_uli uli;
+ __u32 mp_hash;
+};
+
+struct ipstats_mib {
+ u64 mibs[37];
+ struct u64_stats_sync syncp;
+};
+
+struct icmp_mib {
+ long unsigned int mibs[28];
+};
+
+struct icmpmsg_mib {
+ atomic_long_t mibs[512];
+};
+
+struct icmpv6_mib {
+ long unsigned int mibs[6];
+};
+
+struct icmpv6_mib_device {
+ atomic_long_t mibs[6];
+};
+
+struct icmpv6msg_mib {
+ atomic_long_t mibs[512];
+};
+
+struct icmpv6msg_mib_device {
+ atomic_long_t mibs[512];
+};
+
+struct tcp_mib {
+ long unsigned int mibs[16];
+};
+
+struct udp_mib {
+ long unsigned int mibs[9];
+};
+
+struct linux_mib {
+ long unsigned int mibs[122];
+};
+
+struct linux_tls_mib {
+ long unsigned int mibs[11];
+};
+
+struct inet_frags;
+
+struct fqdir {
+ long int high_thresh;
+ long int low_thresh;
+ int timeout;
+ int max_dist;
+ struct inet_frags *f;
+ struct net *net;
+ bool dead;
+ long: 56;
+ long: 64;
+ long: 64;
+ struct rhashtable rhashtable;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ atomic_long_t mem;
+ struct work_struct destroy_work;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct inet_frag_queue;
+
+struct inet_frags {
+ unsigned int qsize;
+ void (*constructor)(struct inet_frag_queue *, const void *);
+ void (*destructor)(struct inet_frag_queue *);
+ void (*frag_expire)(struct timer_list *);
+ struct kmem_cache *frags_cachep;
+ const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
+ refcount_t refcnt;
+ struct completion completion;
+};
+
+struct frag_v4_compare_key {
+ __be32 saddr;
+ __be32 daddr;
+ u32 user;
+ u32 vif;
+ __be16 id;
+ u16 protocol;
+};
+
+struct frag_v6_compare_key {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 user;
+ __be32 id;
+ u32 iif;
+};
+
+struct inet_frag_queue {
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
+ struct timer_list timer;
+ spinlock_t lock;
+ refcount_t refcnt;
+ struct rb_root rb_fragments;
+ struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
+ ktime_t stamp;
+ int len;
+ int meat;
+ __u8 flags;
+ u16 max_size;
+ struct fqdir *fqdir;
+ struct callback_head rcu;
+};
+
+enum tcp_ca_event {
+ CA_EVENT_TX_START = 0,
+ CA_EVENT_CWND_RESTART = 1,
+ CA_EVENT_COMPLETE_CWR = 2,
+ CA_EVENT_LOSS = 3,
+ CA_EVENT_ECN_NO_CE = 4,
+ CA_EVENT_ECN_IS_CE = 5,
+};
+
+struct ack_sample;
+
+struct rate_sample;
+
+union tcp_cc_info;
+
+struct tcp_congestion_ops {
+ struct list_head list;
+ u32 key;
+ u32 flags;
+ void (*init)(struct sock *);
+ void (*release)(struct sock *);
+ u32 (*ssthresh)(struct sock *);
+ void (*cong_avoid)(struct sock *, u32, u32);
+ void (*set_state)(struct sock *, u8);
+ void (*cwnd_event)(struct sock *, enum tcp_ca_event);
+ void (*in_ack_event)(struct sock *, u32);
+ u32 (*undo_cwnd)(struct sock *);
+ void (*pkts_acked)(struct sock *, const struct ack_sample *);
+ u32 (*min_tso_segs)(struct sock *);
+ u32 (*sndbuf_expand)(struct sock *);
+ void (*cong_control)(struct sock *, const struct rate_sample *);
+ size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *);
+ char name[16];
+ struct module *owner;
+};
+
+struct netlink_ext_ack;
+
+struct fib_notifier_ops {
+ int family;
+ struct list_head list;
+ unsigned int (*fib_seq_read)(struct net *);
+ int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *);
+ struct module *owner;
+ struct callback_head rcu;
+};
+
+struct xfrm_state;
+
+struct lwtunnel_state;
+
+struct dst_entry {
+ struct net_device *dev;
+ struct dst_ops *ops;
+ long unsigned int _metrics;
+ long unsigned int expires;
+ struct xfrm_state *xfrm;
+ int (*input)(struct sk_buff *);
+ int (*output)(struct net *, struct sock *, struct sk_buff *);
+ short unsigned int flags;
+ short int obsolete;
+ short unsigned int header_len;
+ short unsigned int trailer_len;
+ atomic_t __refcnt;
+ int __use;
+ long unsigned int lastuse;
+ struct lwtunnel_state *lwtstate;
+ struct callback_head callback_head;
+ short int error;
+ short int __pad;
+ __u32 tclassid;
+};
+
+struct hh_cache {
+ unsigned int hh_len;
+ seqlock_t hh_lock;
+ long unsigned int hh_data[4];
+};
+
+struct neigh_table;
+
+struct neigh_parms;
+
+struct neigh_ops;
+
+struct neighbour {
+ struct neighbour *next;
+ struct neigh_table *tbl;
+ struct neigh_parms *parms;
+ long unsigned int confirmed;
+ long unsigned int updated;
+ rwlock_t lock;
+ refcount_t refcnt;
+ unsigned int arp_queue_len_bytes;
+ struct sk_buff_head arp_queue;
+ struct timer_list timer;
+ long unsigned int used;
+ atomic_t probes;
+ __u8 flags;
+ __u8 nud_state;
+ __u8 type;
+ __u8 dead;
+ u8 protocol;
+ seqlock_t ha_lock;
+ unsigned char ha[32];
+ struct hh_cache hh;
+ int (*output)(struct neighbour *, struct sk_buff *);
+ const struct neigh_ops *ops;
+ struct list_head gc_list;
+ struct callback_head rcu;
+ struct net_device *dev;
+ u8 primary_key[0];
+};
+
+struct ipv6_stable_secret {
+ bool initialized;
+ struct in6_addr secret;
+};
+
+struct ipv6_devconf {
+ __s32 forwarding;
+ __s32 hop_limit;
+ __s32 mtu6;
+ __s32 accept_ra;
+ __s32 accept_redirects;
+ __s32 autoconf;
+ __s32 dad_transmits;
+ __s32 rtr_solicits;
+ __s32 rtr_solicit_interval;
+ __s32 rtr_solicit_max_interval;
+ __s32 rtr_solicit_delay;
+ __s32 force_mld_version;
+ __s32 mldv1_unsolicited_report_interval;
+ __s32 mldv2_unsolicited_report_interval;
+ __s32 use_tempaddr;
+ __s32 temp_valid_lft;
+ __s32 temp_prefered_lft;
+ __s32 regen_max_retry;
+ __s32 max_desync_factor;
+ __s32 max_addresses;
+ __s32 accept_ra_defrtr;
+ __s32 accept_ra_min_hop_limit;
+ __s32 accept_ra_pinfo;
+ __s32 ignore_routes_with_linkdown;
+ __s32 proxy_ndp;
+ __s32 accept_source_route;
+ __s32 accept_ra_from_local;
+ __s32 disable_ipv6;
+ __s32 drop_unicast_in_l2_multicast;
+ __s32 accept_dad;
+ __s32 force_tllao;
+ __s32 ndisc_notify;
+ __s32 suppress_frag_ndisc;
+ __s32 accept_ra_mtu;
+ __s32 drop_unsolicited_na;
+ struct ipv6_stable_secret stable_secret;
+ __s32 use_oif_addrs_only;
+ __s32 keep_addr_on_down;
+ __s32 seg6_enabled;
+ __u32 enhanced_dad;
+ __u32 addr_gen_mode;
+ __s32 disable_policy;
+ __s32 ndisc_tclass;
+ __s32 rpl_seg_enabled;
+ struct ctl_table_header *sysctl_header;
+};
+
+struct nf_queue_entry;
+
+struct nf_queue_handler {
+ int (*outfn)(struct nf_queue_entry *, unsigned int);
+ void (*nf_hook_drop)(struct net *);
+};
+
+enum nf_log_type {
+ NF_LOG_TYPE_LOG = 0,
+ NF_LOG_TYPE_ULOG = 1,
+ NF_LOG_TYPE_MAX = 2,
+};
+
+typedef u8 u_int8_t;
+
+struct nf_loginfo;
+
+typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *);
+
+struct nf_logger {
+ char *name;
+ enum nf_log_type type;
+ nf_logfn *logfn;
+ struct module *me;
+};
+
+struct hlist_nulls_head {
+ struct hlist_nulls_node *first;
+};
+
+struct ip_conntrack_stat {
+ unsigned int found;
+ unsigned int invalid;
+ unsigned int ignore;
+ unsigned int insert;
+ unsigned int insert_failed;
+ unsigned int drop;
+ unsigned int early_drop;
+ unsigned int error;
+ unsigned int expect_new;
+ unsigned int expect_create;
+ unsigned int expect_delete;
+ unsigned int search_restart;
+};
+
+struct ct_pcpu {
+ spinlock_t lock;
+ struct hlist_nulls_head unconfirmed;
+ struct hlist_nulls_head dying;
+};
+
+typedef enum {
+ SS_FREE = 0,
+ SS_UNCONNECTED = 1,
+ SS_CONNECTING = 2,
+ SS_CONNECTED = 3,
+ SS_DISCONNECTING = 4,
+} socket_state;
+
+struct socket_wq {
+ wait_queue_head_t wait;
+ struct fasync_struct *fasync_list;
+ long unsigned int flags;
+ struct callback_head rcu;
+ long: 64;
+ long: 64;
+};
+
+struct proto_ops;
+
+struct socket {
+ socket_state state;
+ short int type;
+ long unsigned int flags;
+ struct file *file;
+ struct sock *sk;
+ const struct proto_ops *ops;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct socket_wq wq;
+};
+
+typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t);
+
+struct proto_ops {
+ int family;
+ struct module *owner;
+ int (*release)(struct socket *);
+ int (*bind)(struct socket *, struct sockaddr *, int);
+ int (*connect)(struct socket *, struct sockaddr *, int, int);
+ int (*socketpair)(struct socket *, struct socket *);
+ int (*accept)(struct socket *, struct socket *, int, bool);
+ int (*getname)(struct socket *, struct sockaddr *, int);
+ __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *);
+ int (*ioctl)(struct socket *, unsigned int, long unsigned int);
+ int (*gettstamp)(struct socket *, void *, bool, bool);
+ int (*listen)(struct socket *, int);
+ int (*shutdown)(struct socket *, int);
+ int (*setsockopt)(struct socket *, int, int, char *, unsigned int);
+ int (*getsockopt)(struct socket *, int, int, char *, int *);
+ void (*show_fdinfo)(struct seq_file *, struct socket *);
+ int (*sendmsg)(struct socket *, struct msghdr *, size_t);
+ int (*recvmsg)(struct socket *, struct msghdr *, size_t, int);
+ int (*mmap)(struct file *, struct socket *, struct vm_area_struct *);
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+ int (*set_peek_off)(struct sock *, int);
+ int (*peek_len)(struct socket *);
+ int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t);
+ int (*sendpage_locked)(struct sock *, struct page *, int, size_t, int);
+ int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t);
+ int (*set_rcvlowat)(struct sock *, int);
+};
+
+enum swiotlb_force {
+ SWIOTLB_NORMAL = 0,
+ SWIOTLB_FORCE = 1,
+ SWIOTLB_NO_FORCE = 2,
+};
+
+struct pipe_buf_operations;
+
+struct pipe_buffer {
+ struct page *page;
+ unsigned int offset;
+ unsigned int len;
+ const struct pipe_buf_operations *ops;
+ unsigned int flags;
+ long unsigned int private;
+};
+
+struct pipe_buf_operations {
+ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+ void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+};
+
+struct skb_ext {
+ refcount_t refcnt;
+ u8 offset[4];
+ u8 chunks;
+ short: 16;
+ char data[0];
+};
+
+struct skb_checksum_ops {
+ __wsum (*update)(const void *, int, __wsum);
+ __wsum (*combine)(__wsum, __wsum, int, int);
+};
+
+struct pernet_operations {
+ struct list_head list;
+ int (*init)(struct net *);
+ void (*pre_exit)(struct net *);
+ void (*exit)(struct net *);
+ void (*exit_batch)(struct list_head *);
+ unsigned int *id;
+ size_t size;
+};
+
+struct auth_cred {
+ const struct cred *cred;
+ const char *principal;
+};
+
+struct rpc_cred_cache;
+
+struct rpc_authops;
+
+struct rpc_auth {
+ unsigned int au_cslack;
+ unsigned int au_rslack;
+ unsigned int au_verfsize;
+ unsigned int au_ralign;
+ long unsigned int au_flags;
+ const struct rpc_authops *au_ops;
+ rpc_authflavor_t au_flavor;
+ refcount_t au_count;
+ struct rpc_cred_cache *au_credcache;
+};
+
+struct rpc_credops {
+ const char *cr_name;
+ int (*cr_init)(struct rpc_auth *, struct rpc_cred *);
+ void (*crdestroy)(struct rpc_cred *);
+ int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
+ int (*crmarshal)(struct rpc_task *, struct xdr_stream *);
+ int (*crrefresh)(struct rpc_task *);
+ int (*crvalidate)(struct rpc_task *, struct xdr_stream *);
+ int (*crwrap_req)(struct rpc_task *, struct xdr_stream *);
+ int (*crunwrap_resp)(struct rpc_task *, struct xdr_stream *);
+ int (*crkey_timeout)(struct rpc_cred *);
+ char * (*crstringify_acceptor)(struct rpc_cred *);
+ bool (*crneed_reencode)(struct rpc_task *);
+};
+
+struct rpc_auth_create_args;
+
+struct rpcsec_gss_info;
+
+struct rpc_authops {
+ struct module *owner;
+ rpc_authflavor_t au_flavor;
+ char *au_name;
+ struct rpc_auth * (*create)(const struct rpc_auth_create_args *, struct rpc_clnt *);
+ void (*destroy)(struct rpc_auth *);
+ int (*hash_cred)(struct auth_cred *, unsigned int);
+ struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
+ struct rpc_cred * (*crcreate)(struct rpc_auth *, struct auth_cred *, int, gfp_t);
+ rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
+ int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *);
+ int (*key_timeout)(struct rpc_auth *, struct rpc_cred *);
+};
+
+struct rpc_auth_create_args {
+ rpc_authflavor_t pseudoflavor;
+ const char *target_name;
+};
+
+struct rpcsec_gss_oid {
+ unsigned int len;
+ u8 data[32];
+};
+
+struct rpcsec_gss_info {
+ struct rpcsec_gss_oid oid;
+ u32 qop;
+ u32 service;
+};
+
+struct rpc_xprt_ops {
+ void (*set_buffer_size)(struct rpc_xprt *, size_t, size_t);
+ int (*reserve_xprt)(struct rpc_xprt *, struct rpc_task *);
+ void (*release_xprt)(struct rpc_xprt *, struct rpc_task *);
+ void (*alloc_slot)(struct rpc_xprt *, struct rpc_task *);
+ void (*free_slot)(struct rpc_xprt *, struct rpc_rqst *);
+ void (*rpcbind)(struct rpc_task *);
+ void (*set_port)(struct rpc_xprt *, short unsigned int);
+ void (*connect)(struct rpc_xprt *, struct rpc_task *);
+ int (*buf_alloc)(struct rpc_task *);
+ void (*buf_free)(struct rpc_task *);
+ void (*prepare_request)(struct rpc_rqst *);
+ int (*send_request)(struct rpc_rqst *);
+ void (*wait_for_reply_request)(struct rpc_task *);
+ void (*timer)(struct rpc_xprt *, struct rpc_task *);
+ void (*release_request)(struct rpc_task *);
+ void (*close)(struct rpc_xprt *);
+ void (*destroy)(struct rpc_xprt *);
+ void (*set_connect_timeout)(struct rpc_xprt *, long unsigned int, long unsigned int);
+ void (*print_stats)(struct rpc_xprt *, struct seq_file *);
+ int (*enable_swap)(struct rpc_xprt *);
+ void (*disable_swap)(struct rpc_xprt *);
+ void (*inject_disconnect)(struct rpc_xprt *);
+ int (*bc_setup)(struct rpc_xprt *, unsigned int);
+ size_t (*bc_maxpayload)(struct rpc_xprt *);
+ unsigned int (*bc_num_slots)(struct rpc_xprt *);
+ void (*bc_free_rqst)(struct rpc_rqst *);
+ void (*bc_destroy)(struct rpc_xprt *, unsigned int);
+};
+
+struct rpc_xprt_switch {
+ spinlock_t xps_lock;
+ struct kref xps_kref;
+ unsigned int xps_nxprts;
+ unsigned int xps_nactive;
+ atomic_long_t xps_queuelen;
+ struct list_head xps_xprt_list;
+ struct net *xps_net;
+ const struct rpc_xprt_iter_ops *xps_iter_ops;
+ struct callback_head xps_rcu;
+};
+
+struct rpc_stat {
+ const struct rpc_program *program;
+ unsigned int netcnt;
+ unsigned int netudpcnt;
+ unsigned int nettcpcnt;
+ unsigned int nettcpconn;
+ unsigned int netreconn;
+ unsigned int rpccnt;
+ unsigned int rpcretrans;
+ unsigned int rpcauthrefresh;
+ unsigned int rpcgarbage;
+};
+
+struct rpc_version;
+
+struct rpc_program {
+ const char *name;
+ u32 number;
+ unsigned int nrvers;
+ const struct rpc_version **version;
+ struct rpc_stat *stats;
+ const char *pipe_dir_name;
+};
+
+struct ipv6_params {
+ __s32 disable_ipv6;
+ __s32 autoconf;
+};
+
+struct dql {
+ unsigned int num_queued;
+ unsigned int adj_limit;
+ unsigned int last_obj_cnt;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ unsigned int limit;
+ unsigned int num_completed;
+ unsigned int prev_ovlimit;
+ unsigned int prev_num_queued;
+ unsigned int prev_last_obj_cnt;
+ unsigned int lowest_slack;
+ long unsigned int slack_start_time;
+ unsigned int max_limit;
+ unsigned int min_limit;
+ unsigned int slack_hold_time;
+ long: 32;
+ long: 64;
+ long: 64;
+};
+
+typedef struct {
+ unsigned int clock_rate;
+ unsigned int clock_type;
+ short unsigned int loopback;
+} sync_serial_settings;
+
+typedef struct {
+ unsigned int clock_rate;
+ unsigned int clock_type;
+ short unsigned int loopback;
+ unsigned int slot_map;
+} te1_settings;
+
+typedef struct {
+ short unsigned int encoding;
+ short unsigned int parity;
+} raw_hdlc_proto;
+
+typedef struct {
+ unsigned int t391;
+ unsigned int t392;
+ unsigned int n391;
+ unsigned int n392;
+ unsigned int n393;
+ short unsigned int lmi;
+ short unsigned int dce;
+} fr_proto;
+
+typedef struct {
+ unsigned int dlci;
+} fr_proto_pvc;
+
+typedef struct {
+ unsigned int dlci;
+ char master[16];
+} fr_proto_pvc_info;
+
+typedef struct {
+ unsigned int interval;
+ unsigned int timeout;
+} cisco_proto;
+
+typedef struct {
+ short unsigned int dce;
+ unsigned int modulo;
+ unsigned int window;
+ unsigned int t1;
+ unsigned int t2;
+ unsigned int n2;
+} x25_hdlc_proto;
+
+struct ifmap {
+ long unsigned int mem_start;
+ long unsigned int mem_end;
+ short unsigned int base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+};
+
+struct if_settings {
+ unsigned int type;
+ unsigned int size;
+ union {
+ raw_hdlc_proto *raw_hdlc;
+ cisco_proto *cisco;
+ fr_proto *fr;
+ fr_proto_pvc *fr_pvc;
+ fr_proto_pvc_info *fr_pvc_info;
+ x25_hdlc_proto *x25;
+ sync_serial_settings *sync;
+ te1_settings *te1;
+ } ifs_ifsu;
+};
+
+struct ifreq {
+ union {
+ char ifrn_name[16];
+ } ifr_ifrn;
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short int ifru_flags;
+ int ifru_ivalue;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[16];
+ char ifru_newname[16];
+ void *ifru_data;
+ struct if_settings ifru_settings;
+ } ifr_ifru;
+};
+
+struct ethtool_drvinfo {
+ __u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char erom_version[32];
+ char reserved2[12];
+ __u32 n_priv_flags;
+ __u32 n_stats;
+ __u32 testinfo_len;
+ __u32 eedump_len;
+ __u32 regdump_len;
+};
+
+struct ethtool_wolinfo {
+ __u32 cmd;
+ __u32 supported;
+ __u32 wolopts;
+ __u8 sopass[6];
+};
+
+struct ethtool_tunable {
+ __u32 cmd;
+ __u32 id;
+ __u32 type_id;
+ __u32 len;
+ void *data[0];
+};
+
+struct ethtool_regs {
+ __u32 cmd;
+ __u32 version;
+ __u32 len;
+ __u8 data[0];
+};
+
+struct ethtool_eeprom {
+ __u32 cmd;
+ __u32 magic;
+ __u32 offset;
+ __u32 len;
+ __u8 data[0];
+};
+
+struct ethtool_eee {
+ __u32 cmd;
+ __u32 supported;
+ __u32 advertised;
+ __u32 lp_advertised;
+ __u32 eee_active;
+ __u32 eee_enabled;
+ __u32 tx_lpi_enabled;
+ __u32 tx_lpi_timer;
+ __u32 reserved[2];
+};
+
+struct ethtool_modinfo {
+ __u32 cmd;
+ __u32 type;
+ __u32 eeprom_len;
+ __u32 reserved[8];
+};
+
+struct ethtool_coalesce {
+ __u32 cmd;
+ __u32 rx_coalesce_usecs;
+ __u32 rx_max_coalesced_frames;
+ __u32 rx_coalesce_usecs_irq;
+ __u32 rx_max_coalesced_frames_irq;
+ __u32 tx_coalesce_usecs;
+ __u32 tx_max_coalesced_frames;
+ __u32 tx_coalesce_usecs_irq;
+ __u32 tx_max_coalesced_frames_irq;
+ __u32 stats_block_coalesce_usecs;
+ __u32 use_adaptive_rx_coalesce;
+ __u32 use_adaptive_tx_coalesce;
+ __u32 pkt_rate_low;
+ __u32 rx_coalesce_usecs_low;
+ __u32 rx_max_coalesced_frames_low;
+ __u32 tx_coalesce_usecs_low;
+ __u32 tx_max_coalesced_frames_low;
+ __u32 pkt_rate_high;
+ __u32 rx_coalesce_usecs_high;
+ __u32 rx_max_coalesced_frames_high;
+ __u32 tx_coalesce_usecs_high;
+ __u32 tx_max_coalesced_frames_high;
+ __u32 rate_sample_interval;
+};
+
+struct ethtool_ringparam {
+ __u32 cmd;
+ __u32 rx_max_pending;
+ __u32 rx_mini_max_pending;
+ __u32 rx_jumbo_max_pending;
+ __u32 tx_max_pending;
+ __u32 rx_pending;
+ __u32 rx_mini_pending;
+ __u32 rx_jumbo_pending;
+ __u32 tx_pending;
+};
+
+struct ethtool_channels {
+ __u32 cmd;
+ __u32 max_rx;
+ __u32 max_tx;
+ __u32 max_other;
+ __u32 max_combined;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+struct ethtool_pauseparam {
+ __u32 cmd;
+ __u32 autoneg;
+ __u32 rx_pause;
+ __u32 tx_pause;
+};
+
+struct ethtool_test {
+ __u32 cmd;
+ __u32 flags;
+ __u32 reserved;
+ __u32 len;
+ __u64 data[0];
+};
+
+struct ethtool_stats {
+ __u32 cmd;
+ __u32 n_stats;
+ __u64 data[0];
+};
+
+struct ethtool_tcpip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be16 psrc;
+ __be16 pdst;
+ __u8 tos;
+};
+
+struct ethtool_ah_espip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be32 spi;
+ __u8 tos;
+};
+
+struct ethtool_usrip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be32 l4_4_bytes;
+ __u8 tos;
+ __u8 ip_ver;
+ __u8 proto;
+};
+
+struct ethtool_tcpip6_spec {
+ __be32 ip6src[4];
+ __be32 ip6dst[4];
+ __be16 psrc;
+ __be16 pdst;
+ __u8 tclass;
+};
+
+struct ethtool_ah_espip6_spec {
+ __be32 ip6src[4];
+ __be32 ip6dst[4];
+ __be32 spi;
+ __u8 tclass;
+};
+
+struct ethtool_usrip6_spec {
+ __be32 ip6src[4];
+ __be32 ip6dst[4];
+ __be32 l4_4_bytes;
+ __u8 tclass;
+ __u8 l4_proto;
+};
+
+union ethtool_flow_union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_tcpip4_spec udp_ip4_spec;
+ struct ethtool_tcpip4_spec sctp_ip4_spec;
+ struct ethtool_ah_espip4_spec ah_ip4_spec;
+ struct ethtool_ah_espip4_spec esp_ip4_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ struct ethtool_tcpip6_spec tcp_ip6_spec;
+ struct ethtool_tcpip6_spec udp_ip6_spec;
+ struct ethtool_tcpip6_spec sctp_ip6_spec;
+ struct ethtool_ah_espip6_spec ah_ip6_spec;
+ struct ethtool_ah_espip6_spec esp_ip6_spec;
+ struct ethtool_usrip6_spec usr_ip6_spec;
+ struct ethhdr ether_spec;
+ __u8 hdata[52];
+};
+
+struct ethtool_flow_ext {
+ __u8 padding[2];
+ unsigned char h_dest[6];
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ __be32 data[2];
+};
+
+struct ethtool_rx_flow_spec {
+ __u32 flow_type;
+ union ethtool_flow_union h_u;
+ struct ethtool_flow_ext h_ext;
+ union ethtool_flow_union m_u;
+ struct ethtool_flow_ext m_ext;
+ __u64 ring_cookie;
+ __u32 location;
+};
+
+struct ethtool_rxnfc {
+ __u32 cmd;
+ __u32 flow_type;
+ __u64 data;
+ struct ethtool_rx_flow_spec fs;
+ union {
+ __u32 rule_cnt;
+ __u32 rss_context;
+ };
+ __u32 rule_locs[0];
+};
+
+struct ethtool_flash {
+ __u32 cmd;
+ __u32 region;
+ char data[128];
+};
+
+struct ethtool_dump {
+ __u32 cmd;
+ __u32 version;
+ __u32 flag;
+ __u32 len;
+ __u8 data[0];
+};
+
+struct ethtool_ts_info {
+ __u32 cmd;
+ __u32 so_timestamping;
+ __s32 phc_index;
+ __u32 tx_types;
+ __u32 tx_reserved[3];
+ __u32 rx_filters;
+ __u32 rx_reserved[3];
+};
+
+struct ethtool_fecparam {
+ __u32 cmd;
+ __u32 active_fec;
+ __u32 fec;
+ __u32 reserved;
+};
+
+struct ethtool_link_settings {
+ __u32 cmd;
+ __u32 speed;
+ __u8 duplex;
+ __u8 port;
+ __u8 phy_address;
+ __u8 autoneg;
+ __u8 mdio_support;
+ __u8 eth_tp_mdix;
+ __u8 eth_tp_mdix_ctrl;
+ __s8 link_mode_masks_nwords;
+ __u8 transceiver;
+ __u8 master_slave_cfg;
+ __u8 master_slave_state;
+ __u8 reserved1[1];
+ __u32 reserved[7];
+ __u32 link_mode_masks[0];
+};
+
+enum ethtool_phys_id_state {
+ ETHTOOL_ID_INACTIVE = 0,
+ ETHTOOL_ID_ACTIVE = 1,
+ ETHTOOL_ID_ON = 2,
+ ETHTOOL_ID_OFF = 3,
+};
+
+struct ethtool_link_ksettings {
+ struct ethtool_link_settings base;
+ struct {
+ long unsigned int supported[2];
+ long unsigned int advertising[2];
+ long unsigned int lp_advertising[2];
+ } link_modes;
+};
+
+struct ethtool_ops {
+ u32 supported_coalesce_params;
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+ int (*get_regs_len)(struct net_device *);
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+ u32 (*get_msglevel)(struct net_device *);
+ void (*set_msglevel)(struct net_device *, u32);
+ int (*nway_reset)(struct net_device *);
+ u32 (*get_link)(struct net_device *);
+ int (*get_eeprom_len)(struct net_device *);
+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);
+ int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+ void (*get_strings)(struct net_device *, u32, u8 *);
+ int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);
+ int (*begin)(struct net_device *);
+ void (*complete)(struct net_device *);
+ u32 (*get_priv_flags)(struct net_device *);
+ int (*set_priv_flags)(struct net_device *, u32);
+ int (*get_sset_count)(struct net_device *, int);
+ int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);
+ int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
+ int (*flash_device)(struct net_device *, struct ethtool_flash *);
+ int (*reset)(struct net_device *, u32 *);
+ u32 (*get_rxfh_key_size)(struct net_device *);
+ u32 (*get_rxfh_indir_size)(struct net_device *);
+ int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *);
+ int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8);
+ int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32);
+ int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, u32 *, bool);
+ void (*get_channels)(struct net_device *, struct ethtool_channels *);
+ int (*set_channels)(struct net_device *, struct ethtool_channels *);
+ int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
+ int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);
+ int (*set_dump)(struct net_device *, struct ethtool_dump *);
+ int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);
+ int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_eee)(struct net_device *, struct ethtool_eee *);
+ int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *);
+ int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *);
+ int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *);
+ int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *);
+ int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *);
+ int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *);
+ int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *);
+ int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *);
+ void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *);
+};
+
+struct netprio_map {
+ struct callback_head rcu;
+ u32 priomap_len;
+ u32 priomap[0];
+};
+
+struct xdp_mem_info {
+ u32 type;
+ u32 id;
+};
+
+struct xdp_rxq_info {
+ struct net_device *dev;
+ u32 queue_index;
+ u32 reg_state;
+ struct xdp_mem_info mem;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xdp_frame {
+ void *data;
+ u16 len;
+ u16 headroom;
+ u32 metasize: 8;
+ u32 frame_sz: 24;
+ struct xdp_mem_info mem;
+ struct net_device *dev_rx;
+};
+
+struct nlmsghdr {
+ __u32 nlmsg_len;
+ __u16 nlmsg_type;
+ __u16 nlmsg_flags;
+ __u32 nlmsg_seq;
+ __u32 nlmsg_pid;
+};
+
+struct nlattr {
+ __u16 nla_len;
+ __u16 nla_type;
+};
+
+struct netlink_ext_ack {
+ const char *_msg;
+ const struct nlattr *bad_attr;
+ u8 cookie[20];
+ u8 cookie_len;
+};
+
+struct netlink_callback {
+ struct sk_buff *skb;
+ const struct nlmsghdr *nlh;
+ int (*dump)(struct sk_buff *, struct netlink_callback *);
+ int (*done)(struct netlink_callback *);
+ void *data;
+ struct module *module;
+ struct netlink_ext_ack *extack;
+ u16 family;
+ u16 answer_flags;
+ u32 min_dump_alloc;
+ unsigned int prev_seq;
+ unsigned int seq;
+ bool strict_check;
+ union {
+ u8 ctx[48];
+ long int args[6];
+ };
+};
+
+struct ndmsg {
+ __u8 ndm_family;
+ __u8 ndm_pad1;
+ __u16 ndm_pad2;
+ __s32 ndm_ifindex;
+ __u16 ndm_state;
+ __u8 ndm_flags;
+ __u8 ndm_type;
+};
+
+struct rtnl_link_stats64 {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
+ __u64 collisions;
+ __u64 rx_length_errors;
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
+ __u64 tx_aborted_errors;
+ __u64 tx_carrier_errors;
+ __u64 tx_fifo_errors;
+ __u64 tx_heartbeat_errors;
+ __u64 tx_window_errors;
+ __u64 rx_compressed;
+ __u64 tx_compressed;
+ __u64 rx_nohandler;
+};
+
+struct ifla_vf_guid {
+ __u32 vf;
+ __u64 guid;
+};
+
+struct ifla_vf_stats {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 broadcast;
+ __u64 multicast;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+};
+
+struct ifla_vf_info {
+ __u32 vf;
+ __u8 mac[32];
+ __u32 vlan;
+ __u32 qos;
+ __u32 spoofchk;
+ __u32 linkstate;
+ __u32 min_tx_rate;
+ __u32 max_tx_rate;
+ __u32 rss_query_en;
+ __u32 trusted;
+ __be16 vlan_proto;
+};
+
+struct tc_stats {
+ __u64 bytes;
+ __u32 packets;
+ __u32 drops;
+ __u32 overlimits;
+ __u32 bps;
+ __u32 pps;
+ __u32 qlen;
+ __u32 backlog;
+};
+
+struct tc_sizespec {
+ unsigned char cell_log;
+ unsigned char size_log;
+ short int cell_align;
+ int overhead;
+ unsigned int linklayer;
+ unsigned int mpu;
+ unsigned int mtu;
+ unsigned int tsize;
+};
+
+enum netdev_tx {
+ __NETDEV_TX_MIN = -2147483648,
+ NETDEV_TX_OK = 0,
+ NETDEV_TX_BUSY = 16,
+};
+
+typedef enum netdev_tx netdev_tx_t;
+
+struct header_ops {
+ int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int);
+ int (*parse)(const struct sk_buff *, unsigned char *);
+ int (*cache)(const struct neighbour *, struct hh_cache *, __be16);
+ void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *);
+ bool (*validate)(const char *, unsigned int);
+ __be16 (*parse_protocol)(const struct sk_buff *);
+};
+
+struct gro_list {
+ struct list_head list;
+ int count;
+};
+
+struct napi_struct {
+ struct list_head poll_list;
+ long unsigned int state;
+ int weight;
+ int defer_hard_irqs_count;
+ long unsigned int gro_bitmask;
+ int (*poll)(struct napi_struct *, int);
+ struct net_device *dev;
+ struct gro_list gro_hash[8];
+ struct sk_buff *skb;
+ struct list_head rx_list;
+ int rx_count;
+ struct hrtimer timer;
+ struct list_head dev_list;
+ struct hlist_node napi_hash_node;
+ unsigned int napi_id;
+};
+
+struct xdp_umem;
+
+struct netdev_queue {
+ struct net_device *dev;
+ struct Qdisc *qdisc;
+ struct Qdisc *qdisc_sleeping;
+ struct kobject kobj;
+ long unsigned int tx_maxrate;
+ long unsigned int trans_timeout;
+ struct net_device *sb_dev;
+ struct xdp_umem *umem;
+ long: 64;
+ spinlock_t _xmit_lock;
+ int xmit_lock_owner;
+ long unsigned int trans_start;
+ long unsigned int state;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct dql dql;
+};
+
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct gnet_stats_basic_packed {
+ __u64 bytes;
+ __u64 packets;
+};
+
+struct gnet_stats_queue {
+ __u32 qlen;
+ __u32 backlog;
+ __u32 drops;
+ __u32 requeues;
+ __u32 overlimits;
+};
+
+struct Qdisc_ops;
+
+struct qdisc_size_table;
+
+struct net_rate_estimator;
+
+struct gnet_stats_basic_cpu;
+
+struct Qdisc {
+ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
+ struct sk_buff * (*dequeue)(struct Qdisc *);
+ unsigned int flags;
+ u32 limit;
+ const struct Qdisc_ops *ops;
+ struct qdisc_size_table *stab;
+ struct hlist_node hash;
+ u32 handle;
+ u32 parent;
+ struct netdev_queue *dev_queue;
+ struct net_rate_estimator *rate_est;
+ struct gnet_stats_basic_cpu *cpu_bstats;
+ struct gnet_stats_queue *cpu_qstats;
+ int padded;
+ refcount_t refcnt;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct sk_buff_head gso_skb;
+ struct qdisc_skb_head q;
+ struct gnet_stats_basic_packed bstats;
+ seqcount_t running;
+ struct gnet_stats_queue qstats;
+ long unsigned int state;
+ struct Qdisc *next_sched;
+ struct sk_buff_head skb_bad_txq;
+ long: 64;
+ long: 64;
+ spinlock_t busylock;
+ spinlock_t seqlock;
+ bool empty;
+ struct callback_head rcu;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct rps_map {
+ unsigned int len;
+ struct callback_head rcu;
+ u16 cpus[0];
+};
+
+struct rps_dev_flow {
+ u16 cpu;
+ u16 filter;
+ unsigned int last_qtail;
+};
+
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct callback_head rcu;
+ struct rps_dev_flow flows[0];
+};
+
+struct rps_sock_flow_table {
+ u32 mask;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ u32 ents[0];
+};
+
+struct netdev_rx_queue {
+ struct rps_map *rps_map;
+ struct rps_dev_flow_table *rps_flow_table;
+ struct kobject kobj;
+ struct net_device *dev;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct xdp_rxq_info xdp_rxq;
+ struct xdp_umem *umem;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xps_map {
+ unsigned int len;
+ unsigned int alloc_len;
+ struct callback_head rcu;
+ u16 queues[0];
+};
+
+struct xps_dev_maps {
+ struct callback_head rcu;
+ struct xps_map *attr_map[0];
+};
+
+struct netdev_phys_item_id {
+ unsigned char id[32];
+ unsigned char id_len;
+};
+
+enum tc_setup_type {
+ TC_SETUP_QDISC_MQPRIO = 0,
+ TC_SETUP_CLSU32 = 1,
+ TC_SETUP_CLSFLOWER = 2,
+ TC_SETUP_CLSMATCHALL = 3,
+ TC_SETUP_CLSBPF = 4,
+ TC_SETUP_BLOCK = 5,
+ TC_SETUP_QDISC_CBS = 6,
+ TC_SETUP_QDISC_RED = 7,
+ TC_SETUP_QDISC_PRIO = 8,
+ TC_SETUP_QDISC_MQ = 9,
+ TC_SETUP_QDISC_ETF = 10,
+ TC_SETUP_ROOT_QDISC = 11,
+ TC_SETUP_QDISC_GRED = 12,
+ TC_SETUP_QDISC_TAPRIO = 13,
+ TC_SETUP_FT = 14,
+ TC_SETUP_QDISC_ETS = 15,
+ TC_SETUP_QDISC_TBF = 16,
+ TC_SETUP_QDISC_FIFO = 17,
+};
+
+enum bpf_netdev_command {
+ XDP_SETUP_PROG = 0,
+ XDP_SETUP_PROG_HW = 1,
+ XDP_QUERY_PROG = 2,
+ XDP_QUERY_PROG_HW = 3,
+ BPF_OFFLOAD_MAP_ALLOC = 4,
+ BPF_OFFLOAD_MAP_FREE = 5,
+ XDP_SETUP_XSK_UMEM = 6,
+};
+
+struct netdev_bpf {
+ enum bpf_netdev_command command;
+ union {
+ struct {
+ u32 flags;
+ struct bpf_prog *prog;
+ struct netlink_ext_ack *extack;
+ };
+ struct {
+ u32 prog_id;
+ u32 prog_flags;
+ };
+ struct {
+ struct bpf_offloaded_map *offmap;
+ };
+ struct {
+ struct xdp_umem *umem;
+ u16 queue_id;
+ } xsk;
+ };
+};
+
+struct dev_ifalias {
+ struct callback_head rcuhead;
+ char ifalias[0];
+};
+
+struct netdev_name_node {
+ struct hlist_node hlist;
+ struct list_head list;
+ struct net_device *dev;
+ const char *name;
+};
+
+struct udp_tunnel_info;
+
+struct devlink_port;
+
+struct ip_tunnel_parm;
+
+struct net_device_ops {
+ int (*ndo_init)(struct net_device *);
+ void (*ndo_uninit)(struct net_device *);
+ int (*ndo_open)(struct net_device *);
+ int (*ndo_stop)(struct net_device *);
+ netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *);
+ netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t);
+ u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *);
+ void (*ndo_change_rx_flags)(struct net_device *, int);
+ void (*ndo_set_rx_mode)(struct net_device *);
+ int (*ndo_set_mac_address)(struct net_device *, void *);
+ int (*ndo_validate_addr)(struct net_device *);
+ int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);
+ int (*ndo_set_config)(struct net_device *, struct ifmap *);
+ int (*ndo_change_mtu)(struct net_device *, int);
+ int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);
+ void (*ndo_tx_timeout)(struct net_device *, unsigned int);
+ void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);
+ bool (*ndo_has_offload_stats)(const struct net_device *, int);
+ int (*ndo_get_offload_stats)(int, const struct net_device *, void *);
+ struct net_device_stats * (*ndo_get_stats)(struct net_device *);
+ int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16);
+ int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16);
+ int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);
+ int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16);
+ int (*ndo_set_vf_rate)(struct net_device *, int, int, int);
+ int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool);
+ int (*ndo_set_vf_trust)(struct net_device *, int, bool);
+ int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);
+ int (*ndo_set_vf_link_state)(struct net_device *, int, int);
+ int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *);
+ int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);
+ int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);
+ int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *);
+ int (*ndo_set_vf_guid)(struct net_device *, int, u64, int);
+ int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool);
+ int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *);
+ int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32);
+ int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *);
+ int (*ndo_del_slave)(struct net_device *, struct net_device *);
+ struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool);
+ netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t);
+ int (*ndo_set_features)(struct net_device *, netdev_features_t);
+ int (*ndo_neigh_construct)(struct net_device *, struct neighbour *);
+ void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *);
+ int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, struct netlink_ext_ack *);
+ int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16);
+ int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *);
+ int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *);
+ int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *);
+ int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int);
+ int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16);
+ int (*ndo_change_carrier)(struct net_device *, bool);
+ int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *);
+ int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *);
+ int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t);
+ void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *);
+ void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *);
+ void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);
+ void (*ndo_dfwd_del_station)(struct net_device *, void *);
+ int (*ndo_set_tx_maxrate)(struct net_device *, int, u32);
+ int (*ndo_get_iflink)(const struct net_device *);
+ int (*ndo_change_proto_down)(struct net_device *, bool);
+ int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *);
+ void (*ndo_set_rx_headroom)(struct net_device *, int);
+ int (*ndo_bpf)(struct net_device *, struct netdev_bpf *);
+ int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32);
+ int (*ndo_xsk_wakeup)(struct net_device *, u32, u32);
+ struct devlink_port * (*ndo_get_devlink_port)(struct net_device *);
+ int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm *, int);
+};
+
+struct neigh_parms {
+ possible_net_t net;
+ struct net_device *dev;
+ struct list_head list;
+ int (*neigh_setup)(struct neighbour *);
+ struct neigh_table *tbl;
+ void *sysctl_table;
+ int dead;
+ refcount_t refcnt;
+ struct callback_head callback_head;
+ int reachable_time;
+ int data[13];
+ long unsigned int data_state[1];
+};
+
+struct pcpu_lstats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ struct u64_stats_sync syncp;
+};
+
+struct pcpu_sw_netstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+struct nd_opt_hdr;
+
+struct ndisc_options;
+
+struct prefix_info;
+
+struct ndisc_ops {
+ int (*is_useropt)(u8);
+ int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *);
+ void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *);
+ int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **);
+ void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *);
+ void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool);
+};
+
+struct ipv6_devstat {
+ struct proc_dir_entry *proc_dir_entry;
+ struct ipstats_mib *ipv6;
+ struct icmpv6_mib_device *icmpv6dev;
+ struct icmpv6msg_mib_device *icmpv6msgdev;
+};
+
+struct ifmcaddr6;
+
+struct ifacaddr6;
+
+struct inet6_dev {
+ struct net_device *dev;
+ struct list_head addr_list;
+ struct ifmcaddr6 *mc_list;
+ struct ifmcaddr6 *mc_tomb;
+ spinlock_t mc_lock;
+ unsigned char mc_qrv;
+ unsigned char mc_gq_running;
+ unsigned char mc_ifc_count;
+ unsigned char mc_dad_count;
+ long unsigned int mc_v1_seen;
+ long unsigned int mc_qi;
+ long unsigned int mc_qri;
+ long unsigned int mc_maxdelay;
+ struct timer_list mc_gq_timer;
+ struct timer_list mc_ifc_timer;
+ struct timer_list mc_dad_timer;
+ struct ifacaddr6 *ac_list;
+ rwlock_t lock;
+ refcount_t refcnt;
+ __u32 if_flags;
+ int dead;
+ u32 desync_factor;
+ struct list_head tempaddr_list;
+ struct in6_addr token;
+ struct neigh_parms *nd_parms;
+ struct ipv6_devconf cnf;
+ struct ipv6_devstat stats;
+ struct timer_list rs_timer;
+ __s32 rs_interval;
+ __u8 rs_probes;
+ long unsigned int tstamp;
+ struct callback_head rcu;
+};
+
+struct nla_policy;
+
+struct rtnl_link_ops {
+ struct list_head list;
+ const char *kind;
+ size_t priv_size;
+ void (*setup)(struct net_device *);
+ unsigned int maxtype;
+ const struct nla_policy *policy;
+ int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
+ int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
+ int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
+ void (*dellink)(struct net_device *, struct list_head *);
+ size_t (*get_size)(const struct net_device *);
+ int (*fill_info)(struct sk_buff *, const struct net_device *);
+ size_t (*get_xstats_size)(const struct net_device *);
+ int (*fill_xstats)(struct sk_buff *, const struct net_device *);
+ unsigned int (*get_num_tx_queues)();
+ unsigned int (*get_num_rx_queues)();
+ unsigned int slave_maxtype;
+ const struct nla_policy *slave_policy;
+ int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
+ size_t (*get_slave_size)(const struct net_device *, const struct net_device *);
+ int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *);
+ struct net * (*get_link_net)(const struct net_device *);
+ size_t (*get_linkxstats_size)(const struct net_device *, int);
+ int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int);
+};
+
+struct sd_flow_limit {
+ u64 count;
+ unsigned int num_buckets;
+ unsigned int history_head;
+ u16 history[128];
+ u8 buckets[0];
+};
+
+struct softnet_data {
+ struct list_head poll_list;
+ struct sk_buff_head process_queue;
+ unsigned int processed;
+ unsigned int time_squeeze;
+ unsigned int received_rps;
+ struct softnet_data *rps_ipi_list;
+ struct sd_flow_limit *flow_limit;
+ struct Qdisc *output_queue;
+ struct Qdisc **output_queue_tailp;
+ struct sk_buff *completion_queue;
+ struct {
+ u16 recursion;
+ u8 more;
+ } xmit;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ unsigned int input_queue_head;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ call_single_data_t csd;
+ struct softnet_data *rps_ipi_next;
+ unsigned int cpu;
+ unsigned int input_queue_tail;
+ unsigned int dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum {
+ RTAX_UNSPEC = 0,
+ RTAX_LOCK = 1,
+ RTAX_MTU = 2,
+ RTAX_WINDOW = 3,
+ RTAX_RTT = 4,
+ RTAX_RTTVAR = 5,
+ RTAX_SSTHRESH = 6,
+ RTAX_CWND = 7,
+ RTAX_ADVMSS = 8,
+ RTAX_REORDERING = 9,
+ RTAX_HOPLIMIT = 10,
+ RTAX_INITCWND = 11,
+ RTAX_FEATURES = 12,
+ RTAX_RTO_MIN = 13,
+ RTAX_INITRWND = 14,
+ RTAX_QUICKACK = 15,
+ RTAX_CC_ALGO = 16,
+ RTAX_FASTOPEN_NO_COOKIE = 17,
+ __RTAX_MAX = 18,
+};
+
+struct tcmsg {
+ unsigned char tcm_family;
+ unsigned char tcm__pad1;
+ short unsigned int tcm__pad2;
+ int tcm_ifindex;
+ __u32 tcm_handle;
+ __u32 tcm_parent;
+ __u32 tcm_info;
+};
+
+struct gnet_stats_basic_cpu {
+ struct gnet_stats_basic_packed bstats;
+ struct u64_stats_sync syncp;
+};
+
+struct gnet_dump {
+ spinlock_t *lock;
+ struct sk_buff *skb;
+ struct nlattr *tail;
+ int compat_tc_stats;
+ int compat_xstats;
+ int padattr;
+ void *xstats;
+ int xstats_len;
+ struct tc_stats tc_stats;
+};
+
+struct netlink_range_validation {
+ u64 min;
+ u64 max;
+};
+
+struct netlink_range_validation_signed {
+ s64 min;
+ s64 max;
+};
+
+struct nla_policy {
+ u8 type;
+ u8 validation_type;
+ u16 len;
+ union {
+ const u32 bitfield32_valid;
+ const char *reject_message;
+ const struct nla_policy *nested_policy;
+ struct netlink_range_validation *range;
+ struct netlink_range_validation_signed *range_signed;
+ struct {
+ s16 min;
+ s16 max;
+ };
+ int (*validate)(const struct nlattr *, struct netlink_ext_ack *);
+ u16 strict_start_type;
+ };
+};
+
+struct nl_info {
+ struct nlmsghdr *nlh;
+ struct net *nl_net;
+ u32 portid;
+ u8 skip_notify: 1;
+ u8 skip_notify_kernel: 1;
+};
+
+struct rhash_lock_head {};
+
+enum flow_action_hw_stats_bit {
+ FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT = 1,
+ FLOW_ACTION_HW_STATS_DISABLED_BIT = 2,
+ FLOW_ACTION_HW_STATS_NUM_BITS = 3,
+};
+
+struct flow_block {
+ struct list_head cb_list;
+};
+
+typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *);
+
+struct qdisc_size_table {
+ struct callback_head rcu;
+ struct list_head list;
+ struct tc_sizespec szopts;
+ int refcnt;
+ u16 data[0];
+};
+
+struct Qdisc_class_ops;
+
+struct Qdisc_ops {
+ struct Qdisc_ops *next;
+ const struct Qdisc_class_ops *cl_ops;
+ char id[16];
+ int priv_size;
+ unsigned int static_flags;
+ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
+ struct sk_buff * (*dequeue)(struct Qdisc *);
+ struct sk_buff * (*peek)(struct Qdisc *);
+ int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *);
+ void (*reset)(struct Qdisc *);
+ void (*destroy)(struct Qdisc *);
+ int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *);
+ void (*attach)(struct Qdisc *);
+ int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
+ int (*dump)(struct Qdisc *, struct sk_buff *);
+ int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
+ void (*ingress_block_set)(struct Qdisc *, u32);
+ void (*egress_block_set)(struct Qdisc *, u32);
+ u32 (*ingress_block_get)(struct Qdisc *);
+ u32 (*egress_block_get)(struct Qdisc *);
+ struct module *owner;
+};
+
+struct qdisc_walker;
+
+struct tcf_block;
+
+struct Qdisc_class_ops {
+ unsigned int flags;
+ struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
+ int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *);
+ struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int);
+ void (*qlen_notify)(struct Qdisc *, long unsigned int);
+ long unsigned int (*find)(struct Qdisc *, u32);
+ int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *);
+ int (*delete)(struct Qdisc *, long unsigned int);
+ void (*walk)(struct Qdisc *, struct qdisc_walker *);
+ struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *);
+ long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32);
+ void (*unbind_tcf)(struct Qdisc *, long unsigned int);
+ int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *);
+ int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *);
+};
+
+struct tcf_chain;
+
+struct tcf_block {
+ struct mutex lock;
+ struct list_head chain_list;
+ u32 index;
+ u32 classid;
+ refcount_t refcnt;
+ struct net *net;
+ struct Qdisc *q;
+ struct rw_semaphore cb_lock;
+ struct flow_block flow_block;
+ struct list_head owner_list;
+ bool keep_dst;
+ atomic_t offloadcnt;
+ unsigned int nooffloaddevcnt;
+ unsigned int lockeddevcnt;
+ struct {
+ struct tcf_chain *chain;
+ struct list_head filter_chain_list;
+ } chain0;
+ struct callback_head rcu;
+ struct hlist_head proto_destroy_ht[128];
+ struct mutex proto_destroy_lock;
+};
+
+struct tcf_result;
+
+struct tcf_proto_ops;
+
+struct tcf_proto {
+ struct tcf_proto *next;
+ void *root;
+ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);
+ __be16 protocol;
+ u32 prio;
+ void *data;
+ const struct tcf_proto_ops *ops;
+ struct tcf_chain *chain;
+ spinlock_t lock;
+ bool deleting;
+ refcount_t refcnt;
+ struct callback_head rcu;
+ struct hlist_node destroy_ht_node;
+};
+
+struct tcf_result {
+ union {
+ struct {
+ long unsigned int class;
+ u32 classid;
+ };
+ const struct tcf_proto *goto_tp;
+ struct {
+ bool ingress;
+ struct gnet_stats_queue *qstats;
+ };
+ };
+};
+
+struct tcf_walker;
+
+struct tcf_proto_ops {
+ struct list_head head;
+ char kind[16];
+ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);
+ int (*init)(struct tcf_proto *);
+ void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *);
+ void * (*get)(struct tcf_proto *, u32);
+ void (*put)(struct tcf_proto *, void *);
+ int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, bool, bool, struct netlink_ext_ack *);
+ int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *);
+ bool (*delete_empty)(struct tcf_proto *);
+ void (*walk)(struct tcf_proto *, struct tcf_walker *, bool);
+ int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *);
+ void (*hw_add)(struct tcf_proto *, void *);
+ void (*hw_del)(struct tcf_proto *, void *);
+ void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int);
+ void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *);
+ void (*tmplt_destroy)(void *);
+ int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool);
+ int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool);
+ int (*tmplt_dump)(struct sk_buff *, struct net *, void *);
+ struct module *owner;
+ int flags;
+};
+
+struct tcf_chain {
+ struct mutex filter_chain_lock;
+ struct tcf_proto *filter_chain;
+ struct list_head list;
+ struct tcf_block *block;
+ u32 index;
+ unsigned int refcnt;
+ unsigned int action_refcnt;
+ bool explicitly_created;
+ bool flushing;
+ const struct tcf_proto_ops *tmplt_ops;
+ void *tmplt_priv;
+ struct callback_head rcu;
+};
+
+struct sock_fprog_kern {
+ u16 len;
+ struct sock_filter *filter;
+};
+
+struct sk_filter {
+ refcount_t refcnt;
+ struct callback_head rcu;
+ struct bpf_prog *prog;
+};
+
+struct bpf_redirect_info {
+ u32 flags;
+ u32 tgt_index;
+ void *tgt_value;
+ struct bpf_map *map;
+ u32 kern_flags;
+};
+
+enum {
+ NEIGH_VAR_MCAST_PROBES = 0,
+ NEIGH_VAR_UCAST_PROBES = 1,
+ NEIGH_VAR_APP_PROBES = 2,
+ NEIGH_VAR_MCAST_REPROBES = 3,
+ NEIGH_VAR_RETRANS_TIME = 4,
+ NEIGH_VAR_BASE_REACHABLE_TIME = 5,
+ NEIGH_VAR_DELAY_PROBE_TIME = 6,
+ NEIGH_VAR_GC_STALETIME = 7,
+ NEIGH_VAR_QUEUE_LEN_BYTES = 8,
+ NEIGH_VAR_PROXY_QLEN = 9,
+ NEIGH_VAR_ANYCAST_DELAY = 10,
+ NEIGH_VAR_PROXY_DELAY = 11,
+ NEIGH_VAR_LOCKTIME = 12,
+ NEIGH_VAR_QUEUE_LEN = 13,
+ NEIGH_VAR_RETRANS_TIME_MS = 14,
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS = 15,
+ NEIGH_VAR_GC_INTERVAL = 16,
+ NEIGH_VAR_GC_THRESH1 = 17,
+ NEIGH_VAR_GC_THRESH2 = 18,
+ NEIGH_VAR_GC_THRESH3 = 19,
+ NEIGH_VAR_MAX = 20,
+};
+
+struct pneigh_entry;
+
+struct neigh_statistics;
+
+struct neigh_hash_table;
+
+struct neigh_table {
+ int family;
+ unsigned int entry_size;
+ unsigned int key_len;
+ __be16 protocol;
+ __u32 (*hash)(const void *, const struct net_device *, __u32 *);
+ bool (*key_eq)(const struct neighbour *, const void *);
+ int (*constructor)(struct neighbour *);
+ int (*pconstructor)(struct pneigh_entry *);
+ void (*pdestructor)(struct pneigh_entry *);
+ void (*proxy_redo)(struct sk_buff *);
+ bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *);
+ char *id;
+ struct neigh_parms parms;
+ struct list_head parms_list;
+ int gc_interval;
+ int gc_thresh1;
+ int gc_thresh2;
+ int gc_thresh3;
+ long unsigned int last_flush;
+ struct delayed_work gc_work;
+ struct timer_list proxy_timer;
+ struct sk_buff_head proxy_queue;
+ atomic_t entries;
+ atomic_t gc_entries;
+ struct list_head gc_list;
+ rwlock_t lock;
+ long unsigned int last_rand;
+ struct neigh_statistics *stats;
+ struct neigh_hash_table *nht;
+ struct pneigh_entry **phash_buckets;
+};
+
+struct neigh_statistics {
+ long unsigned int allocs;
+ long unsigned int destroys;
+ long unsigned int hash_grows;
+ long unsigned int res_failed;
+ long unsigned int lookups;
+ long unsigned int hits;
+ long unsigned int rcv_probes_mcast;
+ long unsigned int rcv_probes_ucast;
+ long unsigned int periodic_gc_runs;
+ long unsigned int forced_gc_runs;
+ long unsigned int unres_discards;
+ long unsigned int table_fulls;
+};
+
+struct neigh_ops {
+ int family;
+ void (*solicit)(struct neighbour *, struct sk_buff *);
+ void (*error_report)(struct neighbour *, struct sk_buff *);
+ int (*output)(struct neighbour *, struct sk_buff *);
+ int (*connected_output)(struct neighbour *, struct sk_buff *);
+};
+
+struct pneigh_entry {
+ struct pneigh_entry *next;
+ possible_net_t net;
+ struct net_device *dev;
+ u8 flags;
+ u8 protocol;
+ u8 key[0];
+};
+
+struct neigh_hash_table {
+ struct neighbour **hash_buckets;
+ unsigned int hash_shift;
+ __u32 hash_rnd[4];
+ struct callback_head rcu;
+};
+
+struct dst_metrics {
+ u32 metrics[17];
+ refcount_t refcnt;
+};
+
+enum {
+ TCP_ESTABLISHED = 1,
+ TCP_SYN_SENT = 2,
+ TCP_SYN_RECV = 3,
+ TCP_FIN_WAIT1 = 4,
+ TCP_FIN_WAIT2 = 5,
+ TCP_TIME_WAIT = 6,
+ TCP_CLOSE = 7,
+ TCP_CLOSE_WAIT = 8,
+ TCP_LAST_ACK = 9,
+ TCP_LISTEN = 10,
+ TCP_CLOSING = 11,
+ TCP_NEW_SYN_RECV = 12,
+ TCP_MAX_STATES = 13,
+};
+
+struct smc_hashinfo;
+
+struct request_sock_ops;
+
+struct timewait_sock_ops;
+
+struct udp_table;
+
+struct raw_hashinfo;
+
+struct proto {
+ void (*close)(struct sock *, long int);
+ int (*pre_connect)(struct sock *, struct sockaddr *, int);
+ int (*connect)(struct sock *, struct sockaddr *, int);
+ int (*disconnect)(struct sock *, int);
+ struct sock * (*accept)(struct sock *, int, int *, bool);
+ int (*ioctl)(struct sock *, int, long unsigned int);
+ int (*init)(struct sock *);
+ void (*destroy)(struct sock *);
+ void (*shutdown)(struct sock *, int);
+ int (*setsockopt)(struct sock *, int, int, char *, unsigned int);
+ int (*getsockopt)(struct sock *, int, int, char *, int *);
+ void (*keepalive)(struct sock *, int);
+ int (*sendmsg)(struct sock *, struct msghdr *, size_t);
+ int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int, int *);
+ int (*sendpage)(struct sock *, struct page *, int, size_t, int);
+ int (*bind)(struct sock *, struct sockaddr *, int);
+ int (*bind_add)(struct sock *, struct sockaddr *, int);
+ int (*backlog_rcv)(struct sock *, struct sk_buff *);
+ void (*release_cb)(struct sock *);
+ int (*hash)(struct sock *);
+ void (*unhash)(struct sock *);
+ void (*rehash)(struct sock *);
+ int (*get_port)(struct sock *, short unsigned int);
+ unsigned int inuse_idx;
+ bool (*stream_memory_free)(const struct sock *, int);
+ bool (*stream_memory_read)(const struct sock *);
+ void (*enter_memory_pressure)(struct sock *);
+ void (*leave_memory_pressure)(struct sock *);
+ atomic_long_t *memory_allocated;
+ struct percpu_counter *sockets_allocated;
+ long unsigned int *memory_pressure;
+ long int *sysctl_mem;
+ int *sysctl_wmem;
+ int *sysctl_rmem;
+ u32 sysctl_wmem_offset;
+ u32 sysctl_rmem_offset;
+ int max_header;
+ bool no_autobind;
+ struct kmem_cache *slab;
+ unsigned int obj_size;
+ slab_flags_t slab_flags;
+ unsigned int useroffset;
+ unsigned int usersize;
+ struct percpu_counter *orphan_count;
+ struct request_sock_ops *rsk_prot;
+ struct timewait_sock_ops *twsk_prot;
+ union {
+ struct inet_hashinfo *hashinfo;
+ struct udp_table *udp_table;
+ struct raw_hashinfo *raw_hash;
+ struct smc_hashinfo *smc_hash;
+ } h;
+ struct module *owner;
+ char name[32];
+ struct list_head node;
+ int (*diag_destroy)(struct sock *, int);
+};
+
+struct request_sock;
+
+struct request_sock_ops {
+ int family;
+ unsigned int obj_size;
+ struct kmem_cache *slab;
+ char *slab_name;
+ int (*rtx_syn_ack)(const struct sock *, struct request_sock *);
+ void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *);
+ void (*send_reset)(const struct sock *, struct sk_buff *);
+ void (*destructor)(struct request_sock *);
+ void (*syn_ack_timeout)(const struct request_sock *);
+};
+
+struct timewait_sock_ops {
+ struct kmem_cache *twsk_slab;
+ char *twsk_slab_name;
+ unsigned int twsk_obj_size;
+ int (*twsk_unique)(struct sock *, struct sock *, void *);
+ void (*twsk_destructor)(struct sock *);
+};
+
+struct request_sock {
+ struct sock_common __req_common;
+ struct request_sock *dl_next;
+ u16 mss;
+ u8 num_retrans;
+ u8 cookie_ts: 1;
+ u8 num_timeout: 7;
+ u32 ts_recent;
+ struct timer_list rsk_timer;
+ const struct request_sock_ops *rsk_ops;
+ struct sock *sk;
+ u32 *saved_syn;
+ u32 secid;
+ u32 peer_secid;
+};
+
+enum tsq_enum {
+ TSQ_THROTTLED = 0,
+ TSQ_QUEUED = 1,
+ TCP_TSQ_DEFERRED = 2,
+ TCP_WRITE_TIMER_DEFERRED = 3,
+ TCP_DELACK_TIMER_DEFERRED = 4,
+ TCP_MTU_REDUCED_DEFERRED = 5,
+};
+
+struct static_key_false_deferred {
+ struct static_key_false key;
+};
+
+struct ip6_sf_list {
+ struct ip6_sf_list *sf_next;
+ struct in6_addr sf_addr;
+ long unsigned int sf_count[2];
+ unsigned char sf_gsresp;
+ unsigned char sf_oldin;
+ unsigned char sf_crcount;
+};
+
+struct ifmcaddr6 {
+ struct in6_addr mca_addr;
+ struct inet6_dev *idev;
+ struct ifmcaddr6 *next;
+ struct ip6_sf_list *mca_sources;
+ struct ip6_sf_list *mca_tomb;
+ unsigned int mca_sfmode;
+ unsigned char mca_crcount;
+ long unsigned int mca_sfcount[2];
+ struct timer_list mca_timer;
+ unsigned int mca_flags;
+ int mca_users;
+ refcount_t mca_refcnt;
+ spinlock_t mca_lock;
+ long unsigned int mca_cstamp;
+ long unsigned int mca_tstamp;
+};
+
+struct ifacaddr6 {
+ struct in6_addr aca_addr;
+ struct fib6_info *aca_rt;
+ struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
+ int aca_users;
+ refcount_t aca_refcnt;
+ long unsigned int aca_cstamp;
+ long unsigned int aca_tstamp;
+ struct callback_head rcu;
+};
+
+struct fib6_result;
+
+struct fib6_nh;
+
+struct fib6_config;
+
+struct ipv6_stub {
+ int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *);
+ int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *);
+ struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *);
+ int (*ipv6_route_input)(struct sk_buff *);
+ struct fib6_table * (*fib6_get_table)(struct net *, u32);
+ int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int);
+ int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int);
+ void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int);
+ u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *);
+ int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *);
+ void (*fib6_nh_release)(struct fib6_nh *);
+ void (*fib6_update_sernum)(struct net *, struct fib6_info *);
+ int (*ip6_del_rt)(struct net *, struct fib6_info *, bool);
+ void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *);
+ void (*udpv6_encap_enable)();
+ void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool);
+ void (*xfrm6_local_rxpmtu)(struct sk_buff *, u32);
+ int (*xfrm6_udp_encap_rcv)(struct sock *, struct sk_buff *);
+ int (*xfrm6_rcv_encap)(struct sk_buff *, int, __be32, int);
+ struct neigh_table *nd_tbl;
+};
+
+struct fib6_result {
+ struct fib6_nh *nh;
+ struct fib6_info *f6i;
+ u32 fib6_flags;
+ u8 fib6_type;
+ struct rt6_info *rt6;
+};
+
+struct ipv6_bpf_stub {
+ int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32);
+ struct sock * (*udp6_lib_lookup)(struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *);
+};
+
+enum {
+ __ND_OPT_PREFIX_INFO_END = 0,
+ ND_OPT_SOURCE_LL_ADDR = 1,
+ ND_OPT_TARGET_LL_ADDR = 2,
+ ND_OPT_PREFIX_INFO = 3,
+ ND_OPT_REDIRECT_HDR = 4,
+ ND_OPT_MTU = 5,
+ ND_OPT_NONCE = 14,
+ __ND_OPT_ARRAY_MAX = 15,
+ ND_OPT_ROUTE_INFO = 24,
+ ND_OPT_RDNSS = 25,
+ ND_OPT_DNSSL = 31,
+ ND_OPT_6CO = 34,
+ ND_OPT_CAPTIVE_PORTAL = 37,
+ ND_OPT_PREF64 = 38,
+ __ND_OPT_MAX = 39,
+};
+
+struct nd_opt_hdr {
+ __u8 nd_opt_type;
+ __u8 nd_opt_len;
+};
+
+struct ndisc_options {
+ struct nd_opt_hdr *nd_opt_array[15];
+ struct nd_opt_hdr *nd_useropts;
+ struct nd_opt_hdr *nd_useropts_end;
+};
+
+struct prefix_info {
+ __u8 type;
+ __u8 length;
+ __u8 prefix_len;
+ __u8 reserved: 6;
+ __u8 autoconf: 1;
+ __u8 onlink: 1;
+ __be32 valid;
+ __be32 prefered;
+ __be32 reserved2;
+ struct in6_addr prefix;
+};
+
+struct ip6_ra_chain {
+ struct ip6_ra_chain *next;
+ struct sock *sk;
+ int sel;
+ void (*destructor)(struct sock *);
+};
+
+struct rpc_xprt_iter_ops {
+ void (*xpi_rewind)(struct rpc_xprt_iter *);
+ struct rpc_xprt * (*xpi_xprt)(struct rpc_xprt_iter *);
+ struct rpc_xprt * (*xpi_next)(struct rpc_xprt_iter *);
+};
+
+struct rpc_version {
+ u32 number;
+ unsigned int nrprocs;
+ const struct rpc_procinfo *procs;
+ unsigned int *counts;
+};
+
+struct nfs_fh {
+ short unsigned int size;
+ unsigned char data[128];
+};
+
+enum nfs3_stable_how {
+ NFS_UNSTABLE = 0,
+ NFS_DATA_SYNC = 1,
+ NFS_FILE_SYNC = 2,
+ NFS_INVALID_STABLE_HOW = -1,
+};
+
+struct nfs4_label {
+ uint32_t lfs;
+ uint32_t pi;
+ u32 len;
+ char *label;
+};
+
+struct nfs4_stateid_struct {
+ union {
+ char data[16];
+ struct {
+ __be32 seqid;
+ char other[12];
+ };
+ };
+ enum {
+ NFS4_INVALID_STATEID_TYPE = 0,
+ NFS4_SPECIAL_STATEID_TYPE = 1,
+ NFS4_OPEN_STATEID_TYPE = 2,
+ NFS4_LOCK_STATEID_TYPE = 3,
+ NFS4_DELEGATION_STATEID_TYPE = 4,
+ NFS4_LAYOUT_STATEID_TYPE = 5,
+ NFS4_PNFS_DS_STATEID_TYPE = 6,
+ NFS4_REVOKED_STATEID_TYPE = 7,
+ } type;
+};
+
+typedef struct nfs4_stateid_struct nfs4_stateid;
+
+enum nfs_opnum4 {
+ OP_ACCESS = 3,
+ OP_CLOSE = 4,
+ OP_COMMIT = 5,
+ OP_CREATE = 6,
+ OP_DELEGPURGE = 7,
+ OP_DELEGRETURN = 8,
+ OP_GETATTR = 9,
+ OP_GETFH = 10,
+ OP_LINK = 11,
+ OP_LOCK = 12,
+ OP_LOCKT = 13,
+ OP_LOCKU = 14,
+ OP_LOOKUP = 15,
+ OP_LOOKUPP = 16,
+ OP_NVERIFY = 17,
+ OP_OPEN = 18,
+ OP_OPENATTR = 19,
+ OP_OPEN_CONFIRM = 20,
+ OP_OPEN_DOWNGRADE = 21,
+ OP_PUTFH = 22,
+ OP_PUTPUBFH = 23,
+ OP_PUTROOTFH = 24,
+ OP_READ = 25,
+ OP_READDIR = 26,
+ OP_READLINK = 27,
+ OP_REMOVE = 28,
+ OP_RENAME = 29,
+ OP_RENEW = 30,
+ OP_RESTOREFH = 31,
+ OP_SAVEFH = 32,
+ OP_SECINFO = 33,
+ OP_SETATTR = 34,
+ OP_SETCLIENTID = 35,
+ OP_SETCLIENTID_CONFIRM = 36,
+ OP_VERIFY = 37,
+ OP_WRITE = 38,
+ OP_RELEASE_LOCKOWNER = 39,
+ OP_BACKCHANNEL_CTL = 40,
+ OP_BIND_CONN_TO_SESSION = 41,
+ OP_EXCHANGE_ID = 42,
+ OP_CREATE_SESSION = 43,
+ OP_DESTROY_SESSION = 44,
+ OP_FREE_STATEID = 45,
+ OP_GET_DIR_DELEGATION = 46,
+ OP_GETDEVICEINFO = 47,
+ OP_GETDEVICELIST = 48,
+ OP_LAYOUTCOMMIT = 49,
+ OP_LAYOUTGET = 50,
+ OP_LAYOUTRETURN = 51,
+ OP_SECINFO_NO_NAME = 52,
+ OP_SEQUENCE = 53,
+ OP_SET_SSV = 54,
+ OP_TEST_STATEID = 55,
+ OP_WANT_DELEGATION = 56,
+ OP_DESTROY_CLIENTID = 57,
+ OP_RECLAIM_COMPLETE = 58,
+ OP_ALLOCATE = 59,
+ OP_COPY = 60,
+ OP_COPY_NOTIFY = 61,
+ OP_DEALLOCATE = 62,
+ OP_IO_ADVISE = 63,
+ OP_LAYOUTERROR = 64,
+ OP_LAYOUTSTATS = 65,
+ OP_OFFLOAD_CANCEL = 66,
+ OP_OFFLOAD_STATUS = 67,
+ OP_READ_PLUS = 68,
+ OP_SEEK = 69,
+ OP_WRITE_SAME = 70,
+ OP_CLONE = 71,
+ OP_ILLEGAL = 10044,
+};
+
+struct nfs4_string {
+ unsigned int len;
+ char *data;
+};
+
+struct nfs_fsid {
+ uint64_t major;
+ uint64_t minor;
+};
+
+struct nfs4_threshold {
+ __u32 bm;
+ __u32 l_type;
+ __u64 rd_sz;
+ __u64 wr_sz;
+ __u64 rd_io_sz;
+ __u64 wr_io_sz;
+};
+
+struct nfs_fattr {
+ unsigned int valid;
+ umode_t mode;
+ __u32 nlink;
+ kuid_t uid;
+ kgid_t gid;
+ dev_t rdev;
+ __u64 size;
+ union {
+ struct {
+ __u32 blocksize;
+ __u32 blocks;
+ } nfs2;
+ struct {
+ __u64 used;
+ } nfs3;
+ } du;
+ struct nfs_fsid fsid;
+ __u64 fileid;
+ __u64 mounted_on_fileid;
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 ctime;
+ __u64 change_attr;
+ __u64 pre_change_attr;
+ __u64 pre_size;
+ struct timespec64 pre_mtime;
+ struct timespec64 pre_ctime;
+ long unsigned int time_start;
+ long unsigned int gencount;
+ struct nfs4_string *owner_name;
+ struct nfs4_string *group_name;
+ struct nfs4_threshold *mdsthreshold;
+ struct nfs4_label *label;
+};
+
+struct nfs_fsinfo {
+ struct nfs_fattr *fattr;
+ __u32 rtmax;
+ __u32 rtpref;
+ __u32 rtmult;
+ __u32 wtmax;
+ __u32 wtpref;
+ __u32 wtmult;
+ __u32 dtpref;
+ __u64 maxfilesize;
+ struct timespec64 time_delta;
+ __u32 lease_time;
+ __u32 nlayouttypes;
+ __u32 layouttype[8];
+ __u32 blksize;
+ __u32 clone_blksize;
+};
+
+struct nfs_fsstat {
+ struct nfs_fattr *fattr;
+ __u64 tbytes;
+ __u64 fbytes;
+ __u64 abytes;
+ __u64 tfiles;
+ __u64 ffiles;
+ __u64 afiles;
+};
+
+struct nfs_pathconf {
+ struct nfs_fattr *fattr;
+ __u32 max_link;
+ __u32 max_namelen;
+};
+
+struct nfs4_change_info {
+ u32 atomic;
+ u64 before;
+ u64 after;
+};
+
+struct nfs4_slot;
+
+struct nfs4_sequence_args {
+ struct nfs4_slot *sa_slot;
+ u8 sa_cache_this: 1;
+ u8 sa_privileged: 1;
+};
+
+struct nfs4_sequence_res {
+ struct nfs4_slot *sr_slot;
+ long unsigned int sr_timestamp;
+ int sr_status;
+ u32 sr_status_flags;
+ u32 sr_highest_slotid;
+ u32 sr_target_highest_slotid;
+};
+
+struct nfs_open_context;
+
+struct nfs_lock_context {
+ refcount_t count;
+ struct list_head list;
+ struct nfs_open_context *open_context;
+ fl_owner_t lockowner;
+ atomic_t io_count;
+ struct callback_head callback_head;
+};
+
+struct nfs4_state;
+
+struct nfs_open_context {
+ struct nfs_lock_context lock_context;
+ fl_owner_t flock_owner;
+ struct dentry *dentry;
+ const struct cred *cred;
+ struct rpc_cred *ll_cred;
+ struct nfs4_state *state;
+ fmode_t mode;
+ long unsigned int flags;
+ int error;
+ struct list_head list;
+ struct nfs4_threshold *mdsthreshold;
+ struct callback_head callback_head;
+};
+
+struct nlm_host;
+
+struct nfs_iostats;
+
+struct nfs_auth_info {
+ unsigned int flavor_len;
+ rpc_authflavor_t flavors[12];
+};
+
+struct nfs_client;
+
+struct nfs_server {
+ struct nfs_client *nfs_client;
+ struct list_head client_link;
+ struct list_head master_link;
+ struct rpc_clnt *client;
+ struct rpc_clnt *client_acl;
+ struct nlm_host *nlm_host;
+ struct nfs_iostats *io_stats;
+ atomic_long_t writeback;
+ int flags;
+ unsigned int caps;
+ unsigned int rsize;
+ unsigned int rpages;
+ unsigned int wsize;
+ unsigned int wpages;
+ unsigned int wtmult;
+ unsigned int dtsize;
+ short unsigned int port;
+ unsigned int bsize;
+ unsigned int acregmin;
+ unsigned int acregmax;
+ unsigned int acdirmin;
+ unsigned int acdirmax;
+ unsigned int namelen;
+ unsigned int options;
+ unsigned int clone_blksize;
+ struct nfs_fsid fsid;
+ __u64 maxfilesize;
+ struct timespec64 time_delta;
+ long unsigned int mount_time;
+ struct super_block *super;
+ dev_t s_dev;
+ struct nfs_auth_info auth_info;
+ u32 pnfs_blksize;
+ struct ida openowner_id;
+ struct ida lockowner_id;
+ struct list_head state_owners_lru;
+ struct list_head layouts;
+ struct list_head delegations;
+ struct list_head ss_copies;
+ long unsigned int mig_gen;
+ long unsigned int mig_status;
+ void (*destroy)(struct nfs_server *);
+ atomic_t active;
+ struct __kernel_sockaddr_storage mountd_address;
+ size_t mountd_addrlen;
+ u32 mountd_version;
+ short unsigned int mountd_port;
+ short unsigned int mountd_protocol;
+ struct rpc_wait_queue uoc_rpcwaitq;
+ unsigned int read_hdrsize;
+ const struct cred *cred;
+};
+
+struct nfs_subversion;
+
+struct nfs_rpc_ops;
+
+struct nfs_client {
+ refcount_t cl_count;
+ atomic_t cl_mds_count;
+ int cl_cons_state;
+ long unsigned int cl_res_state;
+ long unsigned int cl_flags;
+ struct __kernel_sockaddr_storage cl_addr;
+ size_t cl_addrlen;
+ char *cl_hostname;
+ char *cl_acceptor;
+ struct list_head cl_share_link;
+ struct list_head cl_superblocks;
+ struct rpc_clnt *cl_rpcclient;
+ const struct nfs_rpc_ops *rpc_ops;
+ int cl_proto;
+ struct nfs_subversion *cl_nfs_mod;
+ u32 cl_minorversion;
+ unsigned int cl_nconnect;
+ const char *cl_principal;
+ char cl_ipaddr[48];
+ struct net *cl_net;
+ struct list_head pending_cb_stateids;
+};
+
+struct pnfs_layout_segment;
+
+struct nfs_write_verifier {
+ char data[8];
+};
+
+struct nfs_writeverf {
+ struct nfs_write_verifier verifier;
+ enum nfs3_stable_how committed;
+};
+
+struct nfs_pgio_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ struct nfs_open_context *context;
+ struct nfs_lock_context *lock_context;
+ nfs4_stateid stateid;
+ __u64 offset;
+ __u32 count;
+ unsigned int pgbase;
+ struct page **pages;
+ union {
+ unsigned int replen;
+ struct {
+ const u32 *bitmask;
+ enum nfs3_stable_how stable;
+ };
+ };
+};
+
+struct nfs_pgio_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fattr *fattr;
+ __u32 count;
+ __u32 op_status;
+ union {
+ struct {
+ unsigned int replen;
+ int eof;
+ };
+ struct {
+ struct nfs_writeverf *verf;
+ const struct nfs_server *server;
+ };
+ };
+};
+
+struct nfs_commitargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ __u64 offset;
+ __u32 count;
+ const u32 *bitmask;
+};
+
+struct nfs_commitres {
+ struct nfs4_sequence_res seq_res;
+ __u32 op_status;
+ struct nfs_fattr *fattr;
+ struct nfs_writeverf *verf;
+ const struct nfs_server *server;
+};
+
+struct nfs_removeargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ struct qstr name;
+};
+
+struct nfs_removeres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_server *server;
+ struct nfs_fattr *dir_attr;
+ struct nfs4_change_info cinfo;
+};
+
+struct nfs_renameargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *old_dir;
+ const struct nfs_fh *new_dir;
+ const struct qstr *old_name;
+ const struct qstr *new_name;
+};
+
+struct nfs_renameres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_server *server;
+ struct nfs4_change_info old_cinfo;
+ struct nfs_fattr *old_fattr;
+ struct nfs4_change_info new_cinfo;
+ struct nfs_fattr *new_fattr;
+};
+
+struct nfs_entry {
+ __u64 ino;
+ __u64 cookie;
+ __u64 prev_cookie;
+ const char *name;
+ unsigned int len;
+ int eof;
+ struct nfs_fh *fh;
+ struct nfs_fattr *fattr;
+ struct nfs4_label *label;
+ unsigned char d_type;
+ struct nfs_server *server;
+};
+
+struct pnfs_ds_commit_info {};
+
+struct nfs_page_array {
+ struct page **pagevec;
+ unsigned int npages;
+ struct page *page_array[8];
+};
+
+struct nfs_page;
+
+struct nfs_rw_ops;
+
+struct nfs_io_completion;
+
+struct nfs_direct_req;
+
+struct nfs_pgio_completion_ops;
+
+struct nfs_pgio_header {
+ struct inode *inode;
+ const struct cred *cred;
+ struct list_head pages;
+ struct nfs_page *req;
+ struct nfs_writeverf verf;
+ fmode_t rw_mode;
+ struct pnfs_layout_segment *lseg;
+ loff_t io_start;
+ const struct rpc_call_ops *mds_ops;
+ void (*release)(struct nfs_pgio_header *);
+ const struct nfs_pgio_completion_ops *completion_ops;
+ const struct nfs_rw_ops *rw_ops;
+ struct nfs_io_completion *io_completion;
+ struct nfs_direct_req *dreq;
+ int pnfs_error;
+ int error;
+ unsigned int good_bytes;
+ long unsigned int flags;
+ struct rpc_task task;
+ struct nfs_fattr fattr;
+ struct nfs_pgio_args args;
+ struct nfs_pgio_res res;
+ long unsigned int timestamp;
+ int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *);
+ __u64 mds_offset;
+ struct nfs_page_array page_array;
+ struct nfs_client *ds_clp;
+ int ds_commit_idx;
+ int pgio_mirror_idx;
+};
+
+struct nfs_pgio_completion_ops {
+ void (*error_cleanup)(struct list_head *, int);
+ void (*init_hdr)(struct nfs_pgio_header *);
+ void (*completion)(struct nfs_pgio_header *);
+ void (*reschedule_io)(struct nfs_pgio_header *);
+};
+
+struct nfs_mds_commit_info {
+ atomic_t rpcs_out;
+ atomic_long_t ncommit;
+ struct list_head list;
+};
+
+struct nfs_commit_data;
+
+struct nfs_commit_info;
+
+struct nfs_commit_completion_ops {
+ void (*completion)(struct nfs_commit_data *);
+ void (*resched_write)(struct nfs_commit_info *, struct nfs_page *);
+};
+
+struct nfs_commit_data {
+ struct rpc_task task;
+ struct inode *inode;
+ const struct cred *cred;
+ struct nfs_fattr fattr;
+ struct nfs_writeverf verf;
+ struct list_head pages;
+ struct list_head list;
+ struct nfs_direct_req *dreq;
+ struct nfs_commitargs args;
+ struct nfs_commitres res;
+ struct nfs_open_context *context;
+ struct pnfs_layout_segment *lseg;
+ struct nfs_client *ds_clp;
+ int ds_commit_index;
+ loff_t lwb;
+ const struct rpc_call_ops *mds_ops;
+ const struct nfs_commit_completion_ops *completion_ops;
+ int (*commit_done_cb)(struct rpc_task *, struct nfs_commit_data *);
+ long unsigned int flags;
+};
+
+struct nfs_commit_info {
+ struct inode *inode;
+ struct nfs_mds_commit_info *mds;
+ struct pnfs_ds_commit_info *ds;
+ struct nfs_direct_req *dreq;
+ const struct nfs_commit_completion_ops *completion_ops;
+};
+
+struct nfs_unlinkdata {
+ struct nfs_removeargs args;
+ struct nfs_removeres res;
+ struct dentry *dentry;
+ wait_queue_head_t wq;
+ const struct cred *cred;
+ struct nfs_fattr dir_attr;
+ long int timeout;
+};
+
+struct nfs_renamedata {
+ struct nfs_renameargs args;
+ struct nfs_renameres res;
+ const struct cred *cred;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct nfs_fattr old_fattr;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct nfs_fattr new_fattr;
+ void (*complete)(struct rpc_task *, struct nfs_renamedata *);
+ long int timeout;
+ bool cancelled;
+};
+
+struct nlmclnt_operations;
+
+struct nfs_client_initdata;
+
+struct nfs_access_entry;
+
+struct nfs_rpc_ops {
+ u32 version;
+ const struct dentry_operations *dentry_ops;
+ const struct inode_operations *dir_inode_ops;
+ const struct inode_operations *file_inode_ops;
+ const struct file_operations *file_ops;
+ const struct nlmclnt_operations *nlmclnt_ops;
+ int (*getroot)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+ int (*submount)(struct fs_context *, struct nfs_server *);
+ int (*try_get_tree)(struct fs_context *);
+ int (*getattr)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *, struct inode *);
+ int (*setattr)(struct dentry *, struct nfs_fattr *, struct iattr *);
+ int (*lookup)(struct inode *, struct dentry *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *);
+ int (*lookupp)(struct inode *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *);
+ int (*access)(struct inode *, struct nfs_access_entry *);
+ int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int);
+ int (*create)(struct inode *, struct dentry *, struct iattr *, int);
+ int (*remove)(struct inode *, struct dentry *);
+ void (*unlink_setup)(struct rpc_message *, struct dentry *, struct inode *);
+ void (*unlink_rpc_prepare)(struct rpc_task *, struct nfs_unlinkdata *);
+ int (*unlink_done)(struct rpc_task *, struct inode *);
+ void (*rename_setup)(struct rpc_message *, struct dentry *, struct dentry *);
+ void (*rename_rpc_prepare)(struct rpc_task *, struct nfs_renamedata *);
+ int (*rename_done)(struct rpc_task *, struct inode *, struct inode *);
+ int (*link)(struct inode *, struct inode *, const struct qstr *);
+ int (*symlink)(struct inode *, struct dentry *, struct page *, unsigned int, struct iattr *);
+ int (*mkdir)(struct inode *, struct dentry *, struct iattr *);
+ int (*rmdir)(struct inode *, const struct qstr *);
+ int (*readdir)(struct dentry *, const struct cred *, u64, struct page **, unsigned int, bool);
+ int (*mknod)(struct inode *, struct dentry *, struct iattr *, dev_t);
+ int (*statfs)(struct nfs_server *, struct nfs_fh *, struct nfs_fsstat *);
+ int (*fsinfo)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+ int (*pathconf)(struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *);
+ int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
+ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool);
+ int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *);
+ void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
+ int (*read_done)(struct rpc_task *, struct nfs_pgio_header *);
+ void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, struct rpc_clnt **);
+ int (*write_done)(struct rpc_task *, struct nfs_pgio_header *);
+ void (*commit_setup)(struct nfs_commit_data *, struct rpc_message *, struct rpc_clnt **);
+ void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
+ int (*commit_done)(struct rpc_task *, struct nfs_commit_data *);
+ int (*lock)(struct file *, int, struct file_lock *);
+ int (*lock_check_bounds)(const struct file_lock *);
+ void (*clear_acl_cache)(struct inode *);
+ void (*close_context)(struct nfs_open_context *, int);
+ struct inode * (*open_context)(struct inode *, struct nfs_open_context *, int, struct iattr *, int *);
+ int (*have_delegation)(struct inode *, fmode_t);
+ struct nfs_client * (*alloc_client)(const struct nfs_client_initdata *);
+ struct nfs_client * (*init_client)(struct nfs_client *, const struct nfs_client_initdata *);
+ void (*free_client)(struct nfs_client *);
+ struct nfs_server * (*create_server)(struct fs_context *);
+ struct nfs_server * (*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t);
+};
+
+struct nfs_access_entry {
+ struct rb_node rb_node;
+ struct list_head lru;
+ const struct cred *cred;
+ __u32 mask;
+ struct callback_head callback_head;
+};
+
+enum perf_branch_sample_type_shift {
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0,
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1,
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2,
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3,
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4,
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5,
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6,
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7,
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8,
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9,
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10,
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11,
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12,
+ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13,
+ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14,
+ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15,
+ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16,
+ PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17,
+ PERF_SAMPLE_BRANCH_MAX_SHIFT = 18,
+};
+
+enum exception_stack_ordering {
+ ESTACK_DF = 0,
+ ESTACK_NMI = 1,
+ ESTACK_DB = 2,
+ ESTACK_MCE = 3,
+ N_EXCEPTION_STACKS = 4,
+};
+
+enum {
+ TSK_TRACE_FL_TRACE_BIT = 0,
+ TSK_TRACE_FL_GRAPH_BIT = 1,
+};
+
+struct uuidcmp {
+ const char *uuid;
+ int len;
+};
+
+typedef __u32 __le32;
+
+typedef __u64 __le64;
+
+struct minix_super_block {
+ __u16 s_ninodes;
+ __u16 s_nzones;
+ __u16 s_imap_blocks;
+ __u16 s_zmap_blocks;
+ __u16 s_firstdatazone;
+ __u16 s_log_zone_size;
+ __u32 s_max_size;
+ __u16 s_magic;
+ __u16 s_state;
+ __u32 s_zones;
+};
+
+struct romfs_super_block {
+ __be32 word0;
+ __be32 word1;
+ __be32 size;
+ __be32 checksum;
+ char name[0];
+};
+
+struct cramfs_inode {
+ __u32 mode: 16;
+ __u32 uid: 16;
+ __u32 size: 24;
+ __u32 gid: 8;
+ __u32 namelen: 6;
+ __u32 offset: 26;
+};
+
+struct cramfs_info {
+ __u32 crc;
+ __u32 edition;
+ __u32 blocks;
+ __u32 files;
+};
+
+struct cramfs_super {
+ __u32 magic;
+ __u32 size;
+ __u32 flags;
+ __u32 future;
+ __u8 signature[16];
+ struct cramfs_info fsid;
+ __u8 name[16];
+ struct cramfs_inode root;
+};
+
+struct squashfs_super_block {
+ __le32 s_magic;
+ __le32 inodes;
+ __le32 mkfs_time;
+ __le32 block_size;
+ __le32 fragments;
+ __le16 compression;
+ __le16 block_log;
+ __le16 flags;
+ __le16 no_ids;
+ __le16 s_major;
+ __le16 s_minor;
+ __le64 root_inode;
+ __le64 bytes_used;
+ __le64 id_table_start;
+ __le64 xattr_id_table_start;
+ __le64 inode_table_start;
+ __le64 directory_table_start;
+ __le64 fragment_table_start;
+ __le64 lookup_table_start;
+};
+
+typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *));
+
+struct subprocess_info {
+ struct work_struct work;
+ struct completion *complete;
+ const char *path;
+ char **argv;
+ char **envp;
+ struct file *file;
+ int wait;
+ int retval;
+ pid_t pid;
+ int (*init)(struct subprocess_info *, struct cred *);
+ void (*cleanup)(struct subprocess_info *);
+ void *data;
+};
+
+struct linux_dirent64 {
+ u64 d_ino;
+ s64 d_off;
+ short unsigned int d_reclen;
+ unsigned char d_type;
+ char d_name[0];
+};
+
+struct hash {
+ int ino;
+ int minor;
+ int major;
+ umode_t mode;
+ struct hash *next;
+ char name[4098];
+};
+
+struct dir_entry {
+ struct list_head list;
+ char *name;
+ time64_t mtime;
+};
+
+enum state {
+ Start = 0,
+ Collect = 1,
+ GotHeader = 2,
+ SkipIt = 3,
+ GotName = 4,
+ CopyFile = 5,
+ GotSymlink = 6,
+ Reset = 7,
+};
+
+enum ucount_type {
+ UCOUNT_USER_NAMESPACES = 0,
+ UCOUNT_PID_NAMESPACES = 1,
+ UCOUNT_UTS_NAMESPACES = 2,
+ UCOUNT_IPC_NAMESPACES = 3,
+ UCOUNT_NET_NAMESPACES = 4,
+ UCOUNT_MNT_NAMESPACES = 5,
+ UCOUNT_CGROUP_NAMESPACES = 6,
+ UCOUNT_TIME_NAMESPACES = 7,
+ UCOUNT_INOTIFY_INSTANCES = 8,
+ UCOUNT_INOTIFY_WATCHES = 9,
+ UCOUNT_COUNTS = 10,
+};
+
+enum flow_dissector_key_id {
+ FLOW_DISSECTOR_KEY_CONTROL = 0,
+ FLOW_DISSECTOR_KEY_BASIC = 1,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3,
+ FLOW_DISSECTOR_KEY_PORTS = 4,
+ FLOW_DISSECTOR_KEY_PORTS_RANGE = 5,
+ FLOW_DISSECTOR_KEY_ICMP = 6,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS = 7,
+ FLOW_DISSECTOR_KEY_TIPC = 8,
+ FLOW_DISSECTOR_KEY_ARP = 9,
+ FLOW_DISSECTOR_KEY_VLAN = 10,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL = 11,
+ FLOW_DISSECTOR_KEY_GRE_KEYID = 12,
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13,
+ FLOW_DISSECTOR_KEY_ENC_KEYID = 14,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15,
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL = 17,
+ FLOW_DISSECTOR_KEY_ENC_PORTS = 18,
+ FLOW_DISSECTOR_KEY_MPLS = 19,
+ FLOW_DISSECTOR_KEY_TCP = 20,
+ FLOW_DISSECTOR_KEY_IP = 21,
+ FLOW_DISSECTOR_KEY_CVLAN = 22,
+ FLOW_DISSECTOR_KEY_ENC_IP = 23,
+ FLOW_DISSECTOR_KEY_ENC_OPTS = 24,
+ FLOW_DISSECTOR_KEY_META = 25,
+ FLOW_DISSECTOR_KEY_CT = 26,
+ FLOW_DISSECTOR_KEY_MAX = 27,
+};
+
+enum {
+ IPSTATS_MIB_NUM = 0,
+ IPSTATS_MIB_INPKTS = 1,
+ IPSTATS_MIB_INOCTETS = 2,
+ IPSTATS_MIB_INDELIVERS = 3,
+ IPSTATS_MIB_OUTFORWDATAGRAMS = 4,
+ IPSTATS_MIB_OUTPKTS = 5,
+ IPSTATS_MIB_OUTOCTETS = 6,
+ IPSTATS_MIB_INHDRERRORS = 7,
+ IPSTATS_MIB_INTOOBIGERRORS = 8,
+ IPSTATS_MIB_INNOROUTES = 9,
+ IPSTATS_MIB_INADDRERRORS = 10,
+ IPSTATS_MIB_INUNKNOWNPROTOS = 11,
+ IPSTATS_MIB_INTRUNCATEDPKTS = 12,
+ IPSTATS_MIB_INDISCARDS = 13,
+ IPSTATS_MIB_OUTDISCARDS = 14,
+ IPSTATS_MIB_OUTNOROUTES = 15,
+ IPSTATS_MIB_REASMTIMEOUT = 16,
+ IPSTATS_MIB_REASMREQDS = 17,
+ IPSTATS_MIB_REASMOKS = 18,
+ IPSTATS_MIB_REASMFAILS = 19,
+ IPSTATS_MIB_FRAGOKS = 20,
+ IPSTATS_MIB_FRAGFAILS = 21,
+ IPSTATS_MIB_FRAGCREATES = 22,
+ IPSTATS_MIB_INMCASTPKTS = 23,
+ IPSTATS_MIB_OUTMCASTPKTS = 24,
+ IPSTATS_MIB_INBCASTPKTS = 25,
+ IPSTATS_MIB_OUTBCASTPKTS = 26,
+ IPSTATS_MIB_INMCASTOCTETS = 27,
+ IPSTATS_MIB_OUTMCASTOCTETS = 28,
+ IPSTATS_MIB_INBCASTOCTETS = 29,
+ IPSTATS_MIB_OUTBCASTOCTETS = 30,
+ IPSTATS_MIB_CSUMERRORS = 31,
+ IPSTATS_MIB_NOECTPKTS = 32,
+ IPSTATS_MIB_ECT1PKTS = 33,
+ IPSTATS_MIB_ECT0PKTS = 34,
+ IPSTATS_MIB_CEPKTS = 35,
+ IPSTATS_MIB_REASM_OVERLAPS = 36,
+ __IPSTATS_MIB_MAX = 37,
+};
+
+enum {
+ ICMP_MIB_NUM = 0,
+ ICMP_MIB_INMSGS = 1,
+ ICMP_MIB_INERRORS = 2,
+ ICMP_MIB_INDESTUNREACHS = 3,
+ ICMP_MIB_INTIMEEXCDS = 4,
+ ICMP_MIB_INPARMPROBS = 5,
+ ICMP_MIB_INSRCQUENCHS = 6,
+ ICMP_MIB_INREDIRECTS = 7,
+ ICMP_MIB_INECHOS = 8,
+ ICMP_MIB_INECHOREPS = 9,
+ ICMP_MIB_INTIMESTAMPS = 10,
+ ICMP_MIB_INTIMESTAMPREPS = 11,
+ ICMP_MIB_INADDRMASKS = 12,
+ ICMP_MIB_INADDRMASKREPS = 13,
+ ICMP_MIB_OUTMSGS = 14,
+ ICMP_MIB_OUTERRORS = 15,
+ ICMP_MIB_OUTDESTUNREACHS = 16,
+ ICMP_MIB_OUTTIMEEXCDS = 17,
+ ICMP_MIB_OUTPARMPROBS = 18,
+ ICMP_MIB_OUTSRCQUENCHS = 19,
+ ICMP_MIB_OUTREDIRECTS = 20,
+ ICMP_MIB_OUTECHOS = 21,
+ ICMP_MIB_OUTECHOREPS = 22,
+ ICMP_MIB_OUTTIMESTAMPS = 23,
+ ICMP_MIB_OUTTIMESTAMPREPS = 24,
+ ICMP_MIB_OUTADDRMASKS = 25,
+ ICMP_MIB_OUTADDRMASKREPS = 26,
+ ICMP_MIB_CSUMERRORS = 27,
+ __ICMP_MIB_MAX = 28,
+};
+
+enum {
+ ICMP6_MIB_NUM = 0,
+ ICMP6_MIB_INMSGS = 1,
+ ICMP6_MIB_INERRORS = 2,
+ ICMP6_MIB_OUTMSGS = 3,
+ ICMP6_MIB_OUTERRORS = 4,
+ ICMP6_MIB_CSUMERRORS = 5,
+ __ICMP6_MIB_MAX = 6,
+};
+
+enum {
+ TCP_MIB_NUM = 0,
+ TCP_MIB_RTOALGORITHM = 1,
+ TCP_MIB_RTOMIN = 2,
+ TCP_MIB_RTOMAX = 3,
+ TCP_MIB_MAXCONN = 4,
+ TCP_MIB_ACTIVEOPENS = 5,
+ TCP_MIB_PASSIVEOPENS = 6,
+ TCP_MIB_ATTEMPTFAILS = 7,
+ TCP_MIB_ESTABRESETS = 8,
+ TCP_MIB_CURRESTAB = 9,
+ TCP_MIB_INSEGS = 10,
+ TCP_MIB_OUTSEGS = 11,
+ TCP_MIB_RETRANSSEGS = 12,
+ TCP_MIB_INERRS = 13,
+ TCP_MIB_OUTRSTS = 14,
+ TCP_MIB_CSUMERRORS = 15,
+ __TCP_MIB_MAX = 16,
+};
+
+enum {
+ UDP_MIB_NUM = 0,
+ UDP_MIB_INDATAGRAMS = 1,
+ UDP_MIB_NOPORTS = 2,
+ UDP_MIB_INERRORS = 3,
+ UDP_MIB_OUTDATAGRAMS = 4,
+ UDP_MIB_RCVBUFERRORS = 5,
+ UDP_MIB_SNDBUFERRORS = 6,
+ UDP_MIB_CSUMERRORS = 7,
+ UDP_MIB_IGNOREDMULTI = 8,
+ __UDP_MIB_MAX = 9,
+};
+
+enum {
+ LINUX_MIB_NUM = 0,
+ LINUX_MIB_SYNCOOKIESSENT = 1,
+ LINUX_MIB_SYNCOOKIESRECV = 2,
+ LINUX_MIB_SYNCOOKIESFAILED = 3,
+ LINUX_MIB_EMBRYONICRSTS = 4,
+ LINUX_MIB_PRUNECALLED = 5,
+ LINUX_MIB_RCVPRUNED = 6,
+ LINUX_MIB_OFOPRUNED = 7,
+ LINUX_MIB_OUTOFWINDOWICMPS = 8,
+ LINUX_MIB_LOCKDROPPEDICMPS = 9,
+ LINUX_MIB_ARPFILTER = 10,
+ LINUX_MIB_TIMEWAITED = 11,
+ LINUX_MIB_TIMEWAITRECYCLED = 12,
+ LINUX_MIB_TIMEWAITKILLED = 13,
+ LINUX_MIB_PAWSACTIVEREJECTED = 14,
+ LINUX_MIB_PAWSESTABREJECTED = 15,
+ LINUX_MIB_DELAYEDACKS = 16,
+ LINUX_MIB_DELAYEDACKLOCKED = 17,
+ LINUX_MIB_DELAYEDACKLOST = 18,
+ LINUX_MIB_LISTENOVERFLOWS = 19,
+ LINUX_MIB_LISTENDROPS = 20,
+ LINUX_MIB_TCPHPHITS = 21,
+ LINUX_MIB_TCPPUREACKS = 22,
+ LINUX_MIB_TCPHPACKS = 23,
+ LINUX_MIB_TCPRENORECOVERY = 24,
+ LINUX_MIB_TCPSACKRECOVERY = 25,
+ LINUX_MIB_TCPSACKRENEGING = 26,
+ LINUX_MIB_TCPSACKREORDER = 27,
+ LINUX_MIB_TCPRENOREORDER = 28,
+ LINUX_MIB_TCPTSREORDER = 29,
+ LINUX_MIB_TCPFULLUNDO = 30,
+ LINUX_MIB_TCPPARTIALUNDO = 31,
+ LINUX_MIB_TCPDSACKUNDO = 32,
+ LINUX_MIB_TCPLOSSUNDO = 33,
+ LINUX_MIB_TCPLOSTRETRANSMIT = 34,
+ LINUX_MIB_TCPRENOFAILURES = 35,
+ LINUX_MIB_TCPSACKFAILURES = 36,
+ LINUX_MIB_TCPLOSSFAILURES = 37,
+ LINUX_MIB_TCPFASTRETRANS = 38,
+ LINUX_MIB_TCPSLOWSTARTRETRANS = 39,
+ LINUX_MIB_TCPTIMEOUTS = 40,
+ LINUX_MIB_TCPLOSSPROBES = 41,
+ LINUX_MIB_TCPLOSSPROBERECOVERY = 42,
+ LINUX_MIB_TCPRENORECOVERYFAIL = 43,
+ LINUX_MIB_TCPSACKRECOVERYFAIL = 44,
+ LINUX_MIB_TCPRCVCOLLAPSED = 45,
+ LINUX_MIB_TCPDSACKOLDSENT = 46,
+ LINUX_MIB_TCPDSACKOFOSENT = 47,
+ LINUX_MIB_TCPDSACKRECV = 48,
+ LINUX_MIB_TCPDSACKOFORECV = 49,
+ LINUX_MIB_TCPABORTONDATA = 50,
+ LINUX_MIB_TCPABORTONCLOSE = 51,
+ LINUX_MIB_TCPABORTONMEMORY = 52,
+ LINUX_MIB_TCPABORTONTIMEOUT = 53,
+ LINUX_MIB_TCPABORTONLINGER = 54,
+ LINUX_MIB_TCPABORTFAILED = 55,
+ LINUX_MIB_TCPMEMORYPRESSURES = 56,
+ LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57,
+ LINUX_MIB_TCPSACKDISCARD = 58,
+ LINUX_MIB_TCPDSACKIGNOREDOLD = 59,
+ LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60,
+ LINUX_MIB_TCPSPURIOUSRTOS = 61,
+ LINUX_MIB_TCPMD5NOTFOUND = 62,
+ LINUX_MIB_TCPMD5UNEXPECTED = 63,
+ LINUX_MIB_TCPMD5FAILURE = 64,
+ LINUX_MIB_SACKSHIFTED = 65,
+ LINUX_MIB_SACKMERGED = 66,
+ LINUX_MIB_SACKSHIFTFALLBACK = 67,
+ LINUX_MIB_TCPBACKLOGDROP = 68,
+ LINUX_MIB_PFMEMALLOCDROP = 69,
+ LINUX_MIB_TCPMINTTLDROP = 70,
+ LINUX_MIB_TCPDEFERACCEPTDROP = 71,
+ LINUX_MIB_IPRPFILTER = 72,
+ LINUX_MIB_TCPTIMEWAITOVERFLOW = 73,
+ LINUX_MIB_TCPREQQFULLDOCOOKIES = 74,
+ LINUX_MIB_TCPREQQFULLDROP = 75,
+ LINUX_MIB_TCPRETRANSFAIL = 76,
+ LINUX_MIB_TCPRCVCOALESCE = 77,
+ LINUX_MIB_TCPBACKLOGCOALESCE = 78,
+ LINUX_MIB_TCPOFOQUEUE = 79,
+ LINUX_MIB_TCPOFODROP = 80,
+ LINUX_MIB_TCPOFOMERGE = 81,
+ LINUX_MIB_TCPCHALLENGEACK = 82,
+ LINUX_MIB_TCPSYNCHALLENGE = 83,
+ LINUX_MIB_TCPFASTOPENACTIVE = 84,
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85,
+ LINUX_MIB_TCPFASTOPENPASSIVE = 86,
+ LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87,
+ LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88,
+ LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89,
+ LINUX_MIB_TCPFASTOPENBLACKHOLE = 90,
+ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91,
+ LINUX_MIB_BUSYPOLLRXPACKETS = 92,
+ LINUX_MIB_TCPAUTOCORKING = 93,
+ LINUX_MIB_TCPFROMZEROWINDOWADV = 94,
+ LINUX_MIB_TCPTOZEROWINDOWADV = 95,
+ LINUX_MIB_TCPWANTZEROWINDOWADV = 96,
+ LINUX_MIB_TCPSYNRETRANS = 97,
+ LINUX_MIB_TCPORIGDATASENT = 98,
+ LINUX_MIB_TCPHYSTARTTRAINDETECT = 99,
+ LINUX_MIB_TCPHYSTARTTRAINCWND = 100,
+ LINUX_MIB_TCPHYSTARTDELAYDETECT = 101,
+ LINUX_MIB_TCPHYSTARTDELAYCWND = 102,
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103,
+ LINUX_MIB_TCPACKSKIPPEDPAWS = 104,
+ LINUX_MIB_TCPACKSKIPPEDSEQ = 105,
+ LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106,
+ LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107,
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108,
+ LINUX_MIB_TCPWINPROBE = 109,
+ LINUX_MIB_TCPKEEPALIVE = 110,
+ LINUX_MIB_TCPMTUPFAIL = 111,
+ LINUX_MIB_TCPMTUPSUCCESS = 112,
+ LINUX_MIB_TCPDELIVERED = 113,
+ LINUX_MIB_TCPDELIVEREDCE = 114,
+ LINUX_MIB_TCPACKCOMPRESSED = 115,
+ LINUX_MIB_TCPZEROWINDOWDROP = 116,
+ LINUX_MIB_TCPRCVQDROP = 117,
+ LINUX_MIB_TCPWQUEUETOOBIG = 118,
+ LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119,
+ LINUX_MIB_TCPTIMEOUTREHASH = 120,
+ LINUX_MIB_TCPDUPLICATEDATAREHASH = 121,
+ __LINUX_MIB_MAX = 122,
+};
+
+enum {
+ LINUX_MIB_XFRMNUM = 0,
+ LINUX_MIB_XFRMINERROR = 1,
+ LINUX_MIB_XFRMINBUFFERERROR = 2,
+ LINUX_MIB_XFRMINHDRERROR = 3,
+ LINUX_MIB_XFRMINNOSTATES = 4,
+ LINUX_MIB_XFRMINSTATEPROTOERROR = 5,
+ LINUX_MIB_XFRMINSTATEMODEERROR = 6,
+ LINUX_MIB_XFRMINSTATESEQERROR = 7,
+ LINUX_MIB_XFRMINSTATEEXPIRED = 8,
+ LINUX_MIB_XFRMINSTATEMISMATCH = 9,
+ LINUX_MIB_XFRMINSTATEINVALID = 10,
+ LINUX_MIB_XFRMINTMPLMISMATCH = 11,
+ LINUX_MIB_XFRMINNOPOLS = 12,
+ LINUX_MIB_XFRMINPOLBLOCK = 13,
+ LINUX_MIB_XFRMINPOLERROR = 14,
+ LINUX_MIB_XFRMOUTERROR = 15,
+ LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16,
+ LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17,
+ LINUX_MIB_XFRMOUTNOSTATES = 18,
+ LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19,
+ LINUX_MIB_XFRMOUTSTATEMODEERROR = 20,
+ LINUX_MIB_XFRMOUTSTATESEQERROR = 21,
+ LINUX_MIB_XFRMOUTSTATEEXPIRED = 22,
+ LINUX_MIB_XFRMOUTPOLBLOCK = 23,
+ LINUX_MIB_XFRMOUTPOLDEAD = 24,
+ LINUX_MIB_XFRMOUTPOLERROR = 25,
+ LINUX_MIB_XFRMFWDHDRERROR = 26,
+ LINUX_MIB_XFRMOUTSTATEINVALID = 27,
+ LINUX_MIB_XFRMACQUIREERROR = 28,
+ __LINUX_MIB_XFRMMAX = 29,
+};
+
+enum {
+ LINUX_MIB_TLSNUM = 0,
+ LINUX_MIB_TLSCURRTXSW = 1,
+ LINUX_MIB_TLSCURRRXSW = 2,
+ LINUX_MIB_TLSCURRTXDEVICE = 3,
+ LINUX_MIB_TLSCURRRXDEVICE = 4,
+ LINUX_MIB_TLSTXSW = 5,
+ LINUX_MIB_TLSRXSW = 6,
+ LINUX_MIB_TLSTXDEVICE = 7,
+ LINUX_MIB_TLSRXDEVICE = 8,
+ LINUX_MIB_TLSDECRYPTERROR = 9,
+ LINUX_MIB_TLSRXDEVICERESYNC = 10,
+ __LINUX_MIB_TLSMAX = 11,
+};
+
+enum nf_inet_hooks {
+ NF_INET_PRE_ROUTING = 0,
+ NF_INET_LOCAL_IN = 1,
+ NF_INET_FORWARD = 2,
+ NF_INET_LOCAL_OUT = 3,
+ NF_INET_POST_ROUTING = 4,
+ NF_INET_NUMHOOKS = 5,
+};
+
+enum {
+ NFPROTO_UNSPEC = 0,
+ NFPROTO_INET = 1,
+ NFPROTO_IPV4 = 2,
+ NFPROTO_ARP = 3,
+ NFPROTO_NETDEV = 5,
+ NFPROTO_BRIDGE = 7,
+ NFPROTO_IPV6 = 10,
+ NFPROTO_DECNET = 12,
+ NFPROTO_NUMPROTO = 13,
+};
+
+enum tcp_conntrack {
+ TCP_CONNTRACK_NONE = 0,
+ TCP_CONNTRACK_SYN_SENT = 1,
+ TCP_CONNTRACK_SYN_RECV = 2,
+ TCP_CONNTRACK_ESTABLISHED = 3,
+ TCP_CONNTRACK_FIN_WAIT = 4,
+ TCP_CONNTRACK_CLOSE_WAIT = 5,
+ TCP_CONNTRACK_LAST_ACK = 6,
+ TCP_CONNTRACK_TIME_WAIT = 7,
+ TCP_CONNTRACK_CLOSE = 8,
+ TCP_CONNTRACK_LISTEN = 9,
+ TCP_CONNTRACK_MAX = 10,
+ TCP_CONNTRACK_IGNORE = 11,
+ TCP_CONNTRACK_RETRANS = 12,
+ TCP_CONNTRACK_UNACK = 13,
+ TCP_CONNTRACK_TIMEOUT_MAX = 14,
+};
+
+enum ct_dccp_states {
+ CT_DCCP_NONE = 0,
+ CT_DCCP_REQUEST = 1,
+ CT_DCCP_RESPOND = 2,
+ CT_DCCP_PARTOPEN = 3,
+ CT_DCCP_OPEN = 4,
+ CT_DCCP_CLOSEREQ = 5,
+ CT_DCCP_CLOSING = 6,
+ CT_DCCP_TIMEWAIT = 7,
+ CT_DCCP_IGNORE = 8,
+ CT_DCCP_INVALID = 9,
+ __CT_DCCP_MAX = 10,
+};
+
+enum ip_conntrack_dir {
+ IP_CT_DIR_ORIGINAL = 0,
+ IP_CT_DIR_REPLY = 1,
+ IP_CT_DIR_MAX = 2,
+};
+
+enum sctp_conntrack {
+ SCTP_CONNTRACK_NONE = 0,
+ SCTP_CONNTRACK_CLOSED = 1,
+ SCTP_CONNTRACK_COOKIE_WAIT = 2,
+ SCTP_CONNTRACK_COOKIE_ECHOED = 3,
+ SCTP_CONNTRACK_ESTABLISHED = 4,
+ SCTP_CONNTRACK_SHUTDOWN_SENT = 5,
+ SCTP_CONNTRACK_SHUTDOWN_RECD = 6,
+ SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7,
+ SCTP_CONNTRACK_HEARTBEAT_SENT = 8,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED = 9,
+ SCTP_CONNTRACK_MAX = 10,
+};
+
+enum udp_conntrack {
+ UDP_CT_UNREPLIED = 0,
+ UDP_CT_REPLIED = 1,
+ UDP_CT_MAX = 2,
+};
+
+enum gre_conntrack {
+ GRE_CT_UNREPLIED = 0,
+ GRE_CT_REPLIED = 1,
+ GRE_CT_MAX = 2,
+};
+
+enum {
+ XFRM_POLICY_IN = 0,
+ XFRM_POLICY_OUT = 1,
+ XFRM_POLICY_FWD = 2,
+ XFRM_POLICY_MASK = 3,
+ XFRM_POLICY_MAX = 3,
+};
+
+enum netns_bpf_attach_type {
+ NETNS_BPF_INVALID = -1,
+ NETNS_BPF_FLOW_DISSECTOR = 0,
+ MAX_NETNS_BPF_ATTACH_TYPE = 1,
+};
+
+enum skb_ext_id {
+ SKB_EXT_SEC_PATH = 0,
+ SKB_EXT_NUM = 1,
+};
+
+enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE = 0,
+ SCHED_TUNABLESCALING_LOG = 1,
+ SCHED_TUNABLESCALING_LINEAR = 2,
+ SCHED_TUNABLESCALING_END = 3,
+};
+
+enum audit_ntp_type {
+ AUDIT_NTP_OFFSET = 0,
+ AUDIT_NTP_FREQ = 1,
+ AUDIT_NTP_STATUS = 2,
+ AUDIT_NTP_TAI = 3,
+ AUDIT_NTP_TICK = 4,
+ AUDIT_NTP_ADJUST = 5,
+ AUDIT_NTP_NVALS = 6,
+};
+
+typedef long int (*sys_call_ptr_t)(const struct pt_regs *);
+
+struct io_bitmap {
+ u64 sequence;
+ refcount_t refcnt;
+ unsigned int max;
+ long unsigned int bitmap[1024];
+};
+
+struct seccomp_data {
+ int nr;
+ __u32 arch;
+ __u64 instruction_pointer;
+ __u64 args[6];
+};
+
+struct ksignal {
+ struct k_sigaction ka;
+ kernel_siginfo_t info;
+ int sig;
+};
+
+enum {
+ TASKSTATS_CMD_UNSPEC = 0,
+ TASKSTATS_CMD_GET = 1,
+ TASKSTATS_CMD_NEW = 2,
+ __TASKSTATS_CMD_MAX = 3,
+};
+
+enum {
+ HI_SOFTIRQ = 0,
+ TIMER_SOFTIRQ = 1,
+ NET_TX_SOFTIRQ = 2,
+ NET_RX_SOFTIRQ = 3,
+ BLOCK_SOFTIRQ = 4,
+ IRQ_POLL_SOFTIRQ = 5,
+ TASKLET_SOFTIRQ = 6,
+ SCHED_SOFTIRQ = 7,
+ HRTIMER_SOFTIRQ = 8,
+ RCU_SOFTIRQ = 9,
+ NR_SOFTIRQS = 10,
+};
+
+enum cpu_usage_stat {
+ CPUTIME_USER = 0,
+ CPUTIME_NICE = 1,
+ CPUTIME_SYSTEM = 2,
+ CPUTIME_SOFTIRQ = 3,
+ CPUTIME_IRQ = 4,
+ CPUTIME_IDLE = 5,
+ CPUTIME_IOWAIT = 6,
+ CPUTIME_STEAL = 7,
+ CPUTIME_GUEST = 8,
+ CPUTIME_GUEST_NICE = 9,
+ NR_STATS = 10,
+};
+
+enum {
+ EI_ETYPE_NONE = 0,
+ EI_ETYPE_NULL = 1,
+ EI_ETYPE_ERRNO = 2,
+ EI_ETYPE_ERRNO_NULL = 3,
+ EI_ETYPE_TRUE = 4,
+};
+
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED = 0,
+ BPF_CGROUP_STORAGE_PERCPU = 1,
+ __BPF_CGROUP_STORAGE_MAX = 2,
+};
+
+enum cgroup_subsys_id {
+ cpuset_cgrp_id = 0,
+ cpu_cgrp_id = 1,
+ cpuacct_cgrp_id = 2,
+ io_cgrp_id = 3,
+ memory_cgrp_id = 4,
+ devices_cgrp_id = 5,
+ freezer_cgrp_id = 6,
+ net_cls_cgrp_id = 7,
+ perf_event_cgrp_id = 8,
+ net_prio_cgrp_id = 9,
+ hugetlb_cgrp_id = 10,
+ pids_cgrp_id = 11,
+ rdma_cgrp_id = 12,
+ misc_cgrp_id = 13,
+ debug_cgrp_id = 14,
+ CGROUP_SUBSYS_COUNT = 15,
+};
+
+typedef u8 kprobe_opcode_t;
+
+struct arch_specific_insn {
+ kprobe_opcode_t *insn;
+ bool boostable;
+ bool if_modifier;
+};
+
+struct kprobe;
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ long unsigned int status;
+ long unsigned int old_flags;
+ long unsigned int saved_flags;
+};
+
+typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *);
+
+typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int);
+
+typedef int (*kprobe_fault_handler_t)(struct kprobe *, struct pt_regs *, int);
+
+struct kprobe {
+ struct hlist_node hlist;
+ struct list_head list;
+ long unsigned int nmissed;
+ kprobe_opcode_t *addr;
+ const char *symbol_name;
+ unsigned int offset;
+ kprobe_pre_handler_t pre_handler;
+ kprobe_post_handler_t post_handler;
+ kprobe_fault_handler_t fault_handler;
+ kprobe_opcode_t opcode;
+ struct arch_specific_insn ainsn;
+ u32 flags;
+};
+
+struct kprobe_ctlblk {
+ long unsigned int kprobe_status;
+ long unsigned int kprobe_old_flags;
+ long unsigned int kprobe_saved_flags;
+ struct prev_kprobe prev_kprobe;
+};
+
+struct kretprobe_blackpoint {
+ const char *name;
+ void *addr;
+};
+
+struct kprobe_insn_cache {
+ struct mutex mutex;
+ void * (*alloc)();
+ void (*free)(void *);
+ struct list_head pages;
+ size_t insn_size;
+ int nr_garbage;
+};
+
+struct trace_event_raw_sys_enter {
+ struct trace_entry ent;
+ long int id;
+ long unsigned int args[6];
+ char __data[0];
+};
+
+struct trace_event_raw_sys_exit {
+ struct trace_entry ent;
+ long int id;
+ long int ret;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_sys_enter {};
+
+struct trace_event_data_offsets_sys_exit {};
+
+typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int);
+
+typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int);
+
+struct alt_instr {
+ s32 instr_offset;
+ s32 repl_offset;
+ u16 cpuid;
+ u8 instrlen;
+ u8 replacementlen;
+ u8 padlen;
+} __attribute__((packed));
+
+struct mmu_gather_batch {
+ struct mmu_gather_batch *next;
+ unsigned int nr;
+ unsigned int max;
+ struct page *pages[0];
+};
+
+struct mmu_table_batch;
+
+struct mmu_gather {
+ struct mm_struct *mm;
+ struct mmu_table_batch *batch;
+ long unsigned int start;
+ long unsigned int end;
+ unsigned int fullmm: 1;
+ unsigned int need_flush_all: 1;
+ unsigned int freed_tables: 1;
+ unsigned int cleared_ptes: 1;
+ unsigned int cleared_pmds: 1;
+ unsigned int cleared_puds: 1;
+ unsigned int cleared_p4ds: 1;
+ unsigned int vma_exec: 1;
+ unsigned int vma_huge: 1;
+ unsigned int batch_count;
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[8];
+};
+
+struct timens_offset {
+ s64 sec;
+ u64 nsec;
+};
+
+enum vm_fault_reason {
+ VM_FAULT_OOM = 1,
+ VM_FAULT_SIGBUS = 2,
+ VM_FAULT_MAJOR = 4,
+ VM_FAULT_WRITE = 8,
+ VM_FAULT_HWPOISON = 16,
+ VM_FAULT_HWPOISON_LARGE = 32,
+ VM_FAULT_SIGSEGV = 64,
+ VM_FAULT_NOPAGE = 256,
+ VM_FAULT_LOCKED = 512,
+ VM_FAULT_RETRY = 1024,
+ VM_FAULT_FALLBACK = 2048,
+ VM_FAULT_DONE_COW = 4096,
+ VM_FAULT_NEEDDSYNC = 8192,
+ VM_FAULT_HINDEX_MASK = 983040,
+};
+
+struct vm_special_mapping {
+ const char *name;
+ struct page **pages;
+ vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *);
+ int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *);
+};
+
+struct timens_offsets {
+ struct timespec64 monotonic;
+ struct timespec64 boottime;
+};
+
+struct time_namespace {
+ struct kref kref;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct ns_common ns;
+ struct timens_offsets offsets;
+ struct page *vvar_page;
+ bool frozen_offsets;
+};
+
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 flags;
+ u8 pad[2];
+};
+
+struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum vdso_clock_mode {
+ VDSO_CLOCKMODE_NONE = 0,
+ VDSO_CLOCKMODE_TSC = 1,
+ VDSO_CLOCKMODE_PVCLOCK = 2,
+ VDSO_CLOCKMODE_HVCLOCK = 3,
+ VDSO_CLOCKMODE_MAX = 4,
+ VDSO_CLOCKMODE_TIMENS = 2147483647,
+};
+
+struct vdso_timestamp {
+ u64 sec;
+ u64 nsec;
+};
+
+struct vdso_data {
+ u32 seq;
+ s32 clock_mode;
+ u64 cycle_last;
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ union {
+ struct vdso_timestamp basetime[12];
+ struct timens_offset offset[12];
+ };
+ s32 tz_minuteswest;
+ s32 tz_dsttime;
+ u32 hrtimer_res;
+ u32 __unused;
+};
+
+struct irq_desc;
+
+typedef struct irq_desc *vector_irq_t[256];
+
+struct ms_hyperv_tsc_page {
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+};
+
+struct ms_hyperv_info {
+ u32 features;
+ u32 misc_features;
+ u32 hints;
+ u32 nested_features;
+ u32 max_vp_index;
+ u32 max_lp_index;
+};
+
+struct mmu_table_batch {
+ struct callback_head rcu;
+ unsigned int nr;
+ void *tables[0];
+};
+
+enum x86_pf_error_code {
+ X86_PF_PROT = 1,
+ X86_PF_WRITE = 2,
+ X86_PF_USER = 4,
+ X86_PF_RSVD = 8,
+ X86_PF_INSTR = 16,
+ X86_PF_PK = 32,
+};
+
+struct trace_event_raw_emulate_vsyscall {
+ struct trace_entry ent;
+ int nr;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_emulate_vsyscall {};
+
+typedef void (*btf_trace_emulate_vsyscall)(void *, int);
+
+enum {
+ EMULATE = 0,
+ XONLY = 1,
+ NONE = 2,
+};
+
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+ PERF_TYPE_BREAKPOINT = 5,
+ PERF_TYPE_MAX = 6,
+};
+
+enum perf_hw_id {
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
+ PERF_COUNT_HW_MAX = 10,
+};
+
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+ PERF_COUNT_HW_CACHE_NODE = 6,
+ PERF_COUNT_HW_CACHE_MAX = 7,
+};
+
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+ PERF_COUNT_HW_CACHE_OP_MAX = 3,
+};
+
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+ PERF_COUNT_HW_CACHE_RESULT_MAX = 2,
+};
+
+enum perf_event_sample_format {
+ PERF_SAMPLE_IP = 1,
+ PERF_SAMPLE_TID = 2,
+ PERF_SAMPLE_TIME = 4,
+ PERF_SAMPLE_ADDR = 8,
+ PERF_SAMPLE_READ = 16,
+ PERF_SAMPLE_CALLCHAIN = 32,
+ PERF_SAMPLE_ID = 64,
+ PERF_SAMPLE_CPU = 128,
+ PERF_SAMPLE_PERIOD = 256,
+ PERF_SAMPLE_STREAM_ID = 512,
+ PERF_SAMPLE_RAW = 1024,
+ PERF_SAMPLE_BRANCH_STACK = 2048,
+ PERF_SAMPLE_REGS_USER = 4096,
+ PERF_SAMPLE_STACK_USER = 8192,
+ PERF_SAMPLE_WEIGHT = 16384,
+ PERF_SAMPLE_DATA_SRC = 32768,
+ PERF_SAMPLE_IDENTIFIER = 65536,
+ PERF_SAMPLE_TRANSACTION = 131072,
+ PERF_SAMPLE_REGS_INTR = 262144,
+ PERF_SAMPLE_PHYS_ADDR = 524288,
+ PERF_SAMPLE_AUX = 1048576,
+ PERF_SAMPLE_CGROUP = 2097152,
+ PERF_SAMPLE_MAX = 4194304,
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 0,
+};
+
+enum perf_branch_sample_type {
+ PERF_SAMPLE_BRANCH_USER = 1,
+ PERF_SAMPLE_BRANCH_KERNEL = 2,
+ PERF_SAMPLE_BRANCH_HV = 4,
+ PERF_SAMPLE_BRANCH_ANY = 8,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 16,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 32,
+ PERF_SAMPLE_BRANCH_IND_CALL = 64,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 128,
+ PERF_SAMPLE_BRANCH_IN_TX = 256,
+ PERF_SAMPLE_BRANCH_NO_TX = 512,
+ PERF_SAMPLE_BRANCH_COND = 1024,
+ PERF_SAMPLE_BRANCH_CALL_STACK = 2048,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 4096,
+ PERF_SAMPLE_BRANCH_CALL = 8192,
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 16384,
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 32768,
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536,
+ PERF_SAMPLE_BRANCH_HW_INDEX = 131072,
+ PERF_SAMPLE_BRANCH_MAX = 262144,
+};
+
+struct perf_event_mmap_page {
+ __u32 version;
+ __u32 compat_version;
+ __u32 lock;
+ __u32 index;
+ __s64 offset;
+ __u64 time_enabled;
+ __u64 time_running;
+ union {
+ __u64 capabilities;
+ struct {
+ __u64 cap_bit0: 1;
+ __u64 cap_bit0_is_deprecated: 1;
+ __u64 cap_user_rdpmc: 1;
+ __u64 cap_user_time: 1;
+ __u64 cap_user_time_zero: 1;
+ __u64 cap_____res: 59;
+ };
+ };
+ __u16 pmc_width;
+ __u16 time_shift;
+ __u32 time_mult;
+ __u64 time_offset;
+ __u64 time_zero;
+ __u32 size;
+ __u8 __reserved[948];
+ __u64 data_head;
+ __u64 data_tail;
+ __u64 data_offset;
+ __u64 data_size;
+ __u64 aux_head;
+ __u64 aux_tail;
+ __u64 aux_offset;
+ __u64 aux_size;
+};
+
+struct ldt_struct {
+ struct desc_struct *entries;
+ unsigned int nr_entries;
+ int slot;
+};
+
+struct __large_struct {
+ long unsigned int buf[100];
+};
+
+struct x86_pmu_capability {
+ int version;
+ int num_counters_gp;
+ int num_counters_fixed;
+ int bit_width_gp;
+ int bit_width_fixed;
+ unsigned int events_mask;
+ int events_mask_len;
+};
+
+enum stack_type {
+ STACK_TYPE_UNKNOWN = 0,
+ STACK_TYPE_TASK = 1,
+ STACK_TYPE_IRQ = 2,
+ STACK_TYPE_SOFTIRQ = 3,
+ STACK_TYPE_ENTRY = 4,
+ STACK_TYPE_EXCEPTION = 5,
+ STACK_TYPE_EXCEPTION_LAST = 8,
+};
+
+struct stack_info {
+ enum stack_type type;
+ long unsigned int *begin;
+ long unsigned int *end;
+ long unsigned int *next_sp;
+};
+
+struct stack_frame {
+ struct stack_frame *next_frame;
+ long unsigned int return_address;
+};
+
+struct perf_guest_switch_msr {
+ unsigned int msr;
+ u64 host;
+ u64 guest;
+};
+
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *, struct device_attribute *, char *);
+ ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t);
+};
+
+enum perf_event_x86_regs {
+ PERF_REG_X86_AX = 0,
+ PERF_REG_X86_BX = 1,
+ PERF_REG_X86_CX = 2,
+ PERF_REG_X86_DX = 3,
+ PERF_REG_X86_SI = 4,
+ PERF_REG_X86_DI = 5,
+ PERF_REG_X86_BP = 6,
+ PERF_REG_X86_SP = 7,
+ PERF_REG_X86_IP = 8,
+ PERF_REG_X86_FLAGS = 9,
+ PERF_REG_X86_CS = 10,
+ PERF_REG_X86_SS = 11,
+ PERF_REG_X86_DS = 12,
+ PERF_REG_X86_ES = 13,
+ PERF_REG_X86_FS = 14,
+ PERF_REG_X86_GS = 15,
+ PERF_REG_X86_R8 = 16,
+ PERF_REG_X86_R9 = 17,
+ PERF_REG_X86_R10 = 18,
+ PERF_REG_X86_R11 = 19,
+ PERF_REG_X86_R12 = 20,
+ PERF_REG_X86_R13 = 21,
+ PERF_REG_X86_R14 = 22,
+ PERF_REG_X86_R15 = 23,
+ PERF_REG_X86_32_MAX = 16,
+ PERF_REG_X86_64_MAX = 24,
+ PERF_REG_X86_XMM0 = 32,
+ PERF_REG_X86_XMM1 = 34,
+ PERF_REG_X86_XMM2 = 36,
+ PERF_REG_X86_XMM3 = 38,
+ PERF_REG_X86_XMM4 = 40,
+ PERF_REG_X86_XMM5 = 42,
+ PERF_REG_X86_XMM6 = 44,
+ PERF_REG_X86_XMM7 = 46,
+ PERF_REG_X86_XMM8 = 48,
+ PERF_REG_X86_XMM9 = 50,
+ PERF_REG_X86_XMM10 = 52,
+ PERF_REG_X86_XMM11 = 54,
+ PERF_REG_X86_XMM12 = 56,
+ PERF_REG_X86_XMM13 = 58,
+ PERF_REG_X86_XMM14 = 60,
+ PERF_REG_X86_XMM15 = 62,
+ PERF_REG_X86_XMM_MAX = 64,
+};
+
+struct perf_callchain_entry_ctx {
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short int contexts;
+ bool contexts_maxed;
+};
+
+struct perf_pmu_events_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+};
+
+struct perf_pmu_events_ht_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str_ht;
+ const char *event_str_noht;
+};
+
+enum {
+ NMI_LOCAL = 0,
+ NMI_UNKNOWN = 1,
+ NMI_SERR = 2,
+ NMI_IO_CHECK = 3,
+ NMI_MAX = 4,
+};
+
+typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
+
+struct nmiaction {
+ struct list_head list;
+ nmi_handler_t handler;
+ u64 max_duration;
+ long unsigned int flags;
+ const char *name;
+};
+
+struct cyc2ns_data {
+ u32 cyc2ns_mul;
+ u32 cyc2ns_shift;
+ u64 cyc2ns_offset;
+};
+
+struct unwind_state {
+ struct stack_info stack_info;
+ long unsigned int stack_mask;
+ struct task_struct *task;
+ int graph_idx;
+ bool error;
+ bool signal;
+ bool full_regs;
+ long unsigned int sp;
+ long unsigned int bp;
+ long unsigned int ip;
+ struct pt_regs *regs;
+ struct pt_regs *prev_regs;
+};
+
+enum extra_reg_type {
+ EXTRA_REG_NONE = -1,
+ EXTRA_REG_RSP_0 = 0,
+ EXTRA_REG_RSP_1 = 1,
+ EXTRA_REG_LBR = 2,
+ EXTRA_REG_LDLAT = 3,
+ EXTRA_REG_FE = 4,
+ EXTRA_REG_MAX = 5,
+};
+
+struct event_constraint {
+ union {
+ long unsigned int idxmsk[1];
+ u64 idxmsk64;
+ };
+ u64 code;
+ u64 cmask;
+ int weight;
+ int overlap;
+ int flags;
+ unsigned int size;
+};
+
+struct amd_nb {
+ int nb_id;
+ int refcnt;
+ struct perf_event *owners[64];
+ struct event_constraint event_constraints[64];
+};
+
+struct er_account {
+ raw_spinlock_t lock;
+ u64 config;
+ u64 reg;
+ atomic_t ref;
+};
+
+struct intel_shared_regs {
+ struct er_account regs[5];
+ int refcnt;
+ unsigned int core_id;
+};
+
+enum intel_excl_state_type {
+ INTEL_EXCL_UNUSED = 0,
+ INTEL_EXCL_SHARED = 1,
+ INTEL_EXCL_EXCLUSIVE = 2,
+};
+
+struct intel_excl_states {
+ enum intel_excl_state_type state[64];
+ bool sched_started;
+};
+
+struct intel_excl_cntrs {
+ raw_spinlock_t lock;
+ struct intel_excl_states states[2];
+ union {
+ u16 has_exclusive[2];
+ u32 exclusive_present;
+ };
+ int refcnt;
+ unsigned int core_id;
+};
+
+enum {
+ X86_PERF_KFREE_SHARED = 0,
+ X86_PERF_KFREE_EXCL = 1,
+ X86_PERF_KFREE_MAX = 2,
+};
+
+struct x86_perf_task_context;
+
+struct cpu_hw_events {
+ struct perf_event *events[64];
+ long unsigned int active_mask[1];
+ long unsigned int running[1];
+ int enabled;
+ int n_events;
+ int n_added;
+ int n_txn;
+ int assign[64];
+ u64 tags[64];
+ struct perf_event *event_list[64];
+ struct event_constraint *event_constraint[64];
+ int n_excl;
+ unsigned int txn_flags;
+ int is_fake;
+ struct debug_store *ds;
+ void *ds_pebs_vaddr;
+ void *ds_bts_vaddr;
+ u64 pebs_enabled;
+ int n_pebs;
+ int n_large_pebs;
+ int n_pebs_via_pt;
+ int pebs_output;
+ u64 pebs_data_cfg;
+ u64 active_pebs_data_cfg;
+ int pebs_record_size;
+ int lbr_users;
+ int lbr_pebs_users;
+ struct perf_branch_stack lbr_stack;
+ struct perf_branch_entry lbr_entries[32];
+ struct er_account *lbr_sel;
+ u64 br_sel;
+ struct x86_perf_task_context *last_task_ctx;
+ int last_log_id;
+ u64 intel_ctrl_guest_mask;
+ u64 intel_ctrl_host_mask;
+ struct perf_guest_switch_msr guest_switch_msrs[64];
+ u64 intel_cp_status;
+ struct intel_shared_regs *shared_regs;
+ struct event_constraint *constraint_list;
+ struct intel_excl_cntrs *excl_cntrs;
+ int excl_thread_id;
+ u64 tfa_shadow;
+ struct amd_nb *amd_nb;
+ u64 perf_ctr_virt_mask;
+ int n_pair;
+ void *kfree_on_online[2];
+};
+
+struct x86_perf_task_context {
+ u64 lbr_from[32];
+ u64 lbr_to[32];
+ u64 lbr_info[32];
+ int tos;
+ int valid_lbrs;
+ int lbr_callstack_users;
+ int lbr_stack_state;
+ int log_id;
+};
+
+struct extra_reg {
+ unsigned int event;
+ unsigned int msr;
+ u64 config_mask;
+ u64 valid_mask;
+ int idx;
+ bool extra_msr_access;
+};
+
+union perf_capabilities {
+ struct {
+ u64 lbr_format: 6;
+ u64 pebs_trap: 1;
+ u64 pebs_arch_reg: 1;
+ u64 pebs_format: 4;
+ u64 smm_freeze: 1;
+ u64 full_width_write: 1;
+ u64 pebs_baseline: 1;
+ u64 pebs_metrics_available: 1;
+ u64 pebs_output_pt_available: 1;
+ };
+ u64 capabilities;
+};
+
+struct x86_pmu_quirk {
+ struct x86_pmu_quirk *next;
+ void (*func)();
+};
+
+enum {
+ x86_lbr_exclusive_lbr = 0,
+ x86_lbr_exclusive_bts = 1,
+ x86_lbr_exclusive_pt = 2,
+ x86_lbr_exclusive_max = 3,
+};
+
+struct x86_pmu {
+ const char *name;
+ int version;
+ int (*handle_irq)(struct pt_regs *);
+ void (*disable_all)();
+ void (*enable_all)(int);
+ void (*enable)(struct perf_event *);
+ void (*disable)(struct perf_event *);
+ void (*add)(struct perf_event *);
+ void (*del)(struct perf_event *);
+ void (*read)(struct perf_event *);
+ int (*hw_config)(struct perf_event *);
+ int (*schedule_events)(struct cpu_hw_events *, int, int *);
+ unsigned int eventsel;
+ unsigned int perfctr;
+ int (*addr_offset)(int, bool);
+ int (*rdpmc_index)(int);
+ u64 (*event_map)(int);
+ int max_events;
+ int num_counters;
+ int num_counters_fixed;
+ int cntval_bits;
+ u64 cntval_mask;
+ union {
+ long unsigned int events_maskl;
+ long unsigned int events_mask[1];
+ };
+ int events_mask_len;
+ int apic;
+ u64 max_period;
+ struct event_constraint * (*get_event_constraints)(struct cpu_hw_events *, int, struct perf_event *);
+ void (*put_event_constraints)(struct cpu_hw_events *, struct perf_event *);
+ void (*start_scheduling)(struct cpu_hw_events *);
+ void (*commit_scheduling)(struct cpu_hw_events *, int, int);
+ void (*stop_scheduling)(struct cpu_hw_events *);
+ struct event_constraint *event_constraints;
+ struct x86_pmu_quirk *quirks;
+ int perfctr_second_write;
+ u64 (*limit_period)(struct perf_event *, u64);
+ unsigned int late_ack: 1;
+ unsigned int enabled_ack: 1;
+ unsigned int counter_freezing: 1;
+ int attr_rdpmc_broken;
+ int attr_rdpmc;
+ struct attribute **format_attrs;
+ ssize_t (*events_sysfs_show)(char *, u64);
+ const struct attribute_group **attr_update;
+ long unsigned int attr_freeze_on_smi;
+ int (*cpu_prepare)(int);
+ void (*cpu_starting)(int);
+ void (*cpu_dying)(int);
+ void (*cpu_dead)(int);
+ void (*check_microcode)();
+ void (*sched_task)(struct perf_event_context *, bool);
+ u64 intel_ctrl;
+ union perf_capabilities intel_cap;
+ unsigned int bts: 1;
+ unsigned int bts_active: 1;
+ unsigned int pebs: 1;
+ unsigned int pebs_active: 1;
+ unsigned int pebs_broken: 1;
+ unsigned int pebs_prec_dist: 1;
+ unsigned int pebs_no_tlb: 1;
+ unsigned int pebs_no_isolation: 1;
+ int pebs_record_size;
+ int pebs_buffer_size;
+ int max_pebs_events;
+ void (*drain_pebs)(struct pt_regs *);
+ struct event_constraint *pebs_constraints;
+ void (*pebs_aliases)(struct perf_event *);
+ long unsigned int large_pebs_flags;
+ u64 rtm_abort_event;
+ long unsigned int lbr_tos;
+ long unsigned int lbr_from;
+ long unsigned int lbr_to;
+ int lbr_nr;
+ u64 lbr_sel_mask;
+ const int *lbr_sel_map;
+ bool lbr_double_abort;
+ bool lbr_pt_coexist;
+ atomic_t lbr_exclusive[3];
+ void (*swap_task_ctx)(struct perf_event_context *, struct perf_event_context *);
+ unsigned int amd_nb_constraints: 1;
+ u64 perf_ctr_pair_en;
+ struct extra_reg *extra_regs;
+ unsigned int flags;
+ struct perf_guest_switch_msr * (*guest_get_msrs)(int *);
+ int (*check_period)(struct perf_event *, u64);
+ int (*aux_output_match)(struct perf_event *);
+};
+
+struct sched_state {
+ int weight;
+ int event;
+ int counter;
+ int unassigned;
+ int nr_gp;
+ u64 used;
+};
+
+struct perf_sched {
+ int max_weight;
+ int max_events;
+ int max_gp;
+ int saved_states;
+ struct event_constraint **constraints;
+ struct sched_state state;
+ struct sched_state saved[2];
+};
+
+typedef int pto_T_____2;
+
+typedef unsigned int pao_T_____2;
+
+enum migratetype {
+ MIGRATE_UNMOVABLE = 0,
+ MIGRATE_MOVABLE = 1,
+ MIGRATE_RECLAIMABLE = 2,
+ MIGRATE_PCPTYPES = 3,
+ MIGRATE_HIGHATOMIC = 3,
+ MIGRATE_ISOLATE = 4,
+ MIGRATE_TYPES = 5,
+};
+
+enum zone_watermarks {
+ WMARK_MIN = 0,
+ WMARK_LOW = 1,
+ WMARK_HIGH = 2,
+ NR_WMARK = 3,
+};
+
+enum {
+ ZONELIST_FALLBACK = 0,
+ MAX_ZONELISTS = 1,
+};
+
+struct perf_msr {
+ u64 msr;
+ struct attribute_group *grp;
+ bool (*test)(int, void *);
+ bool no_check;
+};
+
+struct amd_uncore {
+ int id;
+ int refcnt;
+ int cpu;
+ int num_counters;
+ int rdpmc_base;
+ u32 msr_base;
+ cpumask_t *active_mask;
+ struct pmu *pmu;
+ struct perf_event *events[6];
+ struct hlist_node node;
+};
+
+typedef int pci_power_t;
+
+typedef unsigned int pci_channel_state_t;
+
+typedef short unsigned int pci_dev_flags_t;
+
+struct pci_bus;
+
+struct pci_slot;
+
+struct pci_driver;
+
+struct pcie_link_state;
+
+struct pci_vpd;
+
+struct pci_dev {
+ struct list_head bus_list;
+ struct pci_bus *bus;
+ struct pci_bus *subordinate;
+ void *sysdata;
+ struct proc_dir_entry *procent;
+ struct pci_slot *slot;
+ unsigned int devfn;
+ short unsigned int vendor;
+ short unsigned int device;
+ short unsigned int subsystem_vendor;
+ short unsigned int subsystem_device;
+ unsigned int class;
+ u8 revision;
+ u8 hdr_type;
+ u8 pcie_cap;
+ u8 msi_cap;
+ u8 msix_cap;
+ u8 pcie_mpss: 3;
+ u8 rom_base_reg;
+ u8 pin;
+ u16 pcie_flags_reg;
+ long unsigned int *dma_alias_mask;
+ struct pci_driver *driver;
+ u64 dma_mask;
+ struct device_dma_parameters dma_parms;
+ pci_power_t current_state;
+ unsigned int imm_ready: 1;
+ u8 pm_cap;
+ unsigned int pme_support: 5;
+ unsigned int pme_poll: 1;
+ unsigned int d1_support: 1;
+ unsigned int d2_support: 1;
+ unsigned int no_d1d2: 1;
+ unsigned int no_d3cold: 1;
+ unsigned int bridge_d3: 1;
+ unsigned int d3cold_allowed: 1;
+ unsigned int mmio_always_on: 1;
+ unsigned int wakeup_prepared: 1;
+ unsigned int runtime_d3cold: 1;
+ unsigned int skip_bus_pm: 1;
+ unsigned int ignore_hotplug: 1;
+ unsigned int hotplug_user_indicators: 1;
+ unsigned int clear_retrain_link: 1;
+ unsigned int d3_delay;
+ unsigned int d3cold_delay;
+ struct pcie_link_state *link_state;
+ unsigned int ltr_path: 1;
+ unsigned int eetlp_prefix_path: 1;
+ pci_channel_state_t error_state;
+ struct device dev;
+ int cfg_size;
+ unsigned int irq;
+ struct resource resource[11];
+ bool match_driver;
+ unsigned int transparent: 1;
+ unsigned int io_window: 1;
+ unsigned int pref_window: 1;
+ unsigned int pref_64_window: 1;
+ unsigned int multifunction: 1;
+ unsigned int is_busmaster: 1;
+ unsigned int no_msi: 1;
+ unsigned int no_64bit_msi: 1;
+ unsigned int block_cfg_access: 1;
+ unsigned int broken_parity_status: 1;
+ unsigned int irq_reroute_variant: 2;
+ unsigned int msi_enabled: 1;
+ unsigned int msix_enabled: 1;
+ unsigned int ari_enabled: 1;
+ unsigned int ats_enabled: 1;
+ unsigned int pasid_enabled: 1;
+ unsigned int pri_enabled: 1;
+ unsigned int is_managed: 1;
+ unsigned int needs_freset: 1;
+ unsigned int state_saved: 1;
+ unsigned int is_physfn: 1;
+ unsigned int is_virtfn: 1;
+ unsigned int reset_fn: 1;
+ unsigned int is_hotplug_bridge: 1;
+ unsigned int shpc_managed: 1;
+ unsigned int is_thunderbolt: 1;
+ unsigned int untrusted: 1;
+ unsigned int broken_intx_masking: 1;
+ unsigned int io_window_1k: 1;
+ unsigned int irq_managed: 1;
+ unsigned int non_compliant_bars: 1;
+ unsigned int is_probed: 1;
+ unsigned int link_active_reporting: 1;
+ unsigned int no_vf_scan: 1;
+ pci_dev_flags_t dev_flags;
+ atomic_t enable_cnt;
+ u32 saved_config_space[16];
+ struct hlist_head saved_cap_space;
+ struct bin_attribute *rom_attr;
+ int rom_attr_enabled;
+ struct bin_attribute *res_attr[11];
+ struct bin_attribute *res_attr_wc[11];
+ unsigned int broken_cmd_compl: 1;
+ const struct attribute_group **msi_irq_groups;
+ struct pci_vpd *vpd;
+ phys_addr_t rom;
+ size_t romlen;
+ char *driver_override;
+ long unsigned int priv_flags;
+};
+
+struct pci_device_id {
+ __u32 vendor;
+ __u32 device;
+ __u32 subvendor;
+ __u32 subdevice;
+ __u32 class;
+ __u32 class_mask;
+ kernel_ulong_t driver_data;
+};
+
+struct hotplug_slot;
+
+struct pci_slot {
+ struct pci_bus *bus;
+ struct list_head list;
+ struct hotplug_slot *hotplug;
+ unsigned char number;
+ struct kobject kobj;
+};
+
+typedef short unsigned int pci_bus_flags_t;
+
+struct pci_ops;
+
+struct msi_controller;
+
+struct pci_bus {
+ struct list_head node;
+ struct pci_bus *parent;
+ struct list_head children;
+ struct list_head devices;
+ struct pci_dev *self;
+ struct list_head slots;
+ struct resource *resource[4];
+ struct list_head resources;
+ struct resource busn_res;
+ struct pci_ops *ops;
+ struct msi_controller *msi;
+ void *sysdata;
+ struct proc_dir_entry *procdir;
+ unsigned char number;
+ unsigned char primary;
+ unsigned char max_bus_speed;
+ unsigned char cur_bus_speed;
+ char name[48];
+ short unsigned int bridge_ctl;
+ pci_bus_flags_t bus_flags;
+ struct device *bridge;
+ struct device dev;
+ struct bin_attribute *legacy_io;
+ struct bin_attribute *legacy_mem;
+ unsigned int is_added: 1;
+};
+
+enum {
+ PCI_STD_RESOURCES = 0,
+ PCI_STD_RESOURCE_END = 5,
+ PCI_ROM_RESOURCE = 6,
+ PCI_BRIDGE_RESOURCES = 7,
+ PCI_BRIDGE_RESOURCE_END = 10,
+ PCI_NUM_RESOURCES = 11,
+ DEVICE_COUNT_RESOURCE = 11,
+};
+
+enum pci_channel_state {
+ pci_channel_io_normal = 1,
+ pci_channel_io_frozen = 2,
+ pci_channel_io_perm_failure = 3,
+};
+
+typedef unsigned int pcie_reset_state_t;
+
+struct pci_dynids {
+ spinlock_t lock;
+ struct list_head list;
+};
+
+struct pci_error_handlers;
+
+struct pci_driver {
+ struct list_head node;
+ const char *name;
+ const struct pci_device_id *id_table;
+ int (*probe)(struct pci_dev *, const struct pci_device_id *);
+ void (*remove)(struct pci_dev *);
+ int (*suspend)(struct pci_dev *, pm_message_t);
+ int (*resume)(struct pci_dev *);
+ void (*shutdown)(struct pci_dev *);
+ int (*sriov_configure)(struct pci_dev *, int);
+ const struct pci_error_handlers *err_handler;
+ const struct attribute_group **groups;
+ struct device_driver driver;
+ struct pci_dynids dynids;
+};
+
+struct pci_ops {
+ int (*add_bus)(struct pci_bus *);
+ void (*remove_bus)(struct pci_bus *);
+ void * (*map_bus)(struct pci_bus *, unsigned int, int);
+ int (*read)(struct pci_bus *, unsigned int, int, int, u32 *);
+ int (*write)(struct pci_bus *, unsigned int, int, int, u32);
+};
+
+typedef unsigned int pci_ers_result_t;
+
+struct pci_error_handlers {
+ pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state);
+ pci_ers_result_t (*mmio_enabled)(struct pci_dev *);
+ pci_ers_result_t (*slot_reset)(struct pci_dev *);
+ void (*reset_prepare)(struct pci_dev *);
+ void (*reset_done)(struct pci_dev *);
+ void (*resume)(struct pci_dev *);
+};
+
+enum pcie_bus_config_types {
+ PCIE_BUS_TUNE_OFF = 0,
+ PCIE_BUS_DEFAULT = 1,
+ PCIE_BUS_SAFE = 2,
+ PCIE_BUS_PERFORMANCE = 3,
+ PCIE_BUS_PEER2PEER = 4,
+};
+
+enum ibs_states {
+ IBS_ENABLED = 0,
+ IBS_STARTED = 1,
+ IBS_STOPPING = 2,
+ IBS_STOPPED = 3,
+ IBS_MAX_STATES = 4,
+};
+
+struct cpu_perf_ibs {
+ struct perf_event *event;
+ long unsigned int state[1];
+};
+
+struct perf_ibs {
+ struct pmu pmu;
+ unsigned int msr;
+ u64 config_mask;
+ u64 cnt_mask;
+ u64 enable_mask;
+ u64 valid_mask;
+ u64 max_period;
+ long unsigned int offset_mask[1];
+ int offset_max;
+ struct cpu_perf_ibs *pcpu;
+ struct attribute **format_attrs;
+ struct attribute_group format_group;
+ const struct attribute_group *attr_groups[2];
+ u64 (*get_count)(u64);
+};
+
+struct perf_ibs_data {
+ u32 size;
+ union {
+ u32 data[0];
+ u32 caps;
+ };
+ u64 regs[8];
+};
+
+enum perf_msr_id {
+ PERF_MSR_TSC = 0,
+ PERF_MSR_APERF = 1,
+ PERF_MSR_MPERF = 2,
+ PERF_MSR_PPERF = 3,
+ PERF_MSR_SMI = 4,
+ PERF_MSR_PTSC = 5,
+ PERF_MSR_IRPERF = 6,
+ PERF_MSR_THERM = 7,
+ PERF_MSR_EVENT_MAX = 8,
+};
+
+struct x86_cpu_desc {
+ u8 x86_family;
+ u8 x86_vendor;
+ u8 x86_model;
+ u8 x86_stepping;
+ u32 x86_microcode_rev;
+};
+
+union cpuid10_eax {
+ struct {
+ unsigned int version_id: 8;
+ unsigned int num_counters: 8;
+ unsigned int bit_width: 8;
+ unsigned int mask_length: 8;
+ } split;
+ unsigned int full;
+};
+
+union cpuid10_ebx {
+ struct {
+ unsigned int no_unhalted_core_cycles: 1;
+ unsigned int no_instructions_retired: 1;
+ unsigned int no_unhalted_reference_cycles: 1;
+ unsigned int no_llc_reference: 1;
+ unsigned int no_llc_misses: 1;
+ unsigned int no_branch_instruction_retired: 1;
+ unsigned int no_branch_misses_retired: 1;
+ } split;
+ unsigned int full;
+};
+
+union cpuid10_edx {
+ struct {
+ unsigned int num_counters_fixed: 5;
+ unsigned int bit_width_fixed: 8;
+ unsigned int reserved: 19;
+ } split;
+ unsigned int full;
+};
+
+union x86_pmu_config {
+ struct {
+ u64 event: 8;
+ u64 umask: 8;
+ u64 usr: 1;
+ u64 os: 1;
+ u64 edge: 1;
+ u64 pc: 1;
+ u64 interrupt: 1;
+ u64 __reserved1: 1;
+ u64 en: 1;
+ u64 inv: 1;
+ u64 cmask: 8;
+ u64 event2: 4;
+ u64 __reserved2: 4;
+ u64 go: 1;
+ u64 ho: 1;
+ } bits;
+ u64 value;
+};
+
+enum pageflags {
+ PG_locked = 0,
+ PG_referenced = 1,
+ PG_uptodate = 2,
+ PG_dirty = 3,
+ PG_lru = 4,
+ PG_active = 5,
+ PG_workingset = 6,
+ PG_waiters = 7,
+ PG_error = 8,
+ PG_slab = 9,
+ PG_owner_priv_1 = 10,
+ PG_arch_1 = 11,
+ PG_reserved = 12,
+ PG_private = 13,
+ PG_private_2 = 14,
+ PG_writeback = 15,
+ PG_head = 16,
+ PG_mappedtodisk = 17,
+ PG_reclaim = 18,
+ PG_swapbacked = 19,
+ PG_unevictable = 20,
+ PG_mlocked = 21,
+ PG_uncached = 22,
+ __NR_PAGEFLAGS = 23,
+ PG_checked = 10,
+ PG_swapcache = 10,
+ PG_fscache = 14,
+ PG_pinned = 10,
+ PG_savepinned = 3,
+ PG_foreign = 10,
+ PG_xen_remapped = 10,
+ PG_slob_free = 13,
+ PG_double_map = 14,
+ PG_isolated = 18,
+ PG_reported = 2,
+};
+
+struct bts_ctx {
+ struct perf_output_handle handle;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct debug_store ds_back;
+ int state;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum {
+ BTS_STATE_STOPPED = 0,
+ BTS_STATE_INACTIVE = 1,
+ BTS_STATE_ACTIVE = 2,
+};
+
+struct bts_phys {
+ struct page *page;
+ long unsigned int size;
+ long unsigned int offset;
+ long unsigned int displacement;
+};
+
+struct bts_buffer {
+ size_t real_size;
+ unsigned int nr_pages;
+ unsigned int nr_bufs;
+ unsigned int cur_buf;
+ bool snapshot;
+ local_t data_size;
+ local_t head;
+ long unsigned int end;
+ void **data_pages;
+ struct bts_phys buf[0];
+};
+
+struct pebs_basic {
+ u64 format_size;
+ u64 ip;
+ u64 applicable_counters;
+ u64 tsc;
+};
+
+struct pebs_meminfo {
+ u64 address;
+ u64 aux;
+ u64 latency;
+ u64 tsx_tuning;
+};
+
+struct pebs_gprs {
+ u64 flags;
+ u64 ip;
+ u64 ax;
+ u64 cx;
+ u64 dx;
+ u64 bx;
+ u64 sp;
+ u64 bp;
+ u64 si;
+ u64 di;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+};
+
+struct pebs_xmm {
+ u64 xmm[32];
+};
+
+struct pebs_lbr_entry {
+ u64 from;
+ u64 to;
+ u64 info;
+};
+
+struct pebs_lbr {
+ struct pebs_lbr_entry lbr[0];
+};
+
+struct x86_perf_regs {
+ struct pt_regs regs;
+ u64 *xmm_regs;
+};
+
+typedef unsigned int insn_attr_t;
+
+typedef unsigned char insn_byte_t;
+
+typedef int insn_value_t;
+
+struct insn_field {
+ union {
+ insn_value_t value;
+ insn_byte_t bytes[4];
+ };
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+struct insn {
+ struct insn_field prefixes;
+ struct insn_field rex_prefix;
+ struct insn_field vex_prefix;
+ struct insn_field opcode;
+ struct insn_field modrm;
+ struct insn_field sib;
+ struct insn_field displacement;
+ union {
+ struct insn_field immediate;
+ struct insn_field moffset1;
+ struct insn_field immediate1;
+ };
+ union {
+ struct insn_field moffset2;
+ struct insn_field immediate2;
+ };
+ int emulate_prefix_size;
+ insn_attr_t attr;
+ unsigned char opnd_bytes;
+ unsigned char addr_bytes;
+ unsigned char length;
+ unsigned char x86_64;
+ const insn_byte_t *kaddr;
+ const insn_byte_t *end_kaddr;
+ const insn_byte_t *next_byte;
+};
+
+enum {
+ PERF_TXN_ELISION = 1,
+ PERF_TXN_TRANSACTION = 2,
+ PERF_TXN_SYNC = 4,
+ PERF_TXN_ASYNC = 8,
+ PERF_TXN_RETRY = 16,
+ PERF_TXN_CONFLICT = 32,
+ PERF_TXN_CAPACITY_WRITE = 64,
+ PERF_TXN_CAPACITY_READ = 128,
+ PERF_TXN_MAX = 256,
+ PERF_TXN_ABORT_MASK = 0,
+ PERF_TXN_ABORT_SHIFT = 32,
+};
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+union intel_x86_pebs_dse {
+ u64 val;
+ struct {
+ unsigned int ld_dse: 4;
+ unsigned int ld_stlb_miss: 1;
+ unsigned int ld_locked: 1;
+ unsigned int ld_reserved: 26;
+ };
+ struct {
+ unsigned int st_l1d_hit: 1;
+ unsigned int st_reserved1: 3;
+ unsigned int st_stlb_miss: 1;
+ unsigned int st_locked: 1;
+ unsigned int st_reserved2: 26;
+ };
+};
+
+struct pebs_record_core {
+ u64 flags;
+ u64 ip;
+ u64 ax;
+ u64 bx;
+ u64 cx;
+ u64 dx;
+ u64 si;
+ u64 di;
+ u64 bp;
+ u64 sp;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+};
+
+struct pebs_record_nhm {
+ u64 flags;
+ u64 ip;
+ u64 ax;
+ u64 bx;
+ u64 cx;
+ u64 dx;
+ u64 si;
+ u64 di;
+ u64 bp;
+ u64 sp;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ u64 status;
+ u64 dla;
+ u64 dse;
+ u64 lat;
+};
+
+union hsw_tsx_tuning {
+ struct {
+ u32 cycles_last_block: 32;
+ u32 hle_abort: 1;
+ u32 rtm_abort: 1;
+ u32 instruction_abort: 1;
+ u32 non_instruction_abort: 1;
+ u32 retry: 1;
+ u32 data_conflict: 1;
+ u32 capacity_writes: 1;
+ u32 capacity_reads: 1;
+ };
+ u64 value;
+};
+
+struct pebs_record_skl {
+ u64 flags;
+ u64 ip;
+ u64 ax;
+ u64 bx;
+ u64 cx;
+ u64 dx;
+ u64 si;
+ u64 di;
+ u64 bp;
+ u64 sp;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ u64 status;
+ u64 dla;
+ u64 dse;
+ u64 lat;
+ u64 real_ip;
+ u64 tsx_tuning;
+ u64 tsc;
+};
+
+struct bts_record {
+ u64 from;
+ u64 to;
+ u64 flags;
+};
+
+enum {
+ PERF_BR_UNKNOWN = 0,
+ PERF_BR_COND = 1,
+ PERF_BR_UNCOND = 2,
+ PERF_BR_IND = 3,
+ PERF_BR_CALL = 4,
+ PERF_BR_IND_CALL = 5,
+ PERF_BR_RET = 6,
+ PERF_BR_SYSCALL = 7,
+ PERF_BR_SYSRET = 8,
+ PERF_BR_COND_CALL = 9,
+ PERF_BR_COND_RET = 10,
+ PERF_BR_MAX = 11,
+};
+
+enum {
+ LBR_FORMAT_32 = 0,
+ LBR_FORMAT_LIP = 1,
+ LBR_FORMAT_EIP = 2,
+ LBR_FORMAT_EIP_FLAGS = 3,
+ LBR_FORMAT_EIP_FLAGS2 = 4,
+ LBR_FORMAT_INFO = 5,
+ LBR_FORMAT_TIME = 6,
+ LBR_FORMAT_MAX_KNOWN = 6,
+};
+
+enum {
+ X86_BR_NONE = 0,
+ X86_BR_USER = 1,
+ X86_BR_KERNEL = 2,
+ X86_BR_CALL = 4,
+ X86_BR_RET = 8,
+ X86_BR_SYSCALL = 16,
+ X86_BR_SYSRET = 32,
+ X86_BR_INT = 64,
+ X86_BR_IRET = 128,
+ X86_BR_JCC = 256,
+ X86_BR_JMP = 512,
+ X86_BR_IRQ = 1024,
+ X86_BR_IND_CALL = 2048,
+ X86_BR_ABORT = 4096,
+ X86_BR_IN_TX = 8192,
+ X86_BR_NO_TX = 16384,
+ X86_BR_ZERO_CALL = 32768,
+ X86_BR_CALL_STACK = 65536,
+ X86_BR_IND_JMP = 131072,
+ X86_BR_TYPE_SAVE = 262144,
+};
+
+enum {
+ LBR_NONE = 0,
+ LBR_VALID = 1,
+};
+
+enum P4_EVENTS {
+ P4_EVENT_TC_DELIVER_MODE = 0,
+ P4_EVENT_BPU_FETCH_REQUEST = 1,
+ P4_EVENT_ITLB_REFERENCE = 2,
+ P4_EVENT_MEMORY_CANCEL = 3,
+ P4_EVENT_MEMORY_COMPLETE = 4,
+ P4_EVENT_LOAD_PORT_REPLAY = 5,
+ P4_EVENT_STORE_PORT_REPLAY = 6,
+ P4_EVENT_MOB_LOAD_REPLAY = 7,
+ P4_EVENT_PAGE_WALK_TYPE = 8,
+ P4_EVENT_BSQ_CACHE_REFERENCE = 9,
+ P4_EVENT_IOQ_ALLOCATION = 10,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES = 11,
+ P4_EVENT_FSB_DATA_ACTIVITY = 12,
+ P4_EVENT_BSQ_ALLOCATION = 13,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES = 14,
+ P4_EVENT_SSE_INPUT_ASSIST = 15,
+ P4_EVENT_PACKED_SP_UOP = 16,
+ P4_EVENT_PACKED_DP_UOP = 17,
+ P4_EVENT_SCALAR_SP_UOP = 18,
+ P4_EVENT_SCALAR_DP_UOP = 19,
+ P4_EVENT_64BIT_MMX_UOP = 20,
+ P4_EVENT_128BIT_MMX_UOP = 21,
+ P4_EVENT_X87_FP_UOP = 22,
+ P4_EVENT_TC_MISC = 23,
+ P4_EVENT_GLOBAL_POWER_EVENTS = 24,
+ P4_EVENT_TC_MS_XFER = 25,
+ P4_EVENT_UOP_QUEUE_WRITES = 26,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE = 27,
+ P4_EVENT_RETIRED_BRANCH_TYPE = 28,
+ P4_EVENT_RESOURCE_STALL = 29,
+ P4_EVENT_WC_BUFFER = 30,
+ P4_EVENT_B2B_CYCLES = 31,
+ P4_EVENT_BNR = 32,
+ P4_EVENT_SNOOP = 33,
+ P4_EVENT_RESPONSE = 34,
+ P4_EVENT_FRONT_END_EVENT = 35,
+ P4_EVENT_EXECUTION_EVENT = 36,
+ P4_EVENT_REPLAY_EVENT = 37,
+ P4_EVENT_INSTR_RETIRED = 38,
+ P4_EVENT_UOPS_RETIRED = 39,
+ P4_EVENT_UOP_TYPE = 40,
+ P4_EVENT_BRANCH_RETIRED = 41,
+ P4_EVENT_MISPRED_BRANCH_RETIRED = 42,
+ P4_EVENT_X87_ASSIST = 43,
+ P4_EVENT_MACHINE_CLEAR = 44,
+ P4_EVENT_INSTR_COMPLETED = 45,
+};
+
+enum P4_EVENT_OPCODES {
+ P4_EVENT_TC_DELIVER_MODE_OPCODE = 257,
+ P4_EVENT_BPU_FETCH_REQUEST_OPCODE = 768,
+ P4_EVENT_ITLB_REFERENCE_OPCODE = 6147,
+ P4_EVENT_MEMORY_CANCEL_OPCODE = 517,
+ P4_EVENT_MEMORY_COMPLETE_OPCODE = 2050,
+ P4_EVENT_LOAD_PORT_REPLAY_OPCODE = 1026,
+ P4_EVENT_STORE_PORT_REPLAY_OPCODE = 1282,
+ P4_EVENT_MOB_LOAD_REPLAY_OPCODE = 770,
+ P4_EVENT_PAGE_WALK_TYPE_OPCODE = 260,
+ P4_EVENT_BSQ_CACHE_REFERENCE_OPCODE = 3079,
+ P4_EVENT_IOQ_ALLOCATION_OPCODE = 774,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES_OPCODE = 6662,
+ P4_EVENT_FSB_DATA_ACTIVITY_OPCODE = 5894,
+ P4_EVENT_BSQ_ALLOCATION_OPCODE = 1287,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES_OPCODE = 1543,
+ P4_EVENT_SSE_INPUT_ASSIST_OPCODE = 13313,
+ P4_EVENT_PACKED_SP_UOP_OPCODE = 2049,
+ P4_EVENT_PACKED_DP_UOP_OPCODE = 3073,
+ P4_EVENT_SCALAR_SP_UOP_OPCODE = 2561,
+ P4_EVENT_SCALAR_DP_UOP_OPCODE = 3585,
+ P4_EVENT_64BIT_MMX_UOP_OPCODE = 513,
+ P4_EVENT_128BIT_MMX_UOP_OPCODE = 6657,
+ P4_EVENT_X87_FP_UOP_OPCODE = 1025,
+ P4_EVENT_TC_MISC_OPCODE = 1537,
+ P4_EVENT_GLOBAL_POWER_EVENTS_OPCODE = 4870,
+ P4_EVENT_TC_MS_XFER_OPCODE = 1280,
+ P4_EVENT_UOP_QUEUE_WRITES_OPCODE = 2304,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE_OPCODE = 1282,
+ P4_EVENT_RETIRED_BRANCH_TYPE_OPCODE = 1026,
+ P4_EVENT_RESOURCE_STALL_OPCODE = 257,
+ P4_EVENT_WC_BUFFER_OPCODE = 1285,
+ P4_EVENT_B2B_CYCLES_OPCODE = 5635,
+ P4_EVENT_BNR_OPCODE = 2051,
+ P4_EVENT_SNOOP_OPCODE = 1539,
+ P4_EVENT_RESPONSE_OPCODE = 1027,
+ P4_EVENT_FRONT_END_EVENT_OPCODE = 2053,
+ P4_EVENT_EXECUTION_EVENT_OPCODE = 3077,
+ P4_EVENT_REPLAY_EVENT_OPCODE = 2309,
+ P4_EVENT_INSTR_RETIRED_OPCODE = 516,
+ P4_EVENT_UOPS_RETIRED_OPCODE = 260,
+ P4_EVENT_UOP_TYPE_OPCODE = 514,
+ P4_EVENT_BRANCH_RETIRED_OPCODE = 1541,
+ P4_EVENT_MISPRED_BRANCH_RETIRED_OPCODE = 772,
+ P4_EVENT_X87_ASSIST_OPCODE = 773,
+ P4_EVENT_MACHINE_CLEAR_OPCODE = 517,
+ P4_EVENT_INSTR_COMPLETED_OPCODE = 1796,
+};
+
+enum P4_ESCR_EMASKS {
+ P4_EVENT_TC_DELIVER_MODE__DD = 512,
+ P4_EVENT_TC_DELIVER_MODE__DB = 1024,
+ P4_EVENT_TC_DELIVER_MODE__DI = 2048,
+ P4_EVENT_TC_DELIVER_MODE__BD = 4096,
+ P4_EVENT_TC_DELIVER_MODE__BB = 8192,
+ P4_EVENT_TC_DELIVER_MODE__BI = 16384,
+ P4_EVENT_TC_DELIVER_MODE__ID = 32768,
+ P4_EVENT_BPU_FETCH_REQUEST__TCMISS = 512,
+ P4_EVENT_ITLB_REFERENCE__HIT = 512,
+ P4_EVENT_ITLB_REFERENCE__MISS = 1024,
+ P4_EVENT_ITLB_REFERENCE__HIT_UK = 2048,
+ P4_EVENT_MEMORY_CANCEL__ST_RB_FULL = 2048,
+ P4_EVENT_MEMORY_CANCEL__64K_CONF = 4096,
+ P4_EVENT_MEMORY_COMPLETE__LSC = 512,
+ P4_EVENT_MEMORY_COMPLETE__SSC = 1024,
+ P4_EVENT_LOAD_PORT_REPLAY__SPLIT_LD = 1024,
+ P4_EVENT_STORE_PORT_REPLAY__SPLIT_ST = 1024,
+ P4_EVENT_MOB_LOAD_REPLAY__NO_STA = 1024,
+ P4_EVENT_MOB_LOAD_REPLAY__NO_STD = 4096,
+ P4_EVENT_MOB_LOAD_REPLAY__PARTIAL_DATA = 8192,
+ P4_EVENT_MOB_LOAD_REPLAY__UNALGN_ADDR = 16384,
+ P4_EVENT_PAGE_WALK_TYPE__DTMISS = 512,
+ P4_EVENT_PAGE_WALK_TYPE__ITMISS = 1024,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITS = 512,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITE = 1024,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITM = 2048,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITS = 4096,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITE = 8192,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITM = 16384,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_MISS = 131072,
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_MISS = 262144,
+ P4_EVENT_BSQ_CACHE_REFERENCE__WR_2ndL_MISS = 524288,
+ P4_EVENT_IOQ_ALLOCATION__DEFAULT = 512,
+ P4_EVENT_IOQ_ALLOCATION__ALL_READ = 16384,
+ P4_EVENT_IOQ_ALLOCATION__ALL_WRITE = 32768,
+ P4_EVENT_IOQ_ALLOCATION__MEM_UC = 65536,
+ P4_EVENT_IOQ_ALLOCATION__MEM_WC = 131072,
+ P4_EVENT_IOQ_ALLOCATION__MEM_WT = 262144,
+ P4_EVENT_IOQ_ALLOCATION__MEM_WP = 524288,
+ P4_EVENT_IOQ_ALLOCATION__MEM_WB = 1048576,
+ P4_EVENT_IOQ_ALLOCATION__OWN = 4194304,
+ P4_EVENT_IOQ_ALLOCATION__OTHER = 8388608,
+ P4_EVENT_IOQ_ALLOCATION__PREFETCH = 16777216,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__DEFAULT = 512,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_READ = 16384,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_WRITE = 32768,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_UC = 65536,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WC = 131072,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WT = 262144,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WP = 524288,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WB = 1048576,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__OWN = 4194304,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__OTHER = 8388608,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__PREFETCH = 16777216,
+ P4_EVENT_FSB_DATA_ACTIVITY__DRDY_DRV = 512,
+ P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OWN = 1024,
+ P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OTHER = 2048,
+ P4_EVENT_FSB_DATA_ACTIVITY__DBSY_DRV = 4096,
+ P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OWN = 8192,
+ P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OTHER = 16384,
+ P4_EVENT_BSQ_ALLOCATION__REQ_TYPE0 = 512,
+ P4_EVENT_BSQ_ALLOCATION__REQ_TYPE1 = 1024,
+ P4_EVENT_BSQ_ALLOCATION__REQ_LEN0 = 2048,
+ P4_EVENT_BSQ_ALLOCATION__REQ_LEN1 = 4096,
+ P4_EVENT_BSQ_ALLOCATION__REQ_IO_TYPE = 16384,
+ P4_EVENT_BSQ_ALLOCATION__REQ_LOCK_TYPE = 32768,
+ P4_EVENT_BSQ_ALLOCATION__REQ_CACHE_TYPE = 65536,
+ P4_EVENT_BSQ_ALLOCATION__REQ_SPLIT_TYPE = 131072,
+ P4_EVENT_BSQ_ALLOCATION__REQ_DEM_TYPE = 262144,
+ P4_EVENT_BSQ_ALLOCATION__REQ_ORD_TYPE = 524288,
+ P4_EVENT_BSQ_ALLOCATION__MEM_TYPE0 = 1048576,
+ P4_EVENT_BSQ_ALLOCATION__MEM_TYPE1 = 2097152,
+ P4_EVENT_BSQ_ALLOCATION__MEM_TYPE2 = 4194304,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE0 = 512,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE1 = 1024,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN0 = 2048,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN1 = 4096,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_IO_TYPE = 16384,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LOCK_TYPE = 32768,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_CACHE_TYPE = 65536,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_SPLIT_TYPE = 131072,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_DEM_TYPE = 262144,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_ORD_TYPE = 524288,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE0 = 1048576,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE1 = 2097152,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE2 = 4194304,
+ P4_EVENT_SSE_INPUT_ASSIST__ALL = 16777216,
+ P4_EVENT_PACKED_SP_UOP__ALL = 16777216,
+ P4_EVENT_PACKED_DP_UOP__ALL = 16777216,
+ P4_EVENT_SCALAR_SP_UOP__ALL = 16777216,
+ P4_EVENT_SCALAR_DP_UOP__ALL = 16777216,
+ P4_EVENT_64BIT_MMX_UOP__ALL = 16777216,
+ P4_EVENT_128BIT_MMX_UOP__ALL = 16777216,
+ P4_EVENT_X87_FP_UOP__ALL = 16777216,
+ P4_EVENT_TC_MISC__FLUSH = 8192,
+ P4_EVENT_GLOBAL_POWER_EVENTS__RUNNING = 512,
+ P4_EVENT_TC_MS_XFER__CISC = 512,
+ P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_BUILD = 512,
+ P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_DELIVER = 1024,
+ P4_EVENT_UOP_QUEUE_WRITES__FROM_ROM = 2048,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CONDITIONAL = 1024,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CALL = 2048,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__RETURN = 4096,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__INDIRECT = 8192,
+ P4_EVENT_RETIRED_BRANCH_TYPE__CONDITIONAL = 1024,
+ P4_EVENT_RETIRED_BRANCH_TYPE__CALL = 2048,
+ P4_EVENT_RETIRED_BRANCH_TYPE__RETURN = 4096,
+ P4_EVENT_RETIRED_BRANCH_TYPE__INDIRECT = 8192,
+ P4_EVENT_RESOURCE_STALL__SBFULL = 16384,
+ P4_EVENT_WC_BUFFER__WCB_EVICTS = 512,
+ P4_EVENT_WC_BUFFER__WCB_FULL_EVICTS = 1024,
+ P4_EVENT_FRONT_END_EVENT__NBOGUS = 512,
+ P4_EVENT_FRONT_END_EVENT__BOGUS = 1024,
+ P4_EVENT_EXECUTION_EVENT__NBOGUS0 = 512,
+ P4_EVENT_EXECUTION_EVENT__NBOGUS1 = 1024,
+ P4_EVENT_EXECUTION_EVENT__NBOGUS2 = 2048,
+ P4_EVENT_EXECUTION_EVENT__NBOGUS3 = 4096,
+ P4_EVENT_EXECUTION_EVENT__BOGUS0 = 8192,
+ P4_EVENT_EXECUTION_EVENT__BOGUS1 = 16384,
+ P4_EVENT_EXECUTION_EVENT__BOGUS2 = 32768,
+ P4_EVENT_EXECUTION_EVENT__BOGUS3 = 65536,
+ P4_EVENT_REPLAY_EVENT__NBOGUS = 512,
+ P4_EVENT_REPLAY_EVENT__BOGUS = 1024,
+ P4_EVENT_INSTR_RETIRED__NBOGUSNTAG = 512,
+ P4_EVENT_INSTR_RETIRED__NBOGUSTAG = 1024,
+ P4_EVENT_INSTR_RETIRED__BOGUSNTAG = 2048,
+ P4_EVENT_INSTR_RETIRED__BOGUSTAG = 4096,
+ P4_EVENT_UOPS_RETIRED__NBOGUS = 512,
+ P4_EVENT_UOPS_RETIRED__BOGUS = 1024,
+ P4_EVENT_UOP_TYPE__TAGLOADS = 1024,
+ P4_EVENT_UOP_TYPE__TAGSTORES = 2048,
+ P4_EVENT_BRANCH_RETIRED__MMNP = 512,
+ P4_EVENT_BRANCH_RETIRED__MMNM = 1024,
+ P4_EVENT_BRANCH_RETIRED__MMTP = 2048,
+ P4_EVENT_BRANCH_RETIRED__MMTM = 4096,
+ P4_EVENT_MISPRED_BRANCH_RETIRED__NBOGUS = 512,
+ P4_EVENT_X87_ASSIST__FPSU = 512,
+ P4_EVENT_X87_ASSIST__FPSO = 1024,
+ P4_EVENT_X87_ASSIST__POAO = 2048,
+ P4_EVENT_X87_ASSIST__POAU = 4096,
+ P4_EVENT_X87_ASSIST__PREA = 8192,
+ P4_EVENT_MACHINE_CLEAR__CLEAR = 512,
+ P4_EVENT_MACHINE_CLEAR__MOCLEAR = 1024,
+ P4_EVENT_MACHINE_CLEAR__SMCLEAR = 2048,
+ P4_EVENT_INSTR_COMPLETED__NBOGUS = 512,
+ P4_EVENT_INSTR_COMPLETED__BOGUS = 1024,
+};
+
+enum P4_PEBS_METRIC {
+ P4_PEBS_METRIC__none = 0,
+ P4_PEBS_METRIC__1stl_cache_load_miss_retired = 1,
+ P4_PEBS_METRIC__2ndl_cache_load_miss_retired = 2,
+ P4_PEBS_METRIC__dtlb_load_miss_retired = 3,
+ P4_PEBS_METRIC__dtlb_store_miss_retired = 4,
+ P4_PEBS_METRIC__dtlb_all_miss_retired = 5,
+ P4_PEBS_METRIC__tagged_mispred_branch = 6,
+ P4_PEBS_METRIC__mob_load_replay_retired = 7,
+ P4_PEBS_METRIC__split_load_retired = 8,
+ P4_PEBS_METRIC__split_store_retired = 9,
+ P4_PEBS_METRIC__max = 10,
+};
+
+struct p4_event_bind {
+ unsigned int opcode;
+ unsigned int escr_msr[2];
+ unsigned int escr_emask;
+ unsigned int shared;
+ char cntr[6];
+};
+
+struct p4_pebs_bind {
+ unsigned int metric_pebs;
+ unsigned int metric_vert;
+};
+
+struct p4_event_alias {
+ u64 original;
+ u64 alternative;
+};
+
+enum cpuid_regs_idx {
+ CPUID_EAX = 0,
+ CPUID_EBX = 1,
+ CPUID_ECX = 2,
+ CPUID_EDX = 3,
+};
+
+struct dev_ext_attribute {
+ struct device_attribute attr;
+ void *var;
+};
+
+enum pt_capabilities {
+ PT_CAP_max_subleaf = 0,
+ PT_CAP_cr3_filtering = 1,
+ PT_CAP_psb_cyc = 2,
+ PT_CAP_ip_filtering = 3,
+ PT_CAP_mtc = 4,
+ PT_CAP_ptwrite = 5,
+ PT_CAP_power_event_trace = 6,
+ PT_CAP_topa_output = 7,
+ PT_CAP_topa_multiple_entries = 8,
+ PT_CAP_single_range_output = 9,
+ PT_CAP_output_subsys = 10,
+ PT_CAP_payloads_lip = 11,
+ PT_CAP_num_address_ranges = 12,
+ PT_CAP_mtc_periods = 13,
+ PT_CAP_cycle_thresholds = 14,
+ PT_CAP_psb_periods = 15,
+};
+
+enum perf_addr_filter_action_t {
+ PERF_ADDR_FILTER_ACTION_STOP = 0,
+ PERF_ADDR_FILTER_ACTION_START = 1,
+ PERF_ADDR_FILTER_ACTION_FILTER = 2,
+};
+
+struct perf_addr_filter {
+ struct list_head entry;
+ struct path path;
+ long unsigned int offset;
+ long unsigned int size;
+ enum perf_addr_filter_action_t action;
+};
+
+struct topa_entry {
+ u64 end: 1;
+ u64 rsvd0: 1;
+ u64 intr: 1;
+ u64 rsvd1: 1;
+ u64 stop: 1;
+ u64 rsvd2: 1;
+ u64 size: 4;
+ u64 rsvd3: 2;
+ u64 base: 36;
+ u64 rsvd4: 16;
+};
+
+struct pt_pmu {
+ struct pmu pmu;
+ u32 caps[8];
+ bool vmx;
+ bool branch_en_always_on;
+ long unsigned int max_nonturbo_ratio;
+ unsigned int tsc_art_num;
+ unsigned int tsc_art_den;
+};
+
+struct topa;
+
+struct pt_buffer {
+ struct list_head tables;
+ struct topa *first;
+ struct topa *last;
+ struct topa *cur;
+ unsigned int cur_idx;
+ size_t output_off;
+ long unsigned int nr_pages;
+ local_t data_size;
+ local64_t head;
+ bool snapshot;
+ bool single;
+ long int stop_pos;
+ long int intr_pos;
+ struct topa_entry *stop_te;
+ struct topa_entry *intr_te;
+ void **data_pages;
+};
+
+struct topa {
+ struct list_head list;
+ u64 offset;
+ size_t size;
+ int last;
+ unsigned int z_count;
+};
+
+struct pt_filter {
+ long unsigned int msr_a;
+ long unsigned int msr_b;
+ long unsigned int config;
+};
+
+struct pt_filters {
+ struct pt_filter filter[4];
+ unsigned int nr_filters;
+};
+
+struct pt {
+ struct perf_output_handle handle;
+ struct pt_filters filters;
+ int handle_nmi;
+ int vmx_on;
+ u64 output_base;
+ u64 output_mask;
+};
+
+struct pt_cap_desc {
+ const char *name;
+ u32 leaf;
+ u8 reg;
+ u32 mask;
+};
+
+struct pt_address_range {
+ long unsigned int msr_a;
+ long unsigned int msr_b;
+ unsigned int reg_off;
+};
+
+struct topa_page {
+ struct topa_entry table[507];
+ struct topa topa;
+};
+
+struct console___2;
+
+enum x86_hypervisor_type {
+ X86_HYPER_NATIVE = 0,
+ X86_HYPER_VMWARE = 1,
+ X86_HYPER_MS_HYPERV = 2,
+ X86_HYPER_XEN_PV = 3,
+ X86_HYPER_XEN_HVM = 4,
+ X86_HYPER_KVM = 5,
+ X86_HYPER_JAILHOUSE = 6,
+ X86_HYPER_ACRN = 7,
+};
+
+struct shared_info;
+
+struct start_info;
+
+struct hypervisor_x86 {
+ const char *name;
+ uint32_t (*detect)();
+ enum x86_hypervisor_type type;
+ struct x86_hyper_init init;
+ struct x86_hyper_runtime runtime;
+ bool ignore_nopv;
+};
+
+struct hvm_start_info {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t flags;
+ uint32_t nr_modules;
+ uint64_t modlist_paddr;
+ uint64_t cmdline_paddr;
+ uint64_t rsdp_paddr;
+ uint64_t memmap_paddr;
+ uint32_t memmap_entries;
+ uint32_t reserved;
+};
+
+struct hvm_modlist_entry {
+ uint64_t paddr;
+ uint64_t size;
+ uint64_t cmdline_paddr;
+ uint64_t reserved;
+};
+
+struct hvm_memmap_table_entry {
+ uint64_t addr;
+ uint64_t size;
+ uint32_t type;
+ uint32_t reserved;
+};
+
+struct real_mode_header {
+ u32 text_start;
+ u32 ro_end;
+ u32 trampoline_start;
+ u32 trampoline_header;
+ u32 trampoline_pgd;
+ u32 machine_real_restart_asm;
+ u32 machine_real_restart_seg;
+};
+
+struct trampoline_header {
+ u64 start;
+ u64 efer;
+ u32 cr4;
+ u32 flags;
+};
+
+enum xfeature {
+ XFEATURE_FP = 0,
+ XFEATURE_SSE = 1,
+ XFEATURE_YMM = 2,
+ XFEATURE_BNDREGS = 3,
+ XFEATURE_BNDCSR = 4,
+ XFEATURE_OPMASK = 5,
+ XFEATURE_ZMM_Hi256 = 6,
+ XFEATURE_Hi16_ZMM = 7,
+ XFEATURE_PT_UNIMPLEMENTED_SO_FAR = 8,
+ XFEATURE_PKRU = 9,
+ XFEATURE_MAX = 10,
+};
+
+struct pkru_state {
+ u32 pkru;
+ u32 pad;
+};
+
+enum show_regs_mode {
+ SHOW_REGS_SHORT = 0,
+ SHOW_REGS_USER = 1,
+ SHOW_REGS_ALL = 2,
+};
+
+enum which_selector {
+ FS = 0,
+ GS = 1,
+};
+
+typedef struct task_struct *pto_T_____3;
+
+typedef u64 pto_T_____4;
+
+struct sigcontext_64 {
+ __u64 r8;
+ __u64 r9;
+ __u64 r10;
+ __u64 r11;
+ __u64 r12;
+ __u64 r13;
+ __u64 r14;
+ __u64 r15;
+ __u64 di;
+ __u64 si;
+ __u64 bp;
+ __u64 bx;
+ __u64 dx;
+ __u64 ax;
+ __u64 cx;
+ __u64 sp;
+ __u64 ip;
+ __u64 flags;
+ __u16 cs;
+ __u16 gs;
+ __u16 fs;
+ __u16 ss;
+ __u64 err;
+ __u64 trapno;
+ __u64 oldmask;
+ __u64 cr2;
+ __u64 fpstate;
+ __u64 reserved1[8];
+};
+
+typedef sigset_t compat_sigset_t;
+
+struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+};
+
+typedef struct sigaltstack stack_t;
+
+struct siginfo {
+ union {
+ struct {
+ int si_signo;
+ int si_errno;
+ int si_code;
+ union __sifields _sifields;
+ };
+ int _si_pad[32];
+ };
+};
+
+struct ucontext {
+ long unsigned int uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext_64 uc_mcontext;
+ sigset_t uc_sigmask;
+};
+
+struct mce {
+ __u64 status;
+ __u64 misc;
+ __u64 addr;
+ __u64 mcgstatus;
+ __u64 ip;
+ __u64 tsc;
+ __u64 time;
+ __u8 cpuvendor;
+ __u8 inject_flags;
+ __u8 severity;
+ __u8 pad;
+ __u32 cpuid;
+ __u8 cs;
+ __u8 bank;
+ __u8 cpu;
+ __u8 finished;
+ __u32 extcpu;
+ __u32 socketid;
+ __u32 apicid;
+ __u64 mcgcap;
+ __u64 synd;
+ __u64 ipid;
+ __u64 ppin;
+ __u32 microcode;
+ __u64 kflags;
+};
+
+typedef long unsigned int mce_banks_t[1];
+
+struct kernel_vm86_regs {
+ struct pt_regs pt;
+ short unsigned int es;
+ short unsigned int __esh;
+ short unsigned int ds;
+ short unsigned int __dsh;
+ short unsigned int fs;
+ short unsigned int __fsh;
+ short unsigned int gs;
+ short unsigned int __gsh;
+};
+
+struct rt_sigframe {
+ char *pretcode;
+ struct ucontext uc;
+ struct siginfo info;
+};
+
+enum bug_trap_type {
+ BUG_TRAP_TYPE_NONE = 0,
+ BUG_TRAP_TYPE_WARN = 1,
+ BUG_TRAP_TYPE_BUG = 2,
+};
+
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_INT3 = 2,
+ DIE_DEBUG = 3,
+ DIE_PANIC = 4,
+ DIE_NMI = 5,
+ DIE_DIE = 6,
+ DIE_KERNELDEBUG = 7,
+ DIE_TRAP = 8,
+ DIE_GPF = 9,
+ DIE_CALL = 10,
+ DIE_PAGE_FAULT = 11,
+ DIE_NMIUNKNOWN = 12,
+};
+
+enum kernel_gp_hint {
+ GP_NO_HINT = 0,
+ GP_NON_CANONICAL = 1,
+ GP_CANONICAL = 2,
+};
+
+struct bad_iret_stack {
+ void *error_entry_ret;
+ struct pt_regs regs;
+};
+
+enum {
+ GATE_INTERRUPT = 14,
+ GATE_TRAP = 15,
+ GATE_CALL = 12,
+ GATE_TASK = 5,
+};
+
+struct idt_bits {
+ u16 ist: 3;
+ u16 zero: 5;
+ u16 type: 5;
+ u16 dpl: 2;
+ u16 p: 1;
+};
+
+struct gate_struct {
+ u16 offset_low;
+ u16 segment;
+ struct idt_bits bits;
+ u16 offset_middle;
+ u32 offset_high;
+ u32 reserved;
+};
+
+typedef struct gate_struct gate_desc;
+
+struct idt_data {
+ unsigned int vector;
+ unsigned int segment;
+ struct idt_bits bits;
+ const void *addr;
+};
+
+enum irqreturn {
+ IRQ_NONE = 0,
+ IRQ_HANDLED = 1,
+ IRQ_WAKE_THREAD = 2,
+};
+
+typedef enum irqreturn irqreturn_t;
+
+typedef irqreturn_t (*irq_handler_t)(int, void *);
+
+struct irqaction {
+ irq_handler_t handler;
+ void *dev_id;
+ void *percpu_dev_id;
+ struct irqaction *next;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ struct irqaction *secondary;
+ unsigned int irq;
+ unsigned int flags;
+ long unsigned int thread_flags;
+ long unsigned int thread_mask;
+ const char *name;
+ struct proc_dir_entry *dir;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *);
+ void (*release)(struct kref *);
+};
+
+enum irqchip_irq_state {
+ IRQCHIP_STATE_PENDING = 0,
+ IRQCHIP_STATE_ACTIVE = 1,
+ IRQCHIP_STATE_MASKED = 2,
+ IRQCHIP_STATE_LINE_LEVEL = 3,
+};
+
+struct irq_desc___2;
+
+typedef void (*irq_flow_handler_t)(struct irq_desc___2 *);
+
+struct msi_desc;
+
+struct irq_common_data {
+ unsigned int state_use_accessors;
+ void *handler_data;
+ struct msi_desc *msi_desc;
+ cpumask_var_t affinity;
+ cpumask_var_t effective_affinity;
+};
+
+struct irq_chip;
+
+struct irq_data {
+ u32 mask;
+ unsigned int irq;
+ long unsigned int hwirq;
+ struct irq_common_data *common;
+ struct irq_chip *chip;
+ struct irq_domain *domain;
+ struct irq_data *parent_data;
+ void *chip_data;
+};
+
+struct irq_desc___2 {
+ struct irq_common_data irq_common_data;
+ struct irq_data irq_data;
+ unsigned int *kstat_irqs;
+ irq_flow_handler_t handle_irq;
+ struct irqaction *action;
+ unsigned int status_use_accessors;
+ unsigned int core_internal_state__do_not_mess_with_it;
+ unsigned int depth;
+ unsigned int wake_depth;
+ unsigned int tot_count;
+ unsigned int irq_count;
+ long unsigned int last_unhandled;
+ unsigned int irqs_unhandled;
+ atomic_t threads_handled;
+ int threads_handled_last;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ const struct cpumask *percpu_affinity;
+ const struct cpumask *affinity_hint;
+ struct irq_affinity_notify *affinity_notify;
+ cpumask_var_t pending_mask;
+ long unsigned int threads_oneshot;
+ atomic_t threads_active;
+ wait_queue_head_t wait_for_threads;
+ struct proc_dir_entry *dir;
+ struct callback_head rcu;
+ struct kobject kobj;
+ struct mutex request_mutex;
+ int parent_irq;
+ struct module *owner;
+ const char *name;
+ long: 64;
+};
+
+struct msi_msg;
+
+struct irq_chip {
+ struct device *parent_device;
+ const char *name;
+ unsigned int (*irq_startup)(struct irq_data *);
+ void (*irq_shutdown)(struct irq_data *);
+ void (*irq_enable)(struct irq_data *);
+ void (*irq_disable)(struct irq_data *);
+ void (*irq_ack)(struct irq_data *);
+ void (*irq_mask)(struct irq_data *);
+ void (*irq_mask_ack)(struct irq_data *);
+ void (*irq_unmask)(struct irq_data *);
+ void (*irq_eoi)(struct irq_data *);
+ int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool);
+ int (*irq_retrigger)(struct irq_data *);
+ int (*irq_set_type)(struct irq_data *, unsigned int);
+ int (*irq_set_wake)(struct irq_data *, unsigned int);
+ void (*irq_bus_lock)(struct irq_data *);
+ void (*irq_bus_sync_unlock)(struct irq_data *);
+ void (*irq_cpu_online)(struct irq_data *);
+ void (*irq_cpu_offline)(struct irq_data *);
+ void (*irq_suspend)(struct irq_data *);
+ void (*irq_resume)(struct irq_data *);
+ void (*irq_pm_shutdown)(struct irq_data *);
+ void (*irq_calc_mask)(struct irq_data *);
+ void (*irq_print_chip)(struct irq_data *, struct seq_file *);
+ int (*irq_request_resources)(struct irq_data *);
+ void (*irq_release_resources)(struct irq_data *);
+ void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *);
+ void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *);
+ int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *);
+ int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool);
+ int (*irq_set_vcpu_affinity)(struct irq_data *, void *);
+ void (*ipi_send_single)(struct irq_data *, unsigned int);
+ void (*ipi_send_mask)(struct irq_data *, const struct cpumask *);
+ int (*irq_nmi_setup)(struct irq_data *);
+ void (*irq_nmi_teardown)(struct irq_data *);
+ long unsigned int flags;
+};
+
+typedef struct irq_desc___2 *vector_irq_t___2[256];
+
+struct trace_event_raw_x86_irq_vector {
+ struct trace_entry ent;
+ int vector;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_config {
+ struct trace_entry ent;
+ unsigned int irq;
+ unsigned int vector;
+ unsigned int cpu;
+ unsigned int apicdest;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_mod {
+ struct trace_entry ent;
+ unsigned int irq;
+ unsigned int vector;
+ unsigned int cpu;
+ unsigned int prev_vector;
+ unsigned int prev_cpu;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_reserve {
+ struct trace_entry ent;
+ unsigned int irq;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_alloc {
+ struct trace_entry ent;
+ unsigned int irq;
+ unsigned int vector;
+ bool reserved;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_alloc_managed {
+ struct trace_entry ent;
+ unsigned int irq;
+ unsigned int vector;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_activate {
+ struct trace_entry ent;
+ unsigned int irq;
+ bool is_managed;
+ bool can_reserve;
+ bool reserve;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_teardown {
+ struct trace_entry ent;
+ unsigned int irq;
+ bool is_managed;
+ bool has_reserved;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_setup {
+ struct trace_entry ent;
+ unsigned int irq;
+ bool is_legacy;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_vector_free_moved {
+ struct trace_entry ent;
+ unsigned int irq;
+ unsigned int cpu;
+ unsigned int vector;
+ bool is_managed;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_x86_irq_vector {};
+
+struct trace_event_data_offsets_vector_config {};
+
+struct trace_event_data_offsets_vector_mod {};
+
+struct trace_event_data_offsets_vector_reserve {};
+
+struct trace_event_data_offsets_vector_alloc {};
+
+struct trace_event_data_offsets_vector_alloc_managed {};
+
+struct trace_event_data_offsets_vector_activate {};
+
+struct trace_event_data_offsets_vector_teardown {};
+
+struct trace_event_data_offsets_vector_setup {};
+
+struct trace_event_data_offsets_vector_free_moved {};
+
+typedef void (*btf_trace_local_timer_entry)(void *, int);
+
+typedef void (*btf_trace_local_timer_exit)(void *, int);
+
+typedef void (*btf_trace_spurious_apic_entry)(void *, int);
+
+typedef void (*btf_trace_spurious_apic_exit)(void *, int);
+
+typedef void (*btf_trace_error_apic_entry)(void *, int);
+
+typedef void (*btf_trace_error_apic_exit)(void *, int);
+
+typedef void (*btf_trace_x86_platform_ipi_entry)(void *, int);
+
+typedef void (*btf_trace_x86_platform_ipi_exit)(void *, int);
+
+typedef void (*btf_trace_irq_work_entry)(void *, int);
+
+typedef void (*btf_trace_irq_work_exit)(void *, int);
+
+typedef void (*btf_trace_reschedule_entry)(void *, int);
+
+typedef void (*btf_trace_reschedule_exit)(void *, int);
+
+typedef void (*btf_trace_call_function_entry)(void *, int);
+
+typedef void (*btf_trace_call_function_exit)(void *, int);
+
+typedef void (*btf_trace_call_function_single_entry)(void *, int);
+
+typedef void (*btf_trace_call_function_single_exit)(void *, int);
+
+typedef void (*btf_trace_vector_config)(void *, unsigned int, unsigned int, unsigned int, unsigned int);
+
+typedef void (*btf_trace_vector_update)(void *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int);
+
+typedef void (*btf_trace_vector_clear)(void *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int);
+
+typedef void (*btf_trace_vector_reserve_managed)(void *, unsigned int, int);
+
+typedef void (*btf_trace_vector_reserve)(void *, unsigned int, int);
+
+typedef void (*btf_trace_vector_alloc)(void *, unsigned int, unsigned int, bool, int);
+
+typedef void (*btf_trace_vector_alloc_managed)(void *, unsigned int, unsigned int, int);
+
+typedef void (*btf_trace_vector_activate)(void *, unsigned int, bool, bool, bool);
+
+typedef void (*btf_trace_vector_deactivate)(void *, unsigned int, bool, bool, bool);
+
+typedef void (*btf_trace_vector_teardown)(void *, unsigned int, bool, bool);
+
+typedef void (*btf_trace_vector_setup)(void *, unsigned int, bool, int);
+
+typedef void (*btf_trace_vector_free_moved)(void *, unsigned int, unsigned int, unsigned int, bool);
+
+typedef struct irq_desc___2 *pto_T_____5;
+
+typedef struct pt_regs *pto_T_____6;
+
+struct estack_pages {
+ u32 offs;
+ u16 size;
+ u16 type;
+};
+
+struct clocksource {
+ u64 (*read)(struct clocksource *);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
+ u64 max_cycles;
+ const char *name;
+ struct list_head list;
+ int rating;
+ enum vdso_clock_mode vdso_clock_mode;
+ long unsigned int flags;
+ int (*enable)(struct clocksource *);
+ void (*disable)(struct clocksource *);
+ void (*suspend)(struct clocksource *);
+ void (*resume)(struct clocksource *);
+ void (*mark_unstable)(struct clocksource *);
+ void (*tick_stable)(struct clocksource *);
+ struct list_head wd_list;
+ u64 cs_last;
+ u64 wd_last;
+ struct module *owner;
+};
+
+enum clock_event_state {
+ CLOCK_EVT_STATE_DETACHED = 0,
+ CLOCK_EVT_STATE_SHUTDOWN = 1,
+ CLOCK_EVT_STATE_PERIODIC = 2,
+ CLOCK_EVT_STATE_ONESHOT = 3,
+ CLOCK_EVT_STATE_ONESHOT_STOPPED = 4,
+};
+
+struct clock_event_device {
+ void (*event_handler)(struct clock_event_device *);
+ int (*set_next_event)(long unsigned int, struct clock_event_device *);
+ int (*set_next_ktime)(ktime_t, struct clock_event_device *);
+ ktime_t next_event;
+ u64 max_delta_ns;
+ u64 min_delta_ns;
+ u32 mult;
+ u32 shift;
+ enum clock_event_state state_use_accessors;
+ unsigned int features;
+ long unsigned int retries;
+ int (*set_state_periodic)(struct clock_event_device *);
+ int (*set_state_oneshot)(struct clock_event_device *);
+ int (*set_state_oneshot_stopped)(struct clock_event_device *);
+ int (*set_state_shutdown)(struct clock_event_device *);
+ int (*tick_resume)(struct clock_event_device *);
+ void (*broadcast)(const struct cpumask *);
+ void (*suspend)(struct clock_event_device *);
+ void (*resume)(struct clock_event_device *);
+ long unsigned int min_delta_ticks;
+ long unsigned int max_delta_ticks;
+ const char *name;
+ int rating;
+ int irq;
+ int bound_on;
+ const struct cpumask *cpumask;
+ struct list_head list;
+ struct module *owner;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed: 1;
+};
+
+struct msi_msg {
+ u32 address_lo;
+ u32 address_hi;
+ u32 data;
+};
+
+struct platform_msi_priv_data;
+
+struct platform_msi_desc {
+ struct platform_msi_priv_data *msi_priv_data;
+ u16 msi_index;
+};
+
+struct fsl_mc_msi_desc {
+ u16 msi_index;
+};
+
+struct ti_sci_inta_msi_desc {
+ u16 dev_index;
+};
+
+struct msi_desc {
+ struct list_head list;
+ unsigned int irq;
+ unsigned int nvec_used;
+ struct device *dev;
+ struct msi_msg msg;
+ struct irq_affinity_desc *affinity;
+ void (*write_msi_msg)(struct msi_desc *, void *);
+ void *write_msi_msg_data;
+ union {
+ struct {
+ u32 masked;
+ struct {
+ u8 is_msix: 1;
+ u8 multiple: 3;
+ u8 multi_cap: 3;
+ u8 maskbit: 1;
+ u8 is_64: 1;
+ u8 is_virtual: 1;
+ u16 entry_nr;
+ unsigned int default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void *mask_base;
+ };
+ };
+ struct platform_msi_desc platform;
+ struct fsl_mc_msi_desc fsl_mc;
+ struct ti_sci_inta_msi_desc inta;
+ };
+};
+
+struct irq_chip_regs {
+ long unsigned int enable;
+ long unsigned int disable;
+ long unsigned int mask;
+ long unsigned int ack;
+ long unsigned int eoi;
+ long unsigned int type;
+ long unsigned int polarity;
+};
+
+struct irq_chip_type {
+ struct irq_chip chip;
+ struct irq_chip_regs regs;
+ irq_flow_handler_t handler;
+ u32 type;
+ u32 mask_cache_priv;
+ u32 *mask_cache;
+};
+
+struct irq_chip_generic {
+ raw_spinlock_t lock;
+ void *reg_base;
+ u32 (*reg_readl)(void *);
+ void (*reg_writel)(u32, void *);
+ void (*suspend)(struct irq_chip_generic *);
+ void (*resume)(struct irq_chip_generic *);
+ unsigned int irq_base;
+ unsigned int irq_cnt;
+ u32 mask_cache;
+ u32 type_cache;
+ u32 polarity_cache;
+ u32 wake_enabled;
+ u32 wake_active;
+ unsigned int num_ct;
+ void *private;
+ long unsigned int installed;
+ long unsigned int unused;
+ struct irq_domain *domain;
+ struct list_head list;
+ struct irq_chip_type chip_types[0];
+};
+
+enum irq_gc_flags {
+ IRQ_GC_INIT_MASK_CACHE = 1,
+ IRQ_GC_INIT_NESTED_LOCK = 2,
+ IRQ_GC_MASK_CACHE_PER_TYPE = 4,
+ IRQ_GC_NO_MASK = 8,
+ IRQ_GC_BE_IO = 16,
+};
+
+struct irq_domain_chip_generic {
+ unsigned int irqs_per_chip;
+ unsigned int num_chips;
+ unsigned int irq_flags_to_clear;
+ unsigned int irq_flags_to_set;
+ enum irq_gc_flags gc_flags;
+ struct irq_chip_generic *gc[0];
+};
+
+struct legacy_pic {
+ int nr_legacy_irqs;
+ struct irq_chip *chip;
+ void (*mask)(unsigned int);
+ void (*unmask)(unsigned int);
+ void (*mask_all)();
+ void (*restore_mask)();
+ void (*init)(int);
+ int (*probe)();
+ int (*irq_pending)(unsigned int);
+ void (*make_irq)(unsigned int);
+};
+
+enum refcount_saturation_type {
+ REFCOUNT_ADD_NOT_ZERO_OVF = 0,
+ REFCOUNT_ADD_OVF = 1,
+ REFCOUNT_ADD_UAF = 2,
+ REFCOUNT_SUB_UAF = 3,
+ REFCOUNT_DEC_LEAK = 4,
+};
+
+enum lockdown_reason {
+ LOCKDOWN_NONE = 0,
+ LOCKDOWN_MODULE_SIGNATURE = 1,
+ LOCKDOWN_DEV_MEM = 2,
+ LOCKDOWN_EFI_TEST = 3,
+ LOCKDOWN_KEXEC = 4,
+ LOCKDOWN_HIBERNATION = 5,
+ LOCKDOWN_PCI_ACCESS = 6,
+ LOCKDOWN_IOPORT = 7,
+ LOCKDOWN_MSR = 8,
+ LOCKDOWN_ACPI_TABLES = 9,
+ LOCKDOWN_PCMCIA_CIS = 10,
+ LOCKDOWN_TIOCSSERIAL = 11,
+ LOCKDOWN_MODULE_PARAMETERS = 12,
+ LOCKDOWN_MMIOTRACE = 13,
+ LOCKDOWN_DEBUGFS = 14,
+ LOCKDOWN_XMON_WR = 15,
+ LOCKDOWN_INTEGRITY_MAX = 16,
+ LOCKDOWN_KCORE = 17,
+ LOCKDOWN_KPROBES = 18,
+ LOCKDOWN_BPF_READ = 19,
+ LOCKDOWN_PERF = 20,
+ LOCKDOWN_TRACEFS = 21,
+ LOCKDOWN_XMON_RW = 22,
+ LOCKDOWN_CONFIDENTIALITY_MAX = 23,
+};
+
+enum lockdep_ok {
+ LOCKDEP_STILL_OK = 0,
+ LOCKDEP_NOW_UNRELIABLE = 1,
+};
+
+typedef long unsigned int uintptr_t;
+
+struct machine_ops {
+ void (*restart)(char *);
+ void (*halt)();
+ void (*power_off)();
+ void (*shutdown)();
+ void (*crash_shutdown)(struct pt_regs *);
+ void (*emergency_restart)();
+};
+
+struct trace_event_raw_nmi_handler {
+ struct trace_entry ent;
+ void *handler;
+ s64 delta_ns;
+ int handled;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_nmi_handler {};
+
+typedef void (*btf_trace_nmi_handler)(void *, void *, s64, int);
+
+struct nmi_desc {
+ raw_spinlock_t lock;
+ struct list_head head;
+};
+
+struct nmi_stats {
+ unsigned int normal;
+ unsigned int unknown;
+ unsigned int external;
+ unsigned int swallow;
+};
+
+enum nmi_states {
+ NMI_NOT_RUNNING = 0,
+ NMI_EXECUTING = 1,
+ NMI_LATCHED = 2,
+};
+
+typedef enum nmi_states pto_T_____7;
+
+typedef bool pto_T_____8;
+
+enum {
+ DESC_TSS = 9,
+ DESC_LDT = 2,
+ DESCTYPE_S = 16,
+};
+
+struct ldttss_desc {
+ u16 limit0;
+ u16 base0;
+ u16 base1: 8;
+ u16 type: 5;
+ u16 dpl: 2;
+ u16 p: 1;
+ u16 limit1: 4;
+ u16 zero0: 3;
+ u16 g: 1;
+ u16 base2: 8;
+ u32 base3;
+ u32 zero1;
+};
+
+typedef struct ldttss_desc ldt_desc;
+
+struct user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit: 1;
+ unsigned int contents: 2;
+ unsigned int read_exec_only: 1;
+ unsigned int limit_in_pages: 1;
+ unsigned int seg_not_present: 1;
+ unsigned int useable: 1;
+ unsigned int lm: 1;
+};
+
+struct setup_data {
+ __u64 next;
+ __u32 type;
+ __u32 len;
+ __u8 data[0];
+};
+
+struct setup_indirect {
+ __u32 type;
+ __u32 reserved;
+ __u64 len;
+ __u64 addr;
+};
+
+enum efi_secureboot_mode {
+ efi_secureboot_mode_unset = 0,
+ efi_secureboot_mode_unknown = 1,
+ efi_secureboot_mode_disabled = 2,
+ efi_secureboot_mode_enabled = 3,
+};
+
+struct acpi_table_ibft {
+ struct acpi_table_header header;
+ u8 reserved[12];
+};
+
+struct hstate {
+ int next_nid_to_alloc;
+ int next_nid_to_free;
+ unsigned int order;
+ long unsigned int mask;
+ long unsigned int max_huge_pages;
+ long unsigned int nr_huge_pages;
+ long unsigned int free_huge_pages;
+ long unsigned int resv_huge_pages;
+ long unsigned int surplus_huge_pages;
+ long unsigned int nr_overcommit_huge_pages;
+ struct list_head hugepage_activelist;
+ struct list_head hugepage_freelists[1];
+ unsigned int nr_huge_pages_node[1];
+ unsigned int free_huge_pages_node[1];
+ unsigned int surplus_huge_pages_node[1];
+ char name[32];
+};
+
+struct efi_scratch {
+ u64 phys_stack;
+ struct mm_struct *prev_mm;
+};
+
+struct msi_controller {
+ struct module *owner;
+ struct device *dev;
+ struct device_node *of_node;
+ struct list_head list;
+ int (*setup_irq)(struct msi_controller *, struct pci_dev *, struct msi_desc *);
+ int (*setup_irqs)(struct msi_controller *, struct pci_dev *, int, int);
+ void (*teardown_irq)(struct msi_controller *, unsigned int);
+};
+
+struct pci_raw_ops {
+ int (*read)(unsigned int, unsigned int, unsigned int, int, int, u32 *);
+ int (*write)(unsigned int, unsigned int, unsigned int, int, int, u32);
+};
+
+struct clock_event_device___2;
+
+struct syscore_ops {
+ struct list_head node;
+ int (*suspend)();
+ void (*resume)();
+ void (*shutdown)();
+};
+
+struct vm_unmapped_area_info {
+ long unsigned int flags;
+ long unsigned int length;
+ long unsigned int low_limit;
+ long unsigned int high_limit;
+ long unsigned int align_mask;
+ long unsigned int align_offset;
+};
+
+enum align_flags {
+ ALIGN_VA_32 = 1,
+ ALIGN_VA_64 = 2,
+};
+
+enum {
+ MEMREMAP_WB = 1,
+ MEMREMAP_WT = 2,
+ MEMREMAP_WC = 4,
+ MEMREMAP_ENC = 8,
+ MEMREMAP_DEC = 16,
+};
+
+enum {
+ IORES_DESC_NONE = 0,
+ IORES_DESC_CRASH_KERNEL = 1,
+ IORES_DESC_ACPI_TABLES = 2,
+ IORES_DESC_ACPI_NV_STORAGE = 3,
+ IORES_DESC_PERSISTENT_MEMORY = 4,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+ IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
+ IORES_DESC_RESERVED = 7,
+ IORES_DESC_SOFT_RESERVED = 8,
+};
+
+struct change_member {
+ struct e820_entry *entry;
+ long long unsigned int addr;
+};
+
+struct iommu_group {};
+
+struct iommu_table_entry {
+ initcall_t detect;
+ initcall_t depend;
+ void (*early_init)();
+ void (*late_init)();
+ int flags;
+};
+
+enum dmi_field {
+ DMI_NONE = 0,
+ DMI_BIOS_VENDOR = 1,
+ DMI_BIOS_VERSION = 2,
+ DMI_BIOS_DATE = 3,
+ DMI_BIOS_RELEASE = 4,
+ DMI_EC_FIRMWARE_RELEASE = 5,
+ DMI_SYS_VENDOR = 6,
+ DMI_PRODUCT_NAME = 7,
+ DMI_PRODUCT_VERSION = 8,
+ DMI_PRODUCT_SERIAL = 9,
+ DMI_PRODUCT_UUID = 10,
+ DMI_PRODUCT_SKU = 11,
+ DMI_PRODUCT_FAMILY = 12,
+ DMI_BOARD_VENDOR = 13,
+ DMI_BOARD_NAME = 14,
+ DMI_BOARD_VERSION = 15,
+ DMI_BOARD_SERIAL = 16,
+ DMI_BOARD_ASSET_TAG = 17,
+ DMI_CHASSIS_VENDOR = 18,
+ DMI_CHASSIS_TYPE = 19,
+ DMI_CHASSIS_VERSION = 20,
+ DMI_CHASSIS_SERIAL = 21,
+ DMI_CHASSIS_ASSET_TAG = 22,
+ DMI_STRING_MAX = 23,
+ DMI_OEM_STRING = 24,
+};
+
+enum {
+ NONE_FORCE_HPET_RESUME = 0,
+ OLD_ICH_FORCE_HPET_RESUME = 1,
+ ICH_FORCE_HPET_RESUME = 2,
+ VT8237_FORCE_HPET_RESUME = 3,
+ NVIDIA_FORCE_HPET_RESUME = 4,
+ ATI_FORCE_HPET_RESUME = 5,
+};
+
+struct cpu {
+ int node_id;
+ int hotpluggable;
+ struct device dev;
+};
+
+struct x86_cpu {
+ struct cpu cpu;
+};
+
+typedef int (*cmp_func_t)(const void *, const void *);
+
+struct die_args {
+ struct pt_regs *regs;
+ const char *str;
+ long int err;
+ int trapnr;
+ int signr;
+};
+
+struct smp_alt_module {
+ struct module *mod;
+ char *name;
+ const s32 *locks;
+ const s32 *locks_end;
+ u8 *text;
+ u8 *text_end;
+ struct list_head next;
+};
+
+typedef struct {
+ struct mm_struct *mm;
+} temp_mm_state_t;
+
+struct text_poke_loc {
+ s32 rel_addr;
+ s32 rel32;
+ u8 opcode;
+ const u8 text[5];
+};
+
+struct bp_patching_desc {
+ struct text_poke_loc *vec;
+ int nr_entries;
+ atomic_t refs;
+};
+
+struct user_i387_struct {
+ short unsigned int cwd;
+ short unsigned int swd;
+ short unsigned int twd;
+ short unsigned int fop;
+ __u64 rip;
+ __u64 rdp;
+ __u32 mxcsr;
+ __u32 mxcsr_mask;
+ __u32 st_space[32];
+ __u32 xmm_space[64];
+ __u32 padding[24];
+};
+
+struct user_regs_struct {
+ long unsigned int r15;
+ long unsigned int r14;
+ long unsigned int r13;
+ long unsigned int r12;
+ long unsigned int bp;
+ long unsigned int bx;
+ long unsigned int r11;
+ long unsigned int r10;
+ long unsigned int r9;
+ long unsigned int r8;
+ long unsigned int ax;
+ long unsigned int cx;
+ long unsigned int dx;
+ long unsigned int si;
+ long unsigned int di;
+ long unsigned int orig_ax;
+ long unsigned int ip;
+ long unsigned int cs;
+ long unsigned int flags;
+ long unsigned int sp;
+ long unsigned int ss;
+ long unsigned int fs_base;
+ long unsigned int gs_base;
+ long unsigned int ds;
+ long unsigned int es;
+ long unsigned int fs;
+ long unsigned int gs;
+};
+
+struct user {
+ struct user_regs_struct regs;
+ int u_fpvalid;
+ int pad0;
+ struct user_i387_struct i387;
+ long unsigned int u_tsize;
+ long unsigned int u_dsize;
+ long unsigned int u_ssize;
+ long unsigned int start_code;
+ long unsigned int start_stack;
+ long int signal;
+ int reserved;
+ int pad1;
+ long unsigned int u_ar0;
+ struct user_i387_struct *u_fpstate;
+ long unsigned int magic;
+ char u_comm[32];
+ long unsigned int u_debugreg[8];
+ long unsigned int error_code;
+ long unsigned int fault_address;
+};
+
+enum {
+ HW_BREAKPOINT_LEN_1 = 1,
+ HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_3 = 3,
+ HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_5 = 5,
+ HW_BREAKPOINT_LEN_6 = 6,
+ HW_BREAKPOINT_LEN_7 = 7,
+ HW_BREAKPOINT_LEN_8 = 8,
+};
+
+enum {
+ HW_BREAKPOINT_EMPTY = 0,
+ HW_BREAKPOINT_R = 1,
+ HW_BREAKPOINT_W = 2,
+ HW_BREAKPOINT_RW = 3,
+ HW_BREAKPOINT_X = 4,
+ HW_BREAKPOINT_INVALID = 7,
+};
+
+typedef unsigned int u_int;
+
+typedef long long unsigned int cycles_t;
+
+struct system_counterval_t {
+ u64 cycles;
+ struct clocksource *cs;
+};
+
+enum {
+ WORK_STRUCT_PENDING_BIT = 0,
+ WORK_STRUCT_DELAYED_BIT = 1,
+ WORK_STRUCT_PWQ_BIT = 2,
+ WORK_STRUCT_LINKED_BIT = 3,
+ WORK_STRUCT_COLOR_SHIFT = 4,
+ WORK_STRUCT_COLOR_BITS = 4,
+ WORK_STRUCT_PENDING = 1,
+ WORK_STRUCT_DELAYED = 2,
+ WORK_STRUCT_PWQ = 4,
+ WORK_STRUCT_LINKED = 8,
+ WORK_STRUCT_STATIC = 0,
+ WORK_NR_COLORS = 15,
+ WORK_NO_COLOR = 15,
+ WORK_CPU_UNBOUND = 240,
+ WORK_STRUCT_FLAG_BITS = 8,
+ WORK_OFFQ_FLAG_BASE = 4,
+ __WORK_OFFQ_CANCELING = 4,
+ WORK_OFFQ_CANCELING = 16,
+ WORK_OFFQ_FLAG_BITS = 1,
+ WORK_OFFQ_POOL_SHIFT = 5,
+ WORK_OFFQ_LEFT = 59,
+ WORK_OFFQ_POOL_BITS = 31,
+ WORK_OFFQ_POOL_NONE = 2147483647,
+ WORK_STRUCT_FLAG_MASK = 255,
+ WORK_STRUCT_WQ_DATA_MASK = -256,
+ WORK_STRUCT_NO_POOL = -32,
+ WORK_BUSY_PENDING = 1,
+ WORK_BUSY_RUNNING = 2,
+ WORKER_DESC_LEN = 24,
+};
+
+struct plist_head {
+ struct list_head node_list;
+};
+
+enum pm_qos_type {
+ PM_QOS_UNITIALIZED = 0,
+ PM_QOS_MAX = 1,
+ PM_QOS_MIN = 2,
+};
+
+struct pm_qos_constraints {
+ struct plist_head list;
+ s32 target_value;
+ s32 default_value;
+ s32 no_constraint_value;
+ enum pm_qos_type type;
+ struct blocking_notifier_head *notifiers;
+};
+
+struct freq_constraints {
+ struct pm_qos_constraints min_freq;
+ struct blocking_notifier_head min_freq_notifiers;
+ struct pm_qos_constraints max_freq;
+ struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct pm_qos_flags {
+ struct list_head list;
+ s32 effective_flags;
+};
+
+struct dev_pm_qos_request;
+
+struct dev_pm_qos {
+ struct pm_qos_constraints resume_latency;
+ struct pm_qos_constraints latency_tolerance;
+ struct freq_constraints freq;
+ struct pm_qos_flags flags;
+ struct dev_pm_qos_request *resume_latency_req;
+ struct dev_pm_qos_request *latency_tolerance_req;
+ struct dev_pm_qos_request *flags_req;
+};
+
+struct pm_qos_flags_request {
+ struct list_head node;
+ s32 flags;
+};
+
+enum freq_qos_req_type {
+ FREQ_QOS_MIN = 1,
+ FREQ_QOS_MAX = 2,
+};
+
+struct freq_qos_request {
+ enum freq_qos_req_type type;
+ struct plist_node pnode;
+ struct freq_constraints *qos;
+};
+
+enum dev_pm_qos_req_type {
+ DEV_PM_QOS_RESUME_LATENCY = 1,
+ DEV_PM_QOS_LATENCY_TOLERANCE = 2,
+ DEV_PM_QOS_MIN_FREQUENCY = 3,
+ DEV_PM_QOS_MAX_FREQUENCY = 4,
+ DEV_PM_QOS_FLAGS = 5,
+};
+
+struct dev_pm_qos_request {
+ enum dev_pm_qos_req_type type;
+ union {
+ struct plist_node pnode;
+ struct pm_qos_flags_request flr;
+ struct freq_qos_request freq;
+ } data;
+ struct device *dev;
+};
+
+enum cpufreq_table_sorting {
+ CPUFREQ_TABLE_UNSORTED = 0,
+ CPUFREQ_TABLE_SORTED_ASCENDING = 1,
+ CPUFREQ_TABLE_SORTED_DESCENDING = 2,
+};
+
+struct cpufreq_cpuinfo {
+ unsigned int max_freq;
+ unsigned int min_freq;
+ unsigned int transition_latency;
+};
+
+struct cpufreq_stats;
+
+struct clk;
+
+struct cpufreq_governor;
+
+struct cpufreq_frequency_table;
+
+struct thermal_cooling_device;
+
+struct cpufreq_policy {
+ cpumask_var_t cpus;
+ cpumask_var_t related_cpus;
+ cpumask_var_t real_cpus;
+ unsigned int shared_type;
+ unsigned int cpu;
+ struct clk *clk;
+ struct cpufreq_cpuinfo cpuinfo;
+ unsigned int min;
+ unsigned int max;
+ unsigned int cur;
+ unsigned int restore_freq;
+ unsigned int suspend_freq;
+ unsigned int policy;
+ unsigned int last_policy;
+ struct cpufreq_governor *governor;
+ void *governor_data;
+ char last_governor[16];
+ struct work_struct update;
+ struct freq_constraints constraints;
+ struct freq_qos_request *min_freq_req;
+ struct freq_qos_request *max_freq_req;
+ struct cpufreq_frequency_table *freq_table;
+ enum cpufreq_table_sorting freq_table_sorted;
+ struct list_head policy_list;
+ struct kobject kobj;
+ struct completion kobj_unregister;
+ struct rw_semaphore rwsem;
+ bool fast_switch_possible;
+ bool fast_switch_enabled;
+ unsigned int transition_delay_us;
+ bool dvfs_possible_from_any_cpu;
+ unsigned int cached_target_freq;
+ int cached_resolved_idx;
+ bool transition_ongoing;
+ spinlock_t transition_lock;
+ wait_queue_head_t transition_wait;
+ struct task_struct *transition_task;
+ struct cpufreq_stats *stats;
+ void *driver_data;
+ struct thermal_cooling_device *cdev;
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
+};
+
+struct cpufreq_governor {
+ char name[16];
+ int (*init)(struct cpufreq_policy *);
+ void (*exit)(struct cpufreq_policy *);
+ int (*start)(struct cpufreq_policy *);
+ void (*stop)(struct cpufreq_policy *);
+ void (*limits)(struct cpufreq_policy *);
+ ssize_t (*show_setspeed)(struct cpufreq_policy *, char *);
+ int (*store_setspeed)(struct cpufreq_policy *, unsigned int);
+ bool dynamic_switching;
+ struct list_head governor_list;
+ struct module *owner;
+};
+
+struct cpufreq_frequency_table {
+ unsigned int flags;
+ unsigned int driver_data;
+ unsigned int frequency;
+};
+
+struct cpufreq_freqs {
+ struct cpufreq_policy *policy;
+ unsigned int old;
+ unsigned int new;
+ u8 flags;
+};
+
+struct freq_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpufreq_policy *, char *);
+ ssize_t (*store)(struct cpufreq_policy *, const char *, size_t);
+};
+
+struct cyc2ns {
+ struct cyc2ns_data data[2];
+ seqcount_t seq;
+};
+
+struct x86_cpu_id {
+ __u16 vendor;
+ __u16 family;
+ __u16 model;
+ __u16 steppings;
+ __u16 feature;
+ kernel_ulong_t driver_data;
+};
+
+struct muldiv {
+ u32 multiplier;
+ u32 divider;
+};
+
+struct freq_desc {
+ bool use_msr_plat;
+ struct muldiv muldiv[16];
+ u32 freqs[16];
+ u32 mask;
+};
+
+struct dmi_strmatch {
+ unsigned char slot: 7;
+ unsigned char exact_match: 1;
+ char substr[79];
+};
+
+struct dmi_system_id {
+ int (*callback)(const struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
+};
+
+struct pdev_archdata {};
+
+struct mfd_cell;
+
+struct platform_device_id;
+
+struct platform_device {
+ const char *name;
+ int id;
+ bool id_auto;
+ struct device dev;
+ u64 platform_dma_mask;
+ struct device_dma_parameters dma_parms;
+ u32 num_resources;
+ struct resource *resource;
+ const struct platform_device_id *id_entry;
+ char *driver_override;
+ struct mfd_cell *mfd_cell;
+ struct pdev_archdata archdata;
+};
+
+struct platform_device_id {
+ char name[20];
+ kernel_ulong_t driver_data;
+};
+
+struct rtc_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+struct pnp_device_id {
+ __u8 id[8];
+ kernel_ulong_t driver_data;
+};
+
+struct pnp_card_device_id {
+ __u8 id[8];
+ kernel_ulong_t driver_data;
+ struct {
+ __u8 id[8];
+ } devs[8];
+};
+
+struct pnp_protocol;
+
+struct pnp_id;
+
+struct pnp_card {
+ struct device dev;
+ unsigned char number;
+ struct list_head global_list;
+ struct list_head protocol_list;
+ struct list_head devices;
+ struct pnp_protocol *protocol;
+ struct pnp_id *id;
+ char name[50];
+ unsigned char pnpver;
+ unsigned char productver;
+ unsigned int serial;
+ unsigned char checksum;
+ struct proc_dir_entry *procdir;
+};
+
+struct pnp_dev;
+
+struct pnp_protocol {
+ struct list_head protocol_list;
+ char *name;
+ int (*get)(struct pnp_dev *);
+ int (*set)(struct pnp_dev *);
+ int (*disable)(struct pnp_dev *);
+ bool (*can_wakeup)(struct pnp_dev *);
+ int (*suspend)(struct pnp_dev *, pm_message_t);
+ int (*resume)(struct pnp_dev *);
+ unsigned char number;
+ struct device dev;
+ struct list_head cards;
+ struct list_head devices;
+};
+
+struct pnp_id {
+ char id[8];
+ struct pnp_id *next;
+};
+
+struct pnp_card_driver;
+
+struct pnp_card_link {
+ struct pnp_card *card;
+ struct pnp_card_driver *driver;
+ void *driver_data;
+ pm_message_t pm_state;
+};
+
+struct pnp_driver {
+ const char *name;
+ const struct pnp_device_id *id_table;
+ unsigned int flags;
+ int (*probe)(struct pnp_dev *, const struct pnp_device_id *);
+ void (*remove)(struct pnp_dev *);
+ void (*shutdown)(struct pnp_dev *);
+ int (*suspend)(struct pnp_dev *, pm_message_t);
+ int (*resume)(struct pnp_dev *);
+ struct device_driver driver;
+};
+
+struct pnp_card_driver {
+ struct list_head global_list;
+ char *name;
+ const struct pnp_card_device_id *id_table;
+ unsigned int flags;
+ int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *);
+ void (*remove)(struct pnp_card_link *);
+ int (*suspend)(struct pnp_card_link *, pm_message_t);
+ int (*resume)(struct pnp_card_link *);
+ struct pnp_driver link;
+};
+
+struct pnp_dev {
+ struct device dev;
+ u64 dma_mask;
+ unsigned int number;
+ int status;
+ struct list_head global_list;
+ struct list_head protocol_list;
+ struct list_head card_list;
+ struct list_head rdev_list;
+ struct pnp_protocol *protocol;
+ struct pnp_card *card;
+ struct pnp_driver *driver;
+ struct pnp_card_link *card_link;
+ struct pnp_id *id;
+ int active;
+ int capabilities;
+ unsigned int num_dependent_sets;
+ struct list_head resources;
+ struct list_head options;
+ char name[50];
+ int flags;
+ struct proc_dir_entry *procent;
+ void *data;
+};
+
+struct sfi_rtc_table_entry {
+ u64 phys_addr;
+ u32 irq;
+} __attribute__((packed));
+
+enum intel_mid_cpu_type {
+ INTEL_MID_CPU_CHIP_PENWELL = 2,
+ INTEL_MID_CPU_CHIP_CLOVERVIEW = 3,
+ INTEL_MID_CPU_CHIP_TANGIER = 4,
+};
+
+enum intel_mid_timer_options {
+ INTEL_MID_TIMER_DEFAULT = 0,
+ INTEL_MID_TIMER_APBT_ONLY = 1,
+ INTEL_MID_TIMER_LAPIC_APBT = 2,
+};
+
+typedef struct ldttss_desc tss_desc;
+
+enum idle_boot_override {
+ IDLE_NO_OVERRIDE = 0,
+ IDLE_HALT = 1,
+ IDLE_NOMWAIT = 2,
+ IDLE_POLL = 3,
+};
+
+enum tick_broadcast_mode {
+ TICK_BROADCAST_OFF = 0,
+ TICK_BROADCAST_ON = 1,
+ TICK_BROADCAST_FORCE = 2,
+};
+
+enum tick_broadcast_state {
+ TICK_BROADCAST_EXIT = 0,
+ TICK_BROADCAST_ENTER = 1,
+};
+
+struct cpuidle_state_usage {
+ long long unsigned int disable;
+ long long unsigned int usage;
+ u64 time_ns;
+ long long unsigned int above;
+ long long unsigned int below;
+};
+
+struct cpuidle_driver_kobj;
+
+struct cpuidle_state_kobj;
+
+struct cpuidle_device_kobj;
+
+struct cpuidle_device {
+ unsigned int registered: 1;
+ unsigned int enabled: 1;
+ unsigned int poll_time_limit: 1;
+ unsigned int cpu;
+ ktime_t next_hrtimer;
+ int last_state_idx;
+ u64 last_residency_ns;
+ u64 poll_limit_ns;
+ u64 forced_idle_latency_limit_ns;
+ struct cpuidle_state_usage states_usage[10];
+ struct cpuidle_state_kobj *kobjs[10];
+ struct cpuidle_driver_kobj *kobj_driver;
+ struct cpuidle_device_kobj *kobj_dev;
+ struct list_head device_list;
+};
+
+struct inactive_task_frame {
+ long unsigned int r15;
+ long unsigned int r14;
+ long unsigned int r13;
+ long unsigned int r12;
+ long unsigned int bx;
+ long unsigned int bp;
+ long unsigned int ret_addr;
+};
+
+struct fork_frame {
+ struct inactive_task_frame frame;
+ struct pt_regs regs;
+};
+
+struct ssb_state {
+ struct ssb_state *shared_state;
+ raw_spinlock_t lock;
+ unsigned int disable_state;
+ long unsigned int local_state;
+};
+
+struct trace_event_raw_x86_fpu {
+ struct trace_entry ent;
+ struct fpu *fpu;
+ bool load_fpu;
+ u64 xfeatures;
+ u64 xcomp_bv;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_x86_fpu {};
+
+typedef void (*btf_trace_x86_fpu_before_save)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_after_save)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_before_restore)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_after_restore)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_regs_activated)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_regs_deactivated)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_init_state)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_dropped)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_copy_src)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_copy_dst)(void *, struct fpu *);
+
+typedef void (*btf_trace_x86_fpu_xstate_check_failed)(void *, struct fpu *);
+
+typedef struct fpu *pto_T_____9;
+
+struct user_regset;
+
+typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *);
+
+typedef int user_regset_get_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, void *, void *);
+
+typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *);
+
+typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int);
+
+typedef unsigned int user_regset_get_size_fn(struct task_struct *, const struct user_regset *);
+
+struct user_regset {
+ user_regset_get_fn *get;
+ user_regset_set_fn *set;
+ user_regset_active_fn *active;
+ user_regset_writeback_fn *writeback;
+ user_regset_get_size_fn *get_size;
+ unsigned int n;
+ unsigned int size;
+ unsigned int align;
+ unsigned int bias;
+ unsigned int core_note_type;
+};
+
+struct _fpx_sw_bytes {
+ __u32 magic1;
+ __u32 extended_size;
+ __u64 xfeatures;
+ __u32 xstate_size;
+ __u32 padding[7];
+};
+
+struct _fpreg {
+ __u16 significand[4];
+ __u16 exponent;
+};
+
+struct _fpxreg {
+ __u16 significand[4];
+ __u16 exponent;
+ __u16 padding[3];
+};
+
+struct _xmmreg {
+ __u32 element[4];
+};
+
+struct _fpstate_32 {
+ __u32 cw;
+ __u32 sw;
+ __u32 tag;
+ __u32 ipoff;
+ __u32 cssel;
+ __u32 dataoff;
+ __u32 datasel;
+ struct _fpreg _st[8];
+ __u16 status;
+ __u16 magic;
+ __u32 _fxsr_env[6];
+ __u32 mxcsr;
+ __u32 reserved;
+ struct _fpxreg _fxsr_st[8];
+ struct _xmmreg _xmm[8];
+ union {
+ __u32 padding1[44];
+ __u32 padding[44];
+ };
+ union {
+ __u32 padding2[12];
+ struct _fpx_sw_bytes sw_reserved;
+ };
+};
+
+struct user_i387_ia32_struct {
+ u32 cwd;
+ u32 swd;
+ u32 twd;
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ u32 st_space[20];
+};
+
+struct user_regset_view {
+ const char *name;
+ const struct user_regset *regsets;
+ unsigned int n;
+ u32 e_flags;
+ u16 e_machine;
+ u8 ei_osabi;
+};
+
+enum x86_regset {
+ REGSET_GENERAL = 0,
+ REGSET_FP = 1,
+ REGSET_XFP = 2,
+ REGSET_IOPERM64 = 2,
+ REGSET_XSTATE = 3,
+ REGSET_TLS = 4,
+ REGSET_IOPERM32 = 5,
+};
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+typedef bool (*stack_trace_consume_fn)(void *, long unsigned int, bool);
+
+struct stack_frame_user {
+ const void *next_fp;
+ long unsigned int ret_addr;
+};
+
+enum cache_type {
+ CACHE_TYPE_NOCACHE = 0,
+ CACHE_TYPE_INST = 1,
+ CACHE_TYPE_DATA = 2,
+ CACHE_TYPE_SEPARATE = 3,
+ CACHE_TYPE_UNIFIED = 4,
+};
+
+struct cacheinfo {
+ unsigned int id;
+ enum cache_type type;
+ unsigned int level;
+ unsigned int coherency_line_size;
+ unsigned int number_of_sets;
+ unsigned int ways_of_associativity;
+ unsigned int physical_line_partition;
+ unsigned int size;
+ cpumask_t shared_cpu_map;
+ unsigned int attributes;
+ void *fw_token;
+ bool disable_sysfs;
+ void *priv;
+};
+
+struct cpu_cacheinfo {
+ struct cacheinfo *info_list;
+ unsigned int num_levels;
+ unsigned int num_leaves;
+ bool cpu_map_populated;
+};
+
+struct amd_nb_bus_dev_range {
+ u8 bus;
+ u8 dev_base;
+ u8 dev_limit;
+};
+
+struct amd_l3_cache {
+ unsigned int indices;
+ u8 subcaches[4];
+};
+
+struct threshold_block {
+ unsigned int block;
+ unsigned int bank;
+ unsigned int cpu;
+ u32 address;
+ u16 interrupt_enable;
+ bool interrupt_capable;
+ u16 threshold_limit;
+ struct kobject kobj;
+ struct list_head miscj;
+};
+
+struct threshold_bank {
+ struct kobject *kobj;
+ struct threshold_block *blocks;
+ refcount_t cpus;
+ unsigned int shared;
+};
+
+struct amd_northbridge {
+ struct pci_dev *root;
+ struct pci_dev *misc;
+ struct pci_dev *link;
+ struct amd_l3_cache l3_cache;
+ struct threshold_bank *bank4;
+};
+
+struct cpu_dev {
+ const char *c_vendor;
+ const char *c_ident[2];
+ void (*c_early_init)(struct cpuinfo_x86 *);
+ void (*c_bsp_init)(struct cpuinfo_x86 *);
+ void (*c_init)(struct cpuinfo_x86 *);
+ void (*c_identify)(struct cpuinfo_x86 *);
+ void (*c_detect_tlb)(struct cpuinfo_x86 *);
+ int c_x86_vendor;
+};
+
+enum tsx_ctrl_states {
+ TSX_CTRL_ENABLE = 0,
+ TSX_CTRL_DISABLE = 1,
+ TSX_CTRL_NOT_SUPPORTED = 2,
+};
+
+struct _cache_table {
+ unsigned char descriptor;
+ char cache_type;
+ short int size;
+};
+
+enum _cache_type {
+ CTYPE_NULL = 0,
+ CTYPE_DATA = 1,
+ CTYPE_INST = 2,
+ CTYPE_UNIFIED = 3,
+};
+
+union _cpuid4_leaf_eax {
+ struct {
+ enum _cache_type type: 5;
+ unsigned int level: 3;
+ unsigned int is_self_initializing: 1;
+ unsigned int is_fully_associative: 1;
+ unsigned int reserved: 4;
+ unsigned int num_threads_sharing: 12;
+ unsigned int num_cores_on_die: 6;
+ } split;
+ u32 full;
+};
+
+union _cpuid4_leaf_ebx {
+ struct {
+ unsigned int coherency_line_size: 12;
+ unsigned int physical_line_partition: 10;
+ unsigned int ways_of_associativity: 10;
+ } split;
+ u32 full;
+};
+
+union _cpuid4_leaf_ecx {
+ struct {
+ unsigned int number_of_sets: 32;
+ } split;
+ u32 full;
+};
+
+struct _cpuid4_info_regs {
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned int id;
+ long unsigned int size;
+ struct amd_northbridge *nb;
+};
+
+union l1_cache {
+ struct {
+ unsigned int line_size: 8;
+ unsigned int lines_per_tag: 8;
+ unsigned int assoc: 8;
+ unsigned int size_in_kb: 8;
+ };
+ unsigned int val;
+};
+
+union l2_cache {
+ struct {
+ unsigned int line_size: 8;
+ unsigned int lines_per_tag: 4;
+ unsigned int assoc: 4;
+ unsigned int size_in_kb: 16;
+ };
+ unsigned int val;
+};
+
+union l3_cache {
+ struct {
+ unsigned int line_size: 8;
+ unsigned int lines_per_tag: 4;
+ unsigned int assoc: 4;
+ unsigned int res: 2;
+ unsigned int size_encoded: 14;
+ };
+ unsigned int val;
+};
+
+struct cpuid_bit {
+ u16 feature;
+ u8 reg;
+ u8 bit;
+ u32 level;
+ u32 sub_leaf;
+};
+
+enum cpuid_leafs {
+ CPUID_1_EDX = 0,
+ CPUID_8000_0001_EDX = 1,
+ CPUID_8086_0001_EDX = 2,
+ CPUID_LNX_1 = 3,
+ CPUID_1_ECX = 4,
+ CPUID_C000_0001_EDX = 5,
+ CPUID_8000_0001_ECX = 6,
+ CPUID_LNX_2 = 7,
+ CPUID_LNX_3 = 8,
+ CPUID_7_0_EBX = 9,
+ CPUID_D_1_EAX = 10,
+ CPUID_LNX_4 = 11,
+ CPUID_7_1_EAX = 12,
+ CPUID_8000_0008_EBX = 13,
+ CPUID_6_EAX = 14,
+ CPUID_8000_000A_EDX = 15,
+ CPUID_7_ECX = 16,
+ CPUID_8000_0007_EBX = 17,
+ CPUID_7_EDX = 18,
+};
+
+struct cpuid_dependent_feature {
+ u32 feature;
+ u32 level;
+};
+
+enum spectre_v2_mitigation {
+ SPECTRE_V2_NONE = 0,
+ SPECTRE_V2_RETPOLINE_GENERIC = 1,
+ SPECTRE_V2_RETPOLINE_AMD = 2,
+ SPECTRE_V2_IBRS_ENHANCED = 3,
+};
+
+enum spectre_v2_user_mitigation {
+ SPECTRE_V2_USER_NONE = 0,
+ SPECTRE_V2_USER_STRICT = 1,
+ SPECTRE_V2_USER_STRICT_PREFERRED = 2,
+ SPECTRE_V2_USER_PRCTL = 3,
+ SPECTRE_V2_USER_SECCOMP = 4,
+};
+
+enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE = 0,
+ SPEC_STORE_BYPASS_DISABLE = 1,
+ SPEC_STORE_BYPASS_PRCTL = 2,
+ SPEC_STORE_BYPASS_SECCOMP = 3,
+};
+
+enum mds_mitigations {
+ MDS_MITIGATION_OFF = 0,
+ MDS_MITIGATION_FULL = 1,
+ MDS_MITIGATION_VMWERV = 2,
+};
+
+enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_AUTO = 0,
+ VMENTER_L1D_FLUSH_NEVER = 1,
+ VMENTER_L1D_FLUSH_COND = 2,
+ VMENTER_L1D_FLUSH_ALWAYS = 3,
+ VMENTER_L1D_FLUSH_EPT_DISABLED = 4,
+ VMENTER_L1D_FLUSH_NOT_REQUIRED = 5,
+};
+
+enum taa_mitigations {
+ TAA_MITIGATION_OFF = 0,
+ TAA_MITIGATION_UCODE_NEEDED = 1,
+ TAA_MITIGATION_VERW = 2,
+ TAA_MITIGATION_TSX_DISABLED = 3,
+};
+
+enum srbds_mitigations {
+ SRBDS_MITIGATION_OFF = 0,
+ SRBDS_MITIGATION_UCODE_NEEDED = 1,
+ SRBDS_MITIGATION_FULL = 2,
+ SRBDS_MITIGATION_TSX_OFF = 3,
+ SRBDS_MITIGATION_HYPERVISOR = 4,
+};
+
+enum spectre_v1_mitigation {
+ SPECTRE_V1_MITIGATION_NONE = 0,
+ SPECTRE_V1_MITIGATION_AUTO = 1,
+};
+
+enum spectre_v2_mitigation_cmd {
+ SPECTRE_V2_CMD_NONE = 0,
+ SPECTRE_V2_CMD_AUTO = 1,
+ SPECTRE_V2_CMD_FORCE = 2,
+ SPECTRE_V2_CMD_RETPOLINE = 3,
+ SPECTRE_V2_CMD_RETPOLINE_GENERIC = 4,
+ SPECTRE_V2_CMD_RETPOLINE_AMD = 5,
+};
+
+enum spectre_v2_user_cmd {
+ SPECTRE_V2_USER_CMD_NONE = 0,
+ SPECTRE_V2_USER_CMD_AUTO = 1,
+ SPECTRE_V2_USER_CMD_FORCE = 2,
+ SPECTRE_V2_USER_CMD_PRCTL = 3,
+ SPECTRE_V2_USER_CMD_PRCTL_IBPB = 4,
+ SPECTRE_V2_USER_CMD_SECCOMP = 5,
+ SPECTRE_V2_USER_CMD_SECCOMP_IBPB = 6,
+};
+
+enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_NONE = 0,
+ SPEC_STORE_BYPASS_CMD_AUTO = 1,
+ SPEC_STORE_BYPASS_CMD_ON = 2,
+ SPEC_STORE_BYPASS_CMD_PRCTL = 3,
+ SPEC_STORE_BYPASS_CMD_SECCOMP = 4,
+};
+
+enum hk_flags {
+ HK_FLAG_TIMER = 1,
+ HK_FLAG_RCU = 2,
+ HK_FLAG_MISC = 4,
+ HK_FLAG_SCHED = 8,
+ HK_FLAG_TICK = 16,
+ HK_FLAG_DOMAIN = 32,
+ HK_FLAG_WQ = 64,
+ HK_FLAG_MANAGED_IRQ = 128,
+};
+
+struct aperfmperf_sample {
+ unsigned int khz;
+ ktime_t time;
+ u64 aperf;
+ u64 mperf;
+};
+
+struct cpuid_dep {
+ unsigned int feature;
+ unsigned int depends;
+};
+
+enum vmx_feature_leafs {
+ MISC_FEATURES = 0,
+ PRIMARY_CTLS = 1,
+ SECONDARY_CTLS = 2,
+ NR_VMX_FEATURE_WORDS = 3,
+};
+
+struct _tlb_table {
+ unsigned char descriptor;
+ char tlb_type;
+ unsigned int entries;
+ char info[128];
+};
+
+enum split_lock_detect_state {
+ sld_off = 0,
+ sld_warn = 1,
+ sld_fatal = 2,
+};
+
+struct sku_microcode {
+ u8 model;
+ u8 stepping;
+ u32 microcode;
+};
+
+struct cpuid_regs {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+enum pconfig_target {
+ INVALID_TARGET = 0,
+ MKTME_TARGET = 1,
+ PCONFIG_TARGET_NR = 2,
+};
+
+enum {
+ PCONFIG_CPUID_SUBLEAF_INVALID = 0,
+ PCONFIG_CPUID_SUBLEAF_TARGETID = 1,
+};
+
+struct mtrr_var_range {
+ __u32 base_lo;
+ __u32 base_hi;
+ __u32 mask_lo;
+ __u32 mask_hi;
+};
+
+typedef __u8 mtrr_type;
+
+struct mtrr_state_type {
+ struct mtrr_var_range var_ranges[256];
+ mtrr_type fixed_ranges[88];
+ unsigned char enabled;
+ unsigned char have_fixed;
+ mtrr_type def_type;
+};
+
+struct mtrr_ops {
+ u32 vendor;
+ u32 use_intel_if;
+ void (*set)(unsigned int, long unsigned int, long unsigned int, mtrr_type);
+ void (*set_all)();
+ void (*get)(unsigned int, long unsigned int *, long unsigned int *, mtrr_type *);
+ int (*get_free_region)(long unsigned int, long unsigned int, int);
+ int (*validate_add_page)(long unsigned int, long unsigned int, unsigned int);
+ int (*have_wrcomb)();
+};
+
+struct set_mtrr_data {
+ long unsigned int smp_base;
+ long unsigned int smp_size;
+ unsigned int smp_reg;
+ mtrr_type smp_type;
+};
+
+struct mtrr_value {
+ mtrr_type ltype;
+ long unsigned int lbase;
+ long unsigned int lsize;
+};
+
+struct proc_ops {
+ unsigned int proc_flags;
+ int (*proc_open)(struct inode *, struct file *);
+ ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *);
+ ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *);
+ loff_t (*proc_lseek)(struct file *, loff_t, int);
+ int (*proc_release)(struct inode *, struct file *);
+ __poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
+ long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int);
+ int (*proc_mmap)(struct file *, struct vm_area_struct *);
+ long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+};
+
+struct mtrr_sentry {
+ __u64 base;
+ __u32 size;
+ __u32 type;
+};
+
+struct mtrr_gentry {
+ __u64 base;
+ __u32 size;
+ __u32 regnum;
+ __u32 type;
+ __u32 _pad;
+};
+
+struct fixed_range_block {
+ int base_msr;
+ int ranges;
+};
+
+struct var_mtrr_range_state {
+ long unsigned int base_pfn;
+ long unsigned int size_pfn;
+ mtrr_type type;
+};
+
+struct vmware_steal_time {
+ union {
+ uint64_t clock;
+ struct {
+ uint32_t clock_low;
+ uint32_t clock_high;
+ };
+ };
+ uint64_t reserved[7];
+};
+
+enum mp_irq_source_types {
+ mp_INT = 0,
+ mp_NMI = 1,
+ mp_SMI = 2,
+ mp_ExtINT = 3,
+};
+
+struct IO_APIC_route_entry {
+ __u32 vector: 8;
+ __u32 delivery_mode: 3;
+ __u32 dest_mode: 1;
+ __u32 delivery_status: 1;
+ __u32 polarity: 1;
+ __u32 irr: 1;
+ __u32 trigger: 1;
+ __u32 mask: 1;
+ __u32 __reserved_2: 15;
+ __u32 __reserved_3: 24;
+ __u32 dest: 8;
+};
+
+typedef u64 acpi_physical_address;
+
+typedef u32 acpi_status;
+
+typedef void *acpi_handle;
+
+typedef u8 acpi_adr_space_type;
+
+struct acpi_subtable_header {
+ u8 type;
+ u8 length;
+};
+
+struct acpi_table_boot {
+ struct acpi_table_header header;
+ u8 cmos_index;
+ u8 reserved[3];
+};
+
+struct acpi_hmat_structure {
+ u16 type;
+ u16 reserved;
+ u32 length;
+};
+
+struct acpi_table_hpet {
+ struct acpi_table_header header;
+ u32 id;
+ struct acpi_generic_address address;
+ u8 sequence;
+ u16 minimum_tick;
+ u8 flags;
+} __attribute__((packed));
+
+struct acpi_table_madt {
+ struct acpi_table_header header;
+ u32 address;
+ u32 flags;
+};
+
+enum acpi_madt_type {
+ ACPI_MADT_TYPE_LOCAL_APIC = 0,
+ ACPI_MADT_TYPE_IO_APIC = 1,
+ ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2,
+ ACPI_MADT_TYPE_NMI_SOURCE = 3,
+ ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4,
+ ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5,
+ ACPI_MADT_TYPE_IO_SAPIC = 6,
+ ACPI_MADT_TYPE_LOCAL_SAPIC = 7,
+ ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
+ ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
+ ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
+ ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
+ ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
+ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
+ ACPI_MADT_TYPE_RESERVED = 16,
+};
+
+struct acpi_madt_local_apic {
+ struct acpi_subtable_header header;
+ u8 processor_id;
+ u8 id;
+ u32 lapic_flags;
+};
+
+struct acpi_madt_io_apic {
+ struct acpi_subtable_header header;
+ u8 id;
+ u8 reserved;
+ u32 address;
+ u32 global_irq_base;
+};
+
+struct acpi_madt_interrupt_override {
+ struct acpi_subtable_header header;
+ u8 bus;
+ u8 source_irq;
+ u32 global_irq;
+ u16 inti_flags;
+} __attribute__((packed));
+
+struct acpi_madt_nmi_source {
+ struct acpi_subtable_header header;
+ u16 inti_flags;
+ u32 global_irq;
+};
+
+struct acpi_madt_local_apic_nmi {
+ struct acpi_subtable_header header;
+ u8 processor_id;
+ u16 inti_flags;
+ u8 lint;
+} __attribute__((packed));
+
+struct acpi_madt_local_apic_override {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u64 address;
+} __attribute__((packed));
+
+struct acpi_madt_local_sapic {
+ struct acpi_subtable_header header;
+ u8 processor_id;
+ u8 id;
+ u8 eid;
+ u8 reserved[3];
+ u32 lapic_flags;
+ u32 uid;
+ char uid_string[1];
+} __attribute__((packed));
+
+struct acpi_madt_local_x2apic {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 local_apic_id;
+ u32 lapic_flags;
+ u32 uid;
+};
+
+struct acpi_madt_local_x2apic_nmi {
+ struct acpi_subtable_header header;
+ u16 inti_flags;
+ u32 uid;
+ u8 lint;
+ u8 reserved[3];
+};
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+};
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *, const long unsigned int);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ int count;
+};
+
+typedef u32 phys_cpuid_t;
+
+struct gpio_desc;
+
+enum irq_alloc_type {
+ X86_IRQ_ALLOC_TYPE_IOAPIC = 1,
+ X86_IRQ_ALLOC_TYPE_HPET = 2,
+ X86_IRQ_ALLOC_TYPE_MSI = 3,
+ X86_IRQ_ALLOC_TYPE_MSIX = 4,
+ X86_IRQ_ALLOC_TYPE_DMAR = 5,
+ X86_IRQ_ALLOC_TYPE_UV = 6,
+};
+
+struct irq_alloc_info {
+ enum irq_alloc_type type;
+ u32 flags;
+ const struct cpumask *mask;
+ union {
+ int unused;
+ struct {
+ int hpet_id;
+ int hpet_index;
+ void *hpet_data;
+ };
+ struct {
+ struct pci_dev *msi_dev;
+ irq_hw_number_t msi_hwirq;
+ };
+ struct {
+ int ioapic_id;
+ int ioapic_pin;
+ int ioapic_node;
+ u32 ioapic_trigger: 1;
+ u32 ioapic_polarity: 1;
+ u32 ioapic_valid: 1;
+ struct IO_APIC_route_entry *ioapic_entry;
+ };
+ };
+};
+
+struct circ_buf {
+ char *buf;
+ int head;
+ int tail;
+};
+
+struct serial_icounter_struct {
+ int cts;
+ int dsr;
+ int rng;
+ int dcd;
+ int rx;
+ int tx;
+ int frame;
+ int overrun;
+ int parity;
+ int brk;
+ int buf_overrun;
+ int reserved[9];
+};
+
+struct serial_struct {
+ int type;
+ int line;
+ unsigned int port;
+ int irq;
+ int flags;
+ int xmit_fifo_size;
+ int custom_divisor;
+ int baud_base;
+ short unsigned int close_delay;
+ char io_type;
+ char reserved_char[1];
+ int hub6;
+ short unsigned int closing_wait;
+ short unsigned int closing_wait2;
+ unsigned char *iomem_base;
+ short unsigned int iomem_reg_shift;
+ unsigned int port_high;
+ long unsigned int iomap_base;
+};
+
+struct serial_rs485 {
+ __u32 flags;
+ __u32 delay_rts_before_send;
+ __u32 delay_rts_after_send;
+ __u32 padding[5];
+};
+
+struct serial_iso7816 {
+ __u32 flags;
+ __u32 tg;
+ __u32 sc_fi;
+ __u32 sc_di;
+ __u32 clk;
+ __u32 reserved[5];
+};
+
+struct uart_port;
+
+struct uart_ops {
+ unsigned int (*tx_empty)(struct uart_port *);
+ void (*set_mctrl)(struct uart_port *, unsigned int);
+ unsigned int (*get_mctrl)(struct uart_port *);
+ void (*stop_tx)(struct uart_port *);
+ void (*start_tx)(struct uart_port *);
+ void (*throttle)(struct uart_port *);
+ void (*unthrottle)(struct uart_port *);
+ void (*send_xchar)(struct uart_port *, char);
+ void (*stop_rx)(struct uart_port *);
+ void (*enable_ms)(struct uart_port *);
+ void (*break_ctl)(struct uart_port *, int);
+ int (*startup)(struct uart_port *);
+ void (*shutdown)(struct uart_port *);
+ void (*flush_buffer)(struct uart_port *);
+ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
+ void (*pm)(struct uart_port *, unsigned int, unsigned int);
+ const char * (*type)(struct uart_port *);
+ void (*release_port)(struct uart_port *);
+ int (*request_port)(struct uart_port *);
+ void (*config_port)(struct uart_port *, int);
+ int (*verify_port)(struct uart_port *, struct serial_struct *);
+ int (*ioctl)(struct uart_port *, unsigned int, long unsigned int);
+};
+
+struct uart_icount {
+ __u32 cts;
+ __u32 dsr;
+ __u32 rng;
+ __u32 dcd;
+ __u32 rx;
+ __u32 tx;
+ __u32 frame;
+ __u32 overrun;
+ __u32 parity;
+ __u32 brk;
+ __u32 buf_overrun;
+};
+
+typedef unsigned int upf_t;
+
+typedef unsigned int upstat_t;
+
+struct uart_state;
+
+struct uart_port {
+ spinlock_t lock;
+ long unsigned int iobase;
+ unsigned char *membase;
+ unsigned int (*serial_in)(struct uart_port *, int);
+ void (*serial_out)(struct uart_port *, int, int);
+ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
+ unsigned int (*get_mctrl)(struct uart_port *);
+ void (*set_mctrl)(struct uart_port *, unsigned int);
+ unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *);
+ void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int);
+ int (*startup)(struct uart_port *);
+ void (*shutdown)(struct uart_port *);
+ void (*throttle)(struct uart_port *);
+ void (*unthrottle)(struct uart_port *);
+ int (*handle_irq)(struct uart_port *);
+ void (*pm)(struct uart_port *, unsigned int, unsigned int);
+ void (*handle_break)(struct uart_port *);
+ int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
+ int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *);
+ unsigned int irq;
+ long unsigned int irqflags;
+ unsigned int uartclk;
+ unsigned int fifosize;
+ unsigned char x_char;
+ unsigned char regshift;
+ unsigned char iotype;
+ unsigned char quirks;
+ unsigned int read_status_mask;
+ unsigned int ignore_status_mask;
+ struct uart_state *state;
+ struct uart_icount icount;
+ struct console *cons;
+ upf_t flags;
+ upstat_t status;
+ int hw_stopped;
+ unsigned int mctrl;
+ unsigned int timeout;
+ unsigned int type;
+ const struct uart_ops *ops;
+ unsigned int custom_divisor;
+ unsigned int line;
+ unsigned int minor;
+ resource_size_t mapbase;
+ resource_size_t mapsize;
+ struct device *dev;
+ long unsigned int sysrq;
+ unsigned int sysrq_ch;
+ unsigned char has_sysrq;
+ unsigned char sysrq_seq;
+ unsigned char hub6;
+ unsigned char suspended;
+ const char *name;
+ struct attribute_group *attr_group;
+ const struct attribute_group **tty_groups;
+ struct serial_rs485 rs485;
+ struct gpio_desc *rs485_term_gpio;
+ struct serial_iso7816 iso7816;
+ void *private_data;
+};
+
+enum uart_pm_state {
+ UART_PM_STATE_ON = 0,
+ UART_PM_STATE_OFF = 3,
+ UART_PM_STATE_UNDEFINED = 4,
+};
+
+struct uart_state {
+ struct tty_port port;
+ enum uart_pm_state pm_state;
+ struct circ_buf xmit;
+ atomic_t refcount;
+ wait_queue_head_t remove_wait;
+ struct uart_port *uart_port;
+};
+
+struct earlycon_device {
+ struct console *con;
+ struct uart_port port;
+ char options[16];
+ unsigned int baud;
+};
+
+struct earlycon_id {
+ char name[15];
+ char name_term;
+ char compatible[128];
+ int (*setup)(struct earlycon_device *, const char *);
+};
+
+enum ioapic_domain_type {
+ IOAPIC_DOMAIN_INVALID = 0,
+ IOAPIC_DOMAIN_LEGACY = 1,
+ IOAPIC_DOMAIN_STRICT = 2,
+ IOAPIC_DOMAIN_DYNAMIC = 3,
+};
+
+struct ioapic_domain_cfg {
+ enum ioapic_domain_type type;
+ const struct irq_domain_ops *ops;
+ struct device_node *dev;
+};
+
+struct thermal_cooling_device_ops;
+
+struct thermal_cooling_device {
+ int id;
+ char type[20];
+ struct device device;
+ struct device_node *np;
+ void *devdata;
+ void *stats;
+ const struct thermal_cooling_device_ops *ops;
+ bool updated;
+ struct mutex lock;
+ struct list_head thermal_instances;
+ struct list_head node;
+};
+
+enum thermal_device_mode {
+ THERMAL_DEVICE_DISABLED = 0,
+ THERMAL_DEVICE_ENABLED = 1,
+};
+
+enum thermal_trip_type {
+ THERMAL_TRIP_ACTIVE = 0,
+ THERMAL_TRIP_PASSIVE = 1,
+ THERMAL_TRIP_HOT = 2,
+ THERMAL_TRIP_CRITICAL = 3,
+};
+
+enum thermal_trend {
+ THERMAL_TREND_STABLE = 0,
+ THERMAL_TREND_RAISING = 1,
+ THERMAL_TREND_DROPPING = 2,
+ THERMAL_TREND_RAISE_FULL = 3,
+ THERMAL_TREND_DROP_FULL = 4,
+};
+
+enum thermal_notify_event {
+ THERMAL_EVENT_UNSPECIFIED = 0,
+ THERMAL_EVENT_TEMP_SAMPLE = 1,
+ THERMAL_TRIP_VIOLATED = 2,
+ THERMAL_TRIP_CHANGED = 3,
+ THERMAL_DEVICE_DOWN = 4,
+ THERMAL_DEVICE_UP = 5,
+ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6,
+ THERMAL_TABLE_CHANGED = 7,
+};
+
+struct thermal_zone_device;
+
+struct thermal_zone_device_ops {
+ int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *);
+ int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *);
+ int (*get_temp)(struct thermal_zone_device *, int *);
+ int (*set_trips)(struct thermal_zone_device *, int, int);
+ int (*get_mode)(struct thermal_zone_device *, enum thermal_device_mode *);
+ int (*set_mode)(struct thermal_zone_device *, enum thermal_device_mode);
+ int (*get_trip_type)(struct thermal_zone_device *, int, enum thermal_trip_type *);
+ int (*get_trip_temp)(struct thermal_zone_device *, int, int *);
+ int (*set_trip_temp)(struct thermal_zone_device *, int, int);
+ int (*get_trip_hyst)(struct thermal_zone_device *, int, int *);
+ int (*set_trip_hyst)(struct thermal_zone_device *, int, int);
+ int (*get_crit_temp)(struct thermal_zone_device *, int *);
+ int (*set_emul_temp)(struct thermal_zone_device *, int);
+ int (*get_trend)(struct thermal_zone_device *, int, enum thermal_trend *);
+ int (*notify)(struct thermal_zone_device *, int, enum thermal_trip_type);
+};
+
+struct thermal_attr;
+
+struct thermal_zone_params;
+
+struct thermal_governor;
+
+struct thermal_zone_device {
+ int id;
+ char type[20];
+ struct device device;
+ struct attribute_group trips_attribute_group;
+ struct thermal_attr *trip_temp_attrs;
+ struct thermal_attr *trip_type_attrs;
+ struct thermal_attr *trip_hyst_attrs;
+ void *devdata;
+ int trips;
+ long unsigned int trips_disabled;
+ int passive_delay;
+ int polling_delay;
+ int temperature;
+ int last_temperature;
+ int emul_temperature;
+ int passive;
+ int prev_low_trip;
+ int prev_high_trip;
+ unsigned int forced_passive;
+ atomic_t need_update;
+ struct thermal_zone_device_ops *ops;
+ struct thermal_zone_params *tzp;
+ struct thermal_governor *governor;
+ void *governor_data;
+ struct list_head thermal_instances;
+ struct ida ida;
+ struct mutex lock;
+ struct list_head node;
+ struct delayed_work poll_queue;
+ enum thermal_notify_event notify_event;
+};
+
+struct thermal_cooling_device_ops {
+ int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *);
+ int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *);
+ int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int);
+ int (*get_requested_power)(struct thermal_cooling_device *, struct thermal_zone_device *, u32 *);
+ int (*state2power)(struct thermal_cooling_device *, struct thermal_zone_device *, long unsigned int, u32 *);
+ int (*power2state)(struct thermal_cooling_device *, struct thermal_zone_device *, u32, long unsigned int *);
+};
+
+struct thermal_bind_params;
+
+struct thermal_zone_params {
+ char governor_name[20];
+ bool no_hwmon;
+ int num_tbps;
+ struct thermal_bind_params *tbp;
+ u32 sustainable_power;
+ s32 k_po;
+ s32 k_pu;
+ s32 k_i;
+ s32 k_d;
+ s32 integral_cutoff;
+ int slope;
+ int offset;
+};
+
+struct thermal_governor {
+ char name[20];
+ int (*bind_to_tz)(struct thermal_zone_device *);
+ void (*unbind_from_tz)(struct thermal_zone_device *);
+ int (*throttle)(struct thermal_zone_device *, int);
+ struct list_head governor_list;
+};
+
+struct thermal_bind_params {
+ struct thermal_cooling_device *cdev;
+ int weight;
+ int trip_mask;
+ long unsigned int *binding_limits;
+ int (*match)(struct thermal_zone_device *, struct thermal_cooling_device *);
+};
+
+struct acpi_processor_cx {
+ u8 valid;
+ u8 type;
+ u32 address;
+ u8 entry_method;
+ u8 index;
+ u32 latency;
+ u8 bm_sts_skip;
+ char desc[32];
+};
+
+struct acpi_lpi_state {
+ u32 min_residency;
+ u32 wake_latency;
+ u32 flags;
+ u32 arch_flags;
+ u32 res_cnt_freq;
+ u32 enable_parent_state;
+ u64 address;
+ u8 index;
+ u8 entry_method;
+ char desc[32];
+};
+
+struct acpi_processor_power {
+ int count;
+ union {
+ struct acpi_processor_cx states[8];
+ struct acpi_lpi_state lpi_states[8];
+ };
+ int timer_broadcast_on_state;
+};
+
+struct acpi_psd_package {
+ u64 num_entries;
+ u64 revision;
+ u64 domain;
+ u64 coord_type;
+ u64 num_processors;
+};
+
+struct acpi_pct_register {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 reserved;
+ u64 address;
+} __attribute__((packed));
+
+struct acpi_processor_px {
+ u64 core_frequency;
+ u64 power;
+ u64 transition_latency;
+ u64 bus_master_latency;
+ u64 control;
+ u64 status;
+};
+
+struct acpi_processor_performance {
+ unsigned int state;
+ unsigned int platform_limit;
+ struct acpi_pct_register control_register;
+ struct acpi_pct_register status_register;
+ short: 16;
+ unsigned int state_count;
+ int: 32;
+ struct acpi_processor_px *states;
+ struct acpi_psd_package domain_info;
+ cpumask_var_t shared_cpu_map;
+ unsigned int shared_type;
+ int: 32;
+} __attribute__((packed));
+
+struct acpi_tsd_package {
+ u64 num_entries;
+ u64 revision;
+ u64 domain;
+ u64 coord_type;
+ u64 num_processors;
+};
+
+struct acpi_processor_tx_tss {
+ u64 freqpercentage;
+ u64 power;
+ u64 transition_latency;
+ u64 control;
+ u64 status;
+};
+
+struct acpi_processor_tx {
+ u16 power;
+ u16 performance;
+};
+
+struct acpi_processor;
+
+struct acpi_processor_throttling {
+ unsigned int state;
+ unsigned int platform_limit;
+ struct acpi_pct_register control_register;
+ struct acpi_pct_register status_register;
+ short: 16;
+ unsigned int state_count;
+ int: 32;
+ struct acpi_processor_tx_tss *states_tss;
+ struct acpi_tsd_package domain_info;
+ cpumask_var_t shared_cpu_map;
+ int (*acpi_processor_get_throttling)(struct acpi_processor *);
+ int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool);
+ u32 address;
+ u8 duty_offset;
+ u8 duty_width;
+ u8 tsd_valid_flag;
+ char: 8;
+ unsigned int shared_type;
+ struct acpi_processor_tx states[16];
+ int: 32;
+} __attribute__((packed));
+
+struct acpi_processor_flags {
+ u8 power: 1;
+ u8 performance: 1;
+ u8 throttling: 1;
+ u8 limit: 1;
+ u8 bm_control: 1;
+ u8 bm_check: 1;
+ u8 has_cst: 1;
+ u8 has_lpi: 1;
+ u8 power_setup_done: 1;
+ u8 bm_rld_set: 1;
+ u8 need_hotplug_init: 1;
+};
+
+struct acpi_processor_lx {
+ int px;
+ int tx;
+};
+
+struct acpi_processor_limit {
+ struct acpi_processor_lx state;
+ struct acpi_processor_lx thermal;
+ struct acpi_processor_lx user;
+};
+
+struct acpi_processor {
+ acpi_handle handle;
+ u32 acpi_id;
+ phys_cpuid_t phys_id;
+ u32 id;
+ u32 pblk;
+ int performance_platform_limit;
+ int throttling_platform_limit;
+ struct acpi_processor_flags flags;
+ struct acpi_processor_power power;
+ struct acpi_processor_performance *performance;
+ struct acpi_processor_throttling throttling;
+ struct acpi_processor_limit limit;
+ struct thermal_cooling_device *cdev;
+ struct device *dev;
+ struct freq_qos_request perflib_req;
+ struct freq_qos_request thermal_req;
+};
+
+struct acpi_processor_errata {
+ u8 smp;
+ struct {
+ u8 throttle: 1;
+ u8 fdma: 1;
+ u8 reserved: 6;
+ u32 bmisx;
+ } piix4;
+};
+
+struct cpuidle_driver;
+
+typedef void (*exitcall_t)();
+
+struct acpi_power_register {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+} __attribute__((packed));
+
+struct cstate_entry {
+ struct {
+ unsigned int eax;
+ unsigned int ecx;
+ } states[8];
+};
+
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs *);
+
+struct pci_ops___2;
+
+struct cpuid_regs_done {
+ struct cpuid_regs regs;
+ struct completion done;
+};
+
+struct intel_early_ops {
+ resource_size_t (*stolen_size)(int, int, int);
+ resource_size_t (*stolen_base)(int, int, int, resource_size_t);
+};
+
+struct chipset {
+ u32 vendor;
+ u32 device;
+ u32 class;
+ u32 class_mask;
+ u32 flags;
+ void (*f)(int, int, int);
+};
+
+typedef void crash_vmclear_fn();
+
+struct sched_domain_shared {
+ atomic_t ref;
+ atomic_t nr_busy_cpus;
+ int has_idle_cores;
+};
+
+struct sched_group;
+
+struct sched_domain {
+ struct sched_domain *parent;
+ struct sched_domain *child;
+ struct sched_group *groups;
+ long unsigned int min_interval;
+ long unsigned int max_interval;
+ unsigned int busy_factor;
+ unsigned int imbalance_pct;
+ unsigned int cache_nice_tries;
+ int nohz_idle;
+ int flags;
+ int level;
+ long unsigned int last_balance;
+ unsigned int balance_interval;
+ unsigned int nr_balance_failed;
+ u64 max_newidle_lb_cost;
+ long unsigned int next_decay_max_lb_cost;
+ u64 avg_scan_cost;
+ char *name;
+ union {
+ void *private;
+ struct callback_head rcu;
+ };
+ struct sched_domain_shared *shared;
+ unsigned int span_weight;
+ long unsigned int span[0];
+};
+
+typedef const struct cpumask * (*sched_domain_mask_f)(int);
+
+typedef int (*sched_domain_flags_f)();
+
+struct sched_group_capacity;
+
+struct sd_data {
+ struct sched_domain **sd;
+ struct sched_domain_shared **sds;
+ struct sched_group **sg;
+ struct sched_group_capacity **sgc;
+};
+
+struct sched_domain_topology_level {
+ sched_domain_mask_f mask;
+ sched_domain_flags_f sd_flags;
+ int flags;
+ int numa_level;
+ struct sd_data data;
+ char *name;
+};
+
+struct tsc_adjust {
+ s64 bootval;
+ s64 adjusted;
+ long unsigned int nextcheck;
+ bool warned;
+};
+
+enum {
+ DUMP_PREFIX_NONE = 0,
+ DUMP_PREFIX_ADDRESS = 1,
+ DUMP_PREFIX_OFFSET = 2,
+};
+
+struct mpf_intel {
+ char signature[4];
+ unsigned int physptr;
+ unsigned char length;
+ unsigned char specification;
+ unsigned char checksum;
+ unsigned char feature1;
+ unsigned char feature2;
+ unsigned char feature3;
+ unsigned char feature4;
+ unsigned char feature5;
+};
+
+struct mpc_ioapic {
+ unsigned char type;
+ unsigned char apicid;
+ unsigned char apicver;
+ unsigned char flags;
+ unsigned int apicaddr;
+};
+
+struct mpc_lintsrc {
+ unsigned char type;
+ unsigned char irqtype;
+ short unsigned int irqflag;
+ unsigned char srcbusid;
+ unsigned char srcbusirq;
+ unsigned char destapic;
+ unsigned char destapiclint;
+};
+
+enum page_cache_mode {
+ _PAGE_CACHE_MODE_WB = 0,
+ _PAGE_CACHE_MODE_WC = 1,
+ _PAGE_CACHE_MODE_UC_MINUS = 2,
+ _PAGE_CACHE_MODE_UC = 3,
+ _PAGE_CACHE_MODE_WT = 4,
+ _PAGE_CACHE_MODE_WP = 5,
+ _PAGE_CACHE_MODE_NUM = 8,
+};
+
+enum {
+ IRQ_REMAP_XAPIC_MODE = 0,
+ IRQ_REMAP_X2APIC_MODE = 1,
+};
+
+union apic_ir {
+ long unsigned int map[4];
+ u32 regs[8];
+};
+
+enum {
+ X2APIC_OFF = 0,
+ X2APIC_ON = 1,
+ X2APIC_DISABLED = 2,
+};
+
+enum ioapic_irq_destination_types {
+ dest_Fixed = 0,
+ dest_LowestPrio = 1,
+ dest_SMI = 2,
+ dest__reserved_1 = 3,
+ dest_NMI = 4,
+ dest_INIT = 5,
+ dest__reserved_2 = 6,
+ dest_ExtINT = 7,
+};
+
+enum {
+ IRQ_SET_MASK_OK = 0,
+ IRQ_SET_MASK_OK_NOCOPY = 1,
+ IRQ_SET_MASK_OK_DONE = 2,
+};
+
+enum {
+ IRQD_TRIGGER_MASK = 15,
+ IRQD_SETAFFINITY_PENDING = 256,
+ IRQD_ACTIVATED = 512,
+ IRQD_NO_BALANCING = 1024,
+ IRQD_PER_CPU = 2048,
+ IRQD_AFFINITY_SET = 4096,
+ IRQD_LEVEL = 8192,
+ IRQD_WAKEUP_STATE = 16384,
+ IRQD_MOVE_PCNTXT = 32768,
+ IRQD_IRQ_DISABLED = 65536,
+ IRQD_IRQ_MASKED = 131072,
+ IRQD_IRQ_INPROGRESS = 262144,
+ IRQD_WAKEUP_ARMED = 524288,
+ IRQD_FORWARDED_TO_VCPU = 1048576,
+ IRQD_AFFINITY_MANAGED = 2097152,
+ IRQD_IRQ_STARTED = 4194304,
+ IRQD_MANAGED_SHUTDOWN = 8388608,
+ IRQD_SINGLE_TARGET = 16777216,
+ IRQD_DEFAULT_TRIGGER_SET = 33554432,
+ IRQD_CAN_RESERVE = 67108864,
+ IRQD_MSI_NOMASK_QUIRK = 134217728,
+ IRQD_HANDLE_ENFORCE_IRQCTX = 268435456,
+};
+
+struct irq_cfg {
+ unsigned int dest_apicid;
+ unsigned int vector;
+};
+
+enum {
+ IRQCHIP_FWNODE_REAL = 0,
+ IRQCHIP_FWNODE_NAMED = 1,
+ IRQCHIP_FWNODE_NAMED_ID = 2,
+};
+
+enum {
+ X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 1,
+ X86_IRQ_ALLOC_LEGACY = 2,
+};
+
+struct apic_chip_data {
+ struct irq_cfg hw_irq_cfg;
+ unsigned int vector;
+ unsigned int prev_vector;
+ unsigned int cpu;
+ unsigned int prev_cpu;
+ unsigned int irq;
+ struct hlist_node clist;
+ unsigned int move_in_progress: 1;
+ unsigned int is_managed: 1;
+ unsigned int can_reserve: 1;
+ unsigned int has_reserved: 1;
+};
+
+struct irq_matrix;
+
+union IO_APIC_reg_00 {
+ u32 raw;
+ struct {
+ u32 __reserved_2: 14;
+ u32 LTS: 1;
+ u32 delivery_type: 1;
+ u32 __reserved_1: 8;
+ u32 ID: 8;
+ } bits;
+};
+
+union IO_APIC_reg_01 {
+ u32 raw;
+ struct {
+ u32 version: 8;
+ u32 __reserved_2: 7;
+ u32 PRQ: 1;
+ u32 entries: 8;
+ u32 __reserved_1: 8;
+ } bits;
+};
+
+union IO_APIC_reg_02 {
+ u32 raw;
+ struct {
+ u32 __reserved_2: 24;
+ u32 arbitration: 4;
+ u32 __reserved_1: 4;
+ } bits;
+};
+
+union IO_APIC_reg_03 {
+ u32 raw;
+ struct {
+ u32 boot_DT: 1;
+ u32 __reserved_1: 31;
+ } bits;
+};
+
+struct IR_IO_APIC_route_entry {
+ __u64 vector: 8;
+ __u64 zero: 3;
+ __u64 index2: 1;
+ __u64 delivery_status: 1;
+ __u64 polarity: 1;
+ __u64 irr: 1;
+ __u64 trigger: 1;
+ __u64 mask: 1;
+ __u64 reserved: 31;
+ __u64 format: 1;
+ __u64 index: 15;
+};
+
+enum {
+ IRQ_TYPE_NONE = 0,
+ IRQ_TYPE_EDGE_RISING = 1,
+ IRQ_TYPE_EDGE_FALLING = 2,
+ IRQ_TYPE_EDGE_BOTH = 3,
+ IRQ_TYPE_LEVEL_HIGH = 4,
+ IRQ_TYPE_LEVEL_LOW = 8,
+ IRQ_TYPE_LEVEL_MASK = 12,
+ IRQ_TYPE_SENSE_MASK = 15,
+ IRQ_TYPE_DEFAULT = 15,
+ IRQ_TYPE_PROBE = 16,
+ IRQ_LEVEL = 256,
+ IRQ_PER_CPU = 512,
+ IRQ_NOPROBE = 1024,
+ IRQ_NOREQUEST = 2048,
+ IRQ_NOAUTOEN = 4096,
+ IRQ_NO_BALANCING = 8192,
+ IRQ_MOVE_PCNTXT = 16384,
+ IRQ_NESTED_THREAD = 32768,
+ IRQ_NOTHREAD = 65536,
+ IRQ_PER_CPU_DEVID = 131072,
+ IRQ_IS_POLLED = 262144,
+ IRQ_DISABLE_UNLAZY = 524288,
+};
+
+enum {
+ IRQCHIP_SET_TYPE_MASKED = 1,
+ IRQCHIP_EOI_IF_HANDLED = 2,
+ IRQCHIP_MASK_ON_SUSPEND = 4,
+ IRQCHIP_ONOFFLINE_ENABLED = 8,
+ IRQCHIP_SKIP_SET_WAKE = 16,
+ IRQCHIP_ONESHOT_SAFE = 32,
+ IRQCHIP_EOI_THREADED = 64,
+ IRQCHIP_SUPPORTS_LEVEL_MSI = 128,
+ IRQCHIP_SUPPORTS_NMI = 256,
+};
+
+struct irq_pin_list {
+ struct list_head list;
+ int apic;
+ int pin;
+};
+
+struct mp_chip_data {
+ struct list_head irq_2_pin;
+ struct IO_APIC_route_entry entry;
+ int trigger;
+ int polarity;
+ u32 count;
+ bool isa_irq;
+};
+
+struct mp_ioapic_gsi {
+ u32 gsi_base;
+ u32 gsi_end;
+};
+
+struct ioapic {
+ int nr_registers;
+ struct IO_APIC_route_entry *saved_registers;
+ struct mpc_ioapic mp_config;
+ struct mp_ioapic_gsi gsi_config;
+ struct ioapic_domain_cfg irqdomain_cfg;
+ struct irq_domain *irqdomain;
+ struct resource *iomem_res;
+};
+
+struct io_apic {
+ unsigned int index;
+ unsigned int unused[3];
+ unsigned int data;
+ unsigned int unused2[11];
+ unsigned int eoi;
+};
+
+union entry_union {
+ struct {
+ u32 w1;
+ u32 w2;
+ };
+ struct IO_APIC_route_entry entry;
+};
+
+enum {
+ IRQ_DOMAIN_FLAG_HIERARCHY = 1,
+ IRQ_DOMAIN_NAME_ALLOCATED = 2,
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4,
+ IRQ_DOMAIN_FLAG_IPI_SINGLE = 8,
+ IRQ_DOMAIN_FLAG_MSI = 16,
+ IRQ_DOMAIN_FLAG_MSI_REMAP = 32,
+ IRQ_DOMAIN_MSI_NOMASK_QUIRK = 64,
+ IRQ_DOMAIN_FLAG_NONCORE = 65536,
+};
+
+typedef struct irq_alloc_info msi_alloc_info_t;
+
+struct msi_domain_info;
+
+struct msi_domain_ops {
+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *);
+ int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *);
+ void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int);
+ int (*msi_check)(struct irq_domain *, struct msi_domain_info *, struct device *);
+ int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *);
+ void (*msi_finish)(msi_alloc_info_t *, int);
+ void (*set_desc)(msi_alloc_info_t *, struct msi_desc *);
+ int (*handle_error)(struct irq_domain *, struct msi_desc *, int);
+};
+
+struct msi_domain_info {
+ u32 flags;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
+};
+
+enum {
+ MSI_FLAG_USE_DEF_DOM_OPS = 1,
+ MSI_FLAG_USE_DEF_CHIP_OPS = 2,
+ MSI_FLAG_MULTI_PCI_MSI = 4,
+ MSI_FLAG_PCI_MSIX = 8,
+ MSI_FLAG_ACTIVATE_EARLY = 16,
+ MSI_FLAG_MUST_REACTIVATE = 32,
+ MSI_FLAG_LEVEL_CAPABLE = 64,
+};
+
+struct hpet_channel;
+
+struct cluster_mask {
+ unsigned int clusterid;
+ int node;
+ struct cpumask mask;
+};
+
+typedef u32 pto_T_____10;
+
+typedef struct cluster_mask *pto_T_____11;
+
+struct kretprobe_instance;
+
+typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *);
+
+struct kretprobe;
+
+struct kretprobe_instance {
+ struct hlist_node hlist;
+ struct kretprobe *rp;
+ kprobe_opcode_t *ret_addr;
+ struct task_struct *task;
+ void *fp;
+ char data[0];
+};
+
+struct kretprobe {
+ struct kprobe kp;
+ kretprobe_handler_t handler;
+ kretprobe_handler_t entry_handler;
+ int maxactive;
+ int nmissed;
+ size_t data_size;
+ struct hlist_head free_instances;
+ raw_spinlock_t lock;
+};
+
+typedef struct kprobe *pto_T_____12;
+
+struct __arch_relative_insn {
+ u8 op;
+ s32 raddr;
+} __attribute__((packed));
+
+struct arch_optimized_insn {
+ kprobe_opcode_t copied_insn[4];
+ kprobe_opcode_t *insn;
+ size_t size;
+};
+
+struct optimized_kprobe {
+ struct kprobe kp;
+ struct list_head list;
+ struct arch_optimized_insn optinsn;
+};
+
+typedef __u64 Elf64_Off;
+
+struct elf64_rela {
+ Elf64_Addr r_offset;
+ Elf64_Xword r_info;
+ Elf64_Sxword r_addend;
+};
+
+typedef struct elf64_rela Elf64_Rela;
+
+struct elf64_hdr {
+ unsigned char e_ident[16];
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry;
+ Elf64_Off e_phoff;
+ Elf64_Off e_shoff;
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
+};
+
+typedef struct elf64_hdr Elf64_Ehdr;
+
+struct elf64_shdr {
+ Elf64_Word sh_name;
+ Elf64_Word sh_type;
+ Elf64_Xword sh_flags;
+ Elf64_Addr sh_addr;
+ Elf64_Off sh_offset;
+ Elf64_Xword sh_size;
+ Elf64_Word sh_link;
+ Elf64_Word sh_info;
+ Elf64_Xword sh_addralign;
+ Elf64_Xword sh_entsize;
+};
+
+typedef struct elf64_shdr Elf64_Shdr;
+
+enum hpet_mode {
+ HPET_MODE_UNUSED = 0,
+ HPET_MODE_LEGACY = 1,
+ HPET_MODE_CLOCKEVT = 2,
+ HPET_MODE_DEVICE = 3,
+};
+
+struct hpet_channel___2 {
+ struct clock_event_device evt;
+ unsigned int num;
+ unsigned int cpu;
+ unsigned int irq;
+ unsigned int in_use;
+ enum hpet_mode mode;
+ unsigned int boot_cfg;
+ char name[10];
+ long: 48;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct hpet_base {
+ unsigned int nr_channels;
+ unsigned int nr_clockevents;
+ unsigned int boot_cfg;
+ struct hpet_channel___2 *channels;
+};
+
+union hpet_lock {
+ struct {
+ arch_spinlock_t lock;
+ u32 value;
+ };
+ u64 lockval;
+};
+
+struct amd_northbridge_info {
+ u16 num;
+ u64 flags;
+ struct amd_northbridge *nb;
+};
+
+struct swait_queue {
+ struct task_struct *task;
+ struct list_head task_list;
+};
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u8 preempted;
+ __u8 u8_pad[3];
+ __u32 pad[11];
+};
+
+struct kvm_vcpu_pv_apf_data {
+ __u32 flags;
+ __u32 token;
+ __u8 pad[56];
+ __u32 enabled;
+};
+
+struct kvm_task_sleep_node {
+ struct hlist_node link;
+ struct swait_queue_head wq;
+ u32 token;
+ int cpu;
+};
+
+struct kvm_task_sleep_head {
+ raw_spinlock_t lock;
+ struct hlist_head list;
+};
+
+typedef __u32 pto_T_____13;
+
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+};
+
+typedef struct pvclock_vsyscall_time_info *pto_T_____14;
+
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE = 0,
+ PARAVIRT_LAZY_MMU = 1,
+ PARAVIRT_LAZY_CPU = 2,
+};
+
+struct branch {
+ unsigned char opcode;
+ u32 delta;
+} __attribute__((packed));
+
+typedef enum paravirt_lazy_mode pto_T_____15;
+
+typedef long unsigned int ulong;
+
+struct property_entry;
+
+struct platform_device_info {
+ struct device *parent;
+ struct fwnode_handle *fwnode;
+ bool of_node_reused;
+ const char *name;
+ int id;
+ const struct resource *res;
+ unsigned int num_res;
+ const void *data;
+ size_t size_data;
+ u64 dma_mask;
+ const struct property_entry *properties;
+};
+
+enum dev_prop_type {
+ DEV_PROP_U8 = 0,
+ DEV_PROP_U16 = 1,
+ DEV_PROP_U32 = 2,
+ DEV_PROP_U64 = 3,
+ DEV_PROP_STRING = 4,
+ DEV_PROP_REF = 5,
+};
+
+struct property_entry {
+ const char *name;
+ size_t length;
+ bool is_inline;
+ enum dev_prop_type type;
+ union {
+ const void *pointer;
+ union {
+ u8 u8_data[8];
+ u16 u16_data[4];
+ u32 u32_data[2];
+ u64 u64_data[1];
+ const char *str[1];
+ } value;
+ };
+};
+
+struct uprobe_xol_ops;
+
+struct arch_uprobe {
+ union {
+ u8 insn[16];
+ u8 ixol[16];
+ };
+ const struct uprobe_xol_ops *ops;
+ union {
+ struct {
+ s32 offs;
+ u8 ilen;
+ u8 opc1;
+ } branch;
+ struct {
+ u8 fixups;
+ u8 ilen;
+ } defparam;
+ struct {
+ u8 reg_offset;
+ u8 ilen;
+ } push;
+ };
+};
+
+struct uprobe_xol_ops {
+ bool (*emulate)(struct arch_uprobe *, struct pt_regs *);
+ int (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
+ int (*post_xol)(struct arch_uprobe *, struct pt_regs *);
+ void (*abort)(struct arch_uprobe *, struct pt_regs *);
+};
+
+enum rp_check {
+ RP_CHECK_CALL = 0,
+ RP_CHECK_CHAIN_CALL = 1,
+ RP_CHECK_RET = 2,
+};
+
+struct fb_fix_screeninfo {
+ char id[16];
+ long unsigned int smem_start;
+ __u32 smem_len;
+ __u32 type;
+ __u32 type_aux;
+ __u32 visual;
+ __u16 xpanstep;
+ __u16 ypanstep;
+ __u16 ywrapstep;
+ __u32 line_length;
+ long unsigned int mmio_start;
+ __u32 mmio_len;
+ __u32 accel;
+ __u16 capabilities;
+ __u16 reserved[2];
+};
+
+struct fb_bitfield {
+ __u32 offset;
+ __u32 length;
+ __u32 msb_right;
+};
+
+struct fb_var_screeninfo {
+ __u32 xres;
+ __u32 yres;
+ __u32 xres_virtual;
+ __u32 yres_virtual;
+ __u32 xoffset;
+ __u32 yoffset;
+ __u32 bits_per_pixel;
+ __u32 grayscale;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+ __u32 nonstd;
+ __u32 activate;
+ __u32 height;
+ __u32 width;
+ __u32 accel_flags;
+ __u32 pixclock;
+ __u32 left_margin;
+ __u32 right_margin;
+ __u32 upper_margin;
+ __u32 lower_margin;
+ __u32 hsync_len;
+ __u32 vsync_len;
+ __u32 sync;
+ __u32 vmode;
+ __u32 rotate;
+ __u32 colorspace;
+ __u32 reserved[4];
+};
+
+struct fb_cmap {
+ __u32 start;
+ __u32 len;
+ __u16 *red;
+ __u16 *green;
+ __u16 *blue;
+ __u16 *transp;
+};
+
+struct fb_copyarea {
+ __u32 dx;
+ __u32 dy;
+ __u32 width;
+ __u32 height;
+ __u32 sx;
+ __u32 sy;
+};
+
+struct fb_fillrect {
+ __u32 dx;
+ __u32 dy;
+ __u32 width;
+ __u32 height;
+ __u32 color;
+ __u32 rop;
+};
+
+struct fb_image {
+ __u32 dx;
+ __u32 dy;
+ __u32 width;
+ __u32 height;
+ __u32 fg_color;
+ __u32 bg_color;
+ __u8 depth;
+ const char *data;
+ struct fb_cmap cmap;
+};
+
+struct fbcurpos {
+ __u16 x;
+ __u16 y;
+};
+
+struct fb_cursor {
+ __u16 set;
+ __u16 enable;
+ __u16 rop;
+ const char *mask;
+ struct fbcurpos hot;
+ struct fb_image image;
+};
+
+struct fb_chroma {
+ __u32 redx;
+ __u32 greenx;
+ __u32 bluex;
+ __u32 whitex;
+ __u32 redy;
+ __u32 greeny;
+ __u32 bluey;
+ __u32 whitey;
+};
+
+struct fb_videomode;
+
+struct fb_monspecs {
+ struct fb_chroma chroma;
+ struct fb_videomode *modedb;
+ __u8 manufacturer[4];
+ __u8 monitor[14];
+ __u8 serial_no[14];
+ __u8 ascii[14];
+ __u32 modedb_len;
+ __u32 model;
+ __u32 serial;
+ __u32 year;
+ __u32 week;
+ __u32 hfmin;
+ __u32 hfmax;
+ __u32 dclkmin;
+ __u32 dclkmax;
+ __u16 input;
+ __u16 dpms;
+ __u16 signal;
+ __u16 vfmin;
+ __u16 vfmax;
+ __u16 gamma;
+ __u16 gtf: 1;
+ __u16 misc;
+ __u8 version;
+ __u8 revision;
+ __u8 max_x;
+ __u8 max_y;
+};
+
+struct fb_info;
+
+struct fb_pixmap {
+ u8 *addr;
+ u32 size;
+ u32 offset;
+ u32 buf_align;
+ u32 scan_align;
+ u32 access_align;
+ u32 flags;
+ u32 blit_x;
+ u32 blit_y;
+ void (*writeio)(struct fb_info *, void *, void *, unsigned int);
+ void (*readio)(struct fb_info *, void *, void *, unsigned int);
+};
+
+struct fb_ops;
+
+struct apertures_struct;
+
+struct fb_info {
+ atomic_t count;
+ int node;
+ int flags;
+ int fbcon_rotate_hint;
+ struct mutex lock;
+ struct mutex mm_lock;
+ struct fb_var_screeninfo var;
+ struct fb_fix_screeninfo fix;
+ struct fb_monspecs monspecs;
+ struct work_struct queue;
+ struct fb_pixmap pixmap;
+ struct fb_pixmap sprite;
+ struct fb_cmap cmap;
+ struct list_head modelist;
+ struct fb_videomode *mode;
+ const struct fb_ops *fbops;
+ struct device *device;
+ struct device *dev;
+ int class_flag;
+ union {
+ char *screen_base;
+ char *screen_buffer;
+ };
+ long unsigned int screen_size;
+ void *pseudo_palette;
+ u32 state;
+ void *fbcon_par;
+ void *par;
+ struct apertures_struct *apertures;
+ bool skip_vt_switch;
+};
+
+struct fb_videomode {
+ const char *name;
+ u32 refresh;
+ u32 xres;
+ u32 yres;
+ u32 pixclock;
+ u32 left_margin;
+ u32 right_margin;
+ u32 upper_margin;
+ u32 lower_margin;
+ u32 hsync_len;
+ u32 vsync_len;
+ u32 sync;
+ u32 vmode;
+ u32 flag;
+};
+
+struct fb_blit_caps {
+ u32 x;
+ u32 y;
+ u32 len;
+ u32 flags;
+};
+
+struct fb_ops {
+ struct module *owner;
+ int (*fb_open)(struct fb_info *, int);
+ int (*fb_release)(struct fb_info *, int);
+ ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *);
+ ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *);
+ int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *);
+ int (*fb_set_par)(struct fb_info *);
+ int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *);
+ int (*fb_setcmap)(struct fb_cmap *, struct fb_info *);
+ int (*fb_blank)(int, struct fb_info *);
+ int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *);
+ void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *);
+ void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *);
+ void (*fb_imageblit)(struct fb_info *, const struct fb_image *);
+ int (*fb_cursor)(struct fb_info *, struct fb_cursor *);
+ int (*fb_sync)(struct fb_info *);
+ int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int);
+ int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int);
+ int (*fb_mmap)(struct fb_info *, struct vm_area_struct *);
+ void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *);
+ void (*fb_destroy)(struct fb_info *);
+ int (*fb_debug_enter)(struct fb_info *);
+ int (*fb_debug_leave)(struct fb_info *);
+};
+
+struct aperture {
+ resource_size_t base;
+ resource_size_t size;
+};
+
+struct apertures_struct {
+ unsigned int count;
+ struct aperture ranges[0];
+};
+
+struct dmt_videomode {
+ u32 dmt_id;
+ u32 std_2byte_code;
+ u32 cvt_3byte_code;
+ const struct fb_videomode *mode;
+};
+
+struct simplefb_platform_data {
+ u32 width;
+ u32 height;
+ u32 stride;
+ const char *format;
+};
+
+struct efifb_dmi_info {
+ char *optname;
+ long unsigned int base;
+ int stride;
+ int width;
+ int height;
+ int flags;
+};
+
+enum {
+ M_I17 = 0,
+ M_I20 = 1,
+ M_I20_SR = 2,
+ M_I24 = 3,
+ M_I24_8_1 = 4,
+ M_I24_10_1 = 5,
+ M_I27_11_1 = 6,
+ M_MINI = 7,
+ M_MINI_3_1 = 8,
+ M_MINI_4_1 = 9,
+ M_MB = 10,
+ M_MB_2 = 11,
+ M_MB_3 = 12,
+ M_MB_5_1 = 13,
+ M_MB_6_1 = 14,
+ M_MB_7_1 = 15,
+ M_MB_SR = 16,
+ M_MBA = 17,
+ M_MBA_3 = 18,
+ M_MBP = 19,
+ M_MBP_2 = 20,
+ M_MBP_2_2 = 21,
+ M_MBP_SR = 22,
+ M_MBP_4 = 23,
+ M_MBP_5_1 = 24,
+ M_MBP_5_2 = 25,
+ M_MBP_5_3 = 26,
+ M_MBP_6_1 = 27,
+ M_MBP_6_2 = 28,
+ M_MBP_7_1 = 29,
+ M_MBP_8_2 = 30,
+ M_UNKNOWN = 31,
+};
+
+enum {
+ OVERRIDE_NONE = 0,
+ OVERRIDE_BASE = 1,
+ OVERRIDE_STRIDE = 2,
+ OVERRIDE_HEIGHT = 4,
+ OVERRIDE_WIDTH = 8,
+};
+
+enum perf_sample_regs_abi {
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
+ PERF_SAMPLE_REGS_ABI_32 = 1,
+ PERF_SAMPLE_REGS_ABI_64 = 2,
+};
+
+struct va_format {
+ const char *fmt;
+ va_list *va;
+};
+
+typedef u8 uint8_t;
+
+typedef u16 uint16_t;
+
+enum pg_level {
+ PG_LEVEL_NONE = 0,
+ PG_LEVEL_4K = 1,
+ PG_LEVEL_2M = 2,
+ PG_LEVEL_1G = 3,
+ PG_LEVEL_512G = 4,
+ PG_LEVEL_NUM = 5,
+};
+
+struct trace_print_flags {
+ long unsigned int mask;
+ const char *name;
+};
+
+enum tlb_flush_reason {
+ TLB_FLUSH_ON_TASK_SWITCH = 0,
+ TLB_REMOTE_SHOOTDOWN = 1,
+ TLB_LOCAL_SHOOTDOWN = 2,
+ TLB_LOCAL_MM_SHOOTDOWN = 3,
+ TLB_REMOTE_SEND_IPI = 4,
+ NR_TLB_FLUSH_REASONS = 5,
+};
+
+enum {
+ REGION_INTERSECTS = 0,
+ REGION_DISJOINT = 1,
+ REGION_MIXED = 2,
+};
+
+struct trace_event_raw_tlb_flush {
+ struct trace_entry ent;
+ int reason;
+ long unsigned int pages;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_tlb_flush {};
+
+typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int);
+
+struct map_range {
+ long unsigned int start;
+ long unsigned int end;
+ unsigned int page_size_mask;
+};
+
+enum {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+ SECTION_INFO = 12,
+ MIX_SECTION_INFO = 13,
+ NODE_INFO = 14,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = 14,
+};
+
+struct mhp_params {
+ struct vmem_altmap *altmap;
+ pgprot_t pgprot;
+};
+
+enum kcore_type {
+ KCORE_TEXT = 0,
+ KCORE_VMALLOC = 1,
+ KCORE_RAM = 2,
+ KCORE_VMEMMAP = 3,
+ KCORE_USER = 4,
+ KCORE_OTHER = 5,
+ KCORE_REMAP = 6,
+};
+
+struct kcore_list {
+ struct list_head list;
+ long unsigned int addr;
+ long unsigned int vaddr;
+ size_t size;
+ int type;
+};
+
+struct x86_mapping_info {
+ void * (*alloc_pgt_page)(void *);
+ void *context;
+ long unsigned int page_flag;
+ long unsigned int offset;
+ bool direct_gbpages;
+ long unsigned int kernpg_flag;
+};
+
+struct trace_event_raw_x86_exceptions {
+ struct trace_entry ent;
+ long unsigned int address;
+ long unsigned int ip;
+ long unsigned int error_code;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_x86_exceptions {};
+
+typedef void (*btf_trace_page_fault_user)(void *, long unsigned int, struct pt_regs *, long unsigned int);
+
+typedef void (*btf_trace_page_fault_kernel)(void *, long unsigned int, struct pt_regs *, long unsigned int);
+
+enum {
+ IORES_MAP_SYSTEM_RAM = 1,
+ IORES_MAP_ENCRYPTED = 2,
+};
+
+struct ioremap_desc {
+ unsigned int flags;
+};
+
+enum xen_domain_type {
+ XEN_NATIVE = 0,
+ XEN_PV_DOMAIN = 1,
+ XEN_HVM_DOMAIN = 2,
+};
+
+typedef bool (*ex_handler_t)(const struct exception_table_entry *, struct pt_regs *, int, long unsigned int, long unsigned int);
+
+typedef u16 pto_T_____16;
+
+typedef struct mm_struct *pto_T_____17;
+
+struct exception_stacks {
+ char DF_stack_guard[0];
+ char DF_stack[4096];
+ char NMI_stack_guard[0];
+ char NMI_stack[4096];
+ char DB_stack_guard[0];
+ char DB_stack[4096];
+ char MCE_stack_guard[0];
+ char MCE_stack[4096];
+ char IST_top_guard[0];
+};
+
+struct cpa_data {
+ long unsigned int *vaddr;
+ pgd_t *pgd;
+ pgprot_t mask_set;
+ pgprot_t mask_clr;
+ long unsigned int numpages;
+ long unsigned int curpage;
+ long unsigned int pfn;
+ unsigned int flags;
+ unsigned int force_split: 1;
+ unsigned int force_static_prot: 1;
+ unsigned int force_flush_all: 1;
+ struct page **pages;
+};
+
+enum cpa_warn {
+ CPA_CONFLICT = 0,
+ CPA_PROTECT = 1,
+ CPA_DETECT = 2,
+};
+
+typedef struct {
+ u64 val;
+} pfn_t;
+
+struct memtype {
+ u64 start;
+ u64 end;
+ u64 subtree_max_end;
+ enum page_cache_mode type;
+ struct rb_node rb;
+};
+
+enum {
+ PAT_UC = 0,
+ PAT_WC = 1,
+ PAT_WT = 4,
+ PAT_WP = 5,
+ PAT_WB = 6,
+ PAT_UC_MINUS = 7,
+};
+
+struct pagerange_state {
+ long unsigned int cur_pfn;
+ int ram;
+ int not_ram;
+};
+
+struct rb_augment_callbacks {
+ void (*propagate)(struct rb_node *, struct rb_node *);
+ void (*copy)(struct rb_node *, struct rb_node *);
+ void (*rotate)(struct rb_node *, struct rb_node *);
+};
+
+enum {
+ MEMTYPE_EXACT_MATCH = 0,
+ MEMTYPE_END_MATCH = 1,
+};
+
+struct hugepage_subpool {
+ spinlock_t lock;
+ long int count;
+ long int max_hpages;
+ long int used_hpages;
+ struct hstate *hstate;
+ long int min_hpages;
+ long int rsv_hpages;
+};
+
+struct hugetlbfs_sb_info {
+ long int max_inodes;
+ long int free_inodes;
+ spinlock_t stat_lock;
+ struct hstate *hstate;
+ struct hugepage_subpool *spool;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+};
+
+struct kmmio_probe;
+
+typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, struct pt_regs *, long unsigned int);
+
+typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, long unsigned int, struct pt_regs *);
+
+struct kmmio_probe {
+ struct list_head list;
+ long unsigned int addr;
+ long unsigned int len;
+ kmmio_pre_handler_t pre_handler;
+ kmmio_post_handler_t post_handler;
+ void *private;
+};
+
+struct kmmio_fault_page {
+ struct list_head list;
+ struct kmmio_fault_page *release_next;
+ long unsigned int addr;
+ pteval_t old_presence;
+ bool armed;
+ int count;
+ bool scheduled_for_release;
+};
+
+struct kmmio_delayed_release {
+ struct callback_head rcu;
+ struct kmmio_fault_page *release_list;
+};
+
+struct kmmio_context {
+ struct kmmio_fault_page *fpage;
+ struct kmmio_probe *probe;
+ long unsigned int saved_flags;
+ long unsigned int addr;
+ int active;
+};
+
+enum reason_type {
+ NOT_ME = 0,
+ NOTHING = 1,
+ REG_READ = 2,
+ REG_WRITE = 3,
+ IMM_WRITE = 4,
+ OTHERS = 5,
+};
+
+struct prefix_bits {
+ unsigned int shorted: 1;
+ unsigned int enlarged: 1;
+ unsigned int rexr: 1;
+ unsigned int rex: 1;
+};
+
+enum {
+ arg_AL = 0,
+ arg_CL = 1,
+ arg_DL = 2,
+ arg_BL = 3,
+ arg_AH = 4,
+ arg_CH = 5,
+ arg_DH = 6,
+ arg_BH = 7,
+ arg_AX = 0,
+ arg_CX = 1,
+ arg_DX = 2,
+ arg_BX = 3,
+ arg_SP = 4,
+ arg_BP = 5,
+ arg_SI = 6,
+ arg_DI = 7,
+ arg_R8 = 8,
+ arg_R9 = 9,
+ arg_R10 = 10,
+ arg_R11 = 11,
+ arg_R12 = 12,
+ arg_R13 = 13,
+ arg_R14 = 14,
+ arg_R15 = 15,
+};
+
+enum mm_io_opcode {
+ MMIO_READ = 1,
+ MMIO_WRITE = 2,
+ MMIO_PROBE = 3,
+ MMIO_UNPROBE = 4,
+ MMIO_UNKNOWN_OP = 5,
+};
+
+struct mmiotrace_rw {
+ resource_size_t phys;
+ long unsigned int value;
+ long unsigned int pc;
+ int map_id;
+ unsigned char opcode;
+ unsigned char width;
+};
+
+struct mmiotrace_map {
+ resource_size_t phys;
+ long unsigned int virt;
+ long unsigned int len;
+ int map_id;
+ unsigned char opcode;
+};
+
+struct trap_reason {
+ long unsigned int addr;
+ long unsigned int ip;
+ enum reason_type type;
+ int active_traces;
+};
+
+struct remap_trace {
+ struct list_head list;
+ struct kmmio_probe probe;
+ resource_size_t phys;
+ long unsigned int id;
+};
+
+enum pti_mode {
+ PTI_AUTO = 0,
+ PTI_FORCE_OFF = 1,
+ PTI_FORCE_ON = 2,
+};
+
+enum pti_clone_level {
+ PTI_CLONE_PMD = 0,
+ PTI_CLONE_PTE = 1,
+};
+
+typedef struct {
+ efi_guid_t guid;
+ u64 table;
+} efi_config_table_64_t;
+
+struct efi_memory_map_data {
+ phys_addr_t phys_map;
+ long unsigned int size;
+ long unsigned int desc_version;
+ long unsigned int desc_size;
+ long unsigned int flags;
+};
+
+struct efi_mem_range {
+ struct range range;
+ u64 attribute;
+};
+
+struct efi_setup_data {
+ u64 fw_vendor;
+ u64 __unused;
+ u64 tables;
+ u64 smbios;
+ u64 reserved[8];
+};
+
+typedef struct {
+ efi_guid_t guid;
+ long unsigned int *ptr;
+ const char name[16];
+} efi_config_table_type_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 fw_vendor;
+ u32 fw_revision;
+ u32 __pad1;
+ u64 con_in_handle;
+ u64 con_in;
+ u64 con_out_handle;
+ u64 con_out;
+ u64 stderr_handle;
+ u64 stderr;
+ u64 runtime;
+ u64 boottime;
+ u32 nr_tables;
+ u32 __pad2;
+ u64 tables;
+} efi_system_table_64_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 fw_vendor;
+ u32 fw_revision;
+ u32 con_in_handle;
+ u32 con_in;
+ u32 con_out_handle;
+ u32 con_out;
+ u32 stderr_handle;
+ u32 stderr;
+ u32 runtime;
+ u32 boottime;
+ u32 nr_tables;
+ u32 tables;
+} efi_system_table_32_t;
+
+typedef struct {
+ u32 version;
+ u32 length;
+ u64 memory_protection_attribute;
+} efi_properties_table_t;
+
+union efi_boot_services;
+
+typedef union efi_boot_services efi_boot_services_t;
+
+union efi_simple_text_input_protocol;
+
+typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t;
+
+union efi_simple_text_output_protocol;
+
+typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
+
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ long unsigned int fw_vendor;
+ u32 fw_revision;
+ long unsigned int con_in_handle;
+ efi_simple_text_input_protocol_t *con_in;
+ long unsigned int con_out_handle;
+ efi_simple_text_output_protocol_t *con_out;
+ long unsigned int stderr_handle;
+ long unsigned int stderr;
+ efi_runtime_services_t *runtime;
+ efi_boot_services_t *boottime;
+ long unsigned int nr_tables;
+ long unsigned int tables;
+ };
+ efi_system_table_32_t mixed_mode;
+} efi_system_table_t;
+
+enum {
+ BPF_REG_0 = 0,
+ BPF_REG_1 = 1,
+ BPF_REG_2 = 2,
+ BPF_REG_3 = 3,
+ BPF_REG_4 = 4,
+ BPF_REG_5 = 5,
+ BPF_REG_6 = 6,
+ BPF_REG_7 = 7,
+ BPF_REG_8 = 8,
+ BPF_REG_9 = 9,
+ BPF_REG_10 = 10,
+ __MAX_BPF_REG = 11,
+};
+
+struct bpf_tramp_progs {
+ struct bpf_prog *progs[40];
+ int nr_progs;
+};
+
+enum bpf_jit_poke_reason {
+ BPF_POKE_REASON_TAIL_CALL = 0,
+};
+
+struct bpf_array_aux {
+ enum bpf_prog_type type;
+ bool jited;
+ struct list_head poke_progs;
+ struct bpf_map *map;
+ struct mutex poke_mutex;
+ struct work_struct work;
+};
+
+struct bpf_array {
+ struct bpf_map map;
+ u32 elem_size;
+ u32 index_mask;
+ struct bpf_array_aux *aux;
+ union {
+ char value[0];
+ void *ptrs[0];
+ void *pptrs[0];
+ };
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum bpf_text_poke_type {
+ BPF_MOD_CALL = 0,
+ BPF_MOD_JUMP = 1,
+};
+
+struct bpf_binary_header {
+ u32 pages;
+ int: 32;
+ u8 image[0];
+};
+
+struct jit_context {
+ int cleanup_addr;
+};
+
+struct x64_jit_data {
+ struct bpf_binary_header *header;
+ int *addrs;
+ u8 *image;
+ int proglen;
+ struct jit_context ctx;
+};
+
+enum tk_offsets {
+ TK_OFFS_REAL = 0,
+ TK_OFFS_BOOT = 1,
+ TK_OFFS_TAI = 2,
+ TK_OFFS_MAX = 3,
+};
+
+struct clone_args {
+ __u64 flags;
+ __u64 pidfd;
+ __u64 child_tid;
+ __u64 parent_tid;
+ __u64 exit_signal;
+ __u64 stack;
+ __u64 stack_size;
+ __u64 tls;
+ __u64 set_tid;
+ __u64 set_tid_size;
+ __u64 cgroup;
+};
+
+enum hrtimer_mode {
+ HRTIMER_MODE_ABS = 0,
+ HRTIMER_MODE_REL = 1,
+ HRTIMER_MODE_PINNED = 2,
+ HRTIMER_MODE_SOFT = 4,
+ HRTIMER_MODE_HARD = 8,
+ HRTIMER_MODE_ABS_PINNED = 2,
+ HRTIMER_MODE_REL_PINNED = 3,
+ HRTIMER_MODE_ABS_SOFT = 4,
+ HRTIMER_MODE_REL_SOFT = 5,
+ HRTIMER_MODE_ABS_PINNED_SOFT = 6,
+ HRTIMER_MODE_REL_PINNED_SOFT = 7,
+ HRTIMER_MODE_ABS_HARD = 8,
+ HRTIMER_MODE_REL_HARD = 9,
+ HRTIMER_MODE_ABS_PINNED_HARD = 10,
+ HRTIMER_MODE_REL_PINNED_HARD = 11,
+};
+
+struct fdtable {
+ unsigned int max_fds;
+ struct file **fd;
+ long unsigned int *close_on_exec;
+ long unsigned int *open_fds;
+ long unsigned int *full_fds_bits;
+ struct callback_head rcu;
+};
+
+struct files_struct {
+ atomic_t count;
+ bool resize_in_progress;
+ wait_queue_head_t resize_wait;
+ struct fdtable *fdt;
+ struct fdtable fdtab;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ spinlock_t file_lock;
+ unsigned int next_fd;
+ long unsigned int close_on_exec_init[1];
+ long unsigned int open_fds_init[1];
+ long unsigned int full_fds_bits_init[1];
+ struct file *fd_array[64];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct robust_list {
+ struct robust_list *next;
+};
+
+struct robust_list_head {
+ struct robust_list list;
+ long int futex_offset;
+ struct robust_list *list_op_pending;
+};
+
+struct kernel_clone_args {
+ u64 flags;
+ int *pidfd;
+ int *child_tid;
+ int *parent_tid;
+ int exit_signal;
+ long unsigned int stack;
+ long unsigned int stack_size;
+ long unsigned int tls;
+ pid_t *set_tid;
+ size_t set_tid_size;
+ int cgroup;
+ struct cgroup *cgrp;
+ struct css_set *cset;
+};
+
+struct multiprocess_signals {
+ sigset_t signal;
+ struct hlist_node node;
+};
+
+typedef int (*proc_visitor)(struct task_struct *, void *);
+
+enum {
+ IOPRIO_CLASS_NONE = 0,
+ IOPRIO_CLASS_RT = 1,
+ IOPRIO_CLASS_BE = 2,
+ IOPRIO_CLASS_IDLE = 3,
+};
+
+struct mempolicy {};
+
+typedef struct poll_table_struct poll_table;
+
+enum {
+ FUTEX_STATE_OK = 0,
+ FUTEX_STATE_EXITING = 1,
+ FUTEX_STATE_DEAD = 2,
+};
+
+enum proc_hidepid {
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+ HIDEPID_NOT_PTRACEABLE = 4,
+};
+
+enum proc_pidonly {
+ PROC_PIDONLY_OFF = 0,
+ PROC_PIDONLY_ON = 1,
+};
+
+struct proc_fs_info {
+ struct pid_namespace *pid_ns;
+ struct dentry *proc_self;
+ struct dentry *proc_thread_self;
+ kgid_t pid_gid;
+ enum proc_hidepid hide_pid;
+ enum proc_pidonly pidonly;
+};
+
+struct trace_event_raw_task_newtask {
+ struct trace_entry ent;
+ pid_t pid;
+ char comm[16];
+ long unsigned int clone_flags;
+ short int oom_score_adj;
+ char __data[0];
+};
+
+struct trace_event_raw_task_rename {
+ struct trace_entry ent;
+ pid_t pid;
+ char oldcomm[16];
+ char newcomm[16];
+ short int oom_score_adj;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_task_newtask {};
+
+struct trace_event_data_offsets_task_rename {};
+
+typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int);
+
+typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *);
+
+typedef long unsigned int pao_T_____3;
+
+enum kmsg_dump_reason {
+ KMSG_DUMP_UNDEF = 0,
+ KMSG_DUMP_PANIC = 1,
+ KMSG_DUMP_OOPS = 2,
+ KMSG_DUMP_EMERG = 3,
+ KMSG_DUMP_SHUTDOWN = 4,
+ KMSG_DUMP_MAX = 5,
+};
+
+struct vt_mode {
+ char mode;
+ char waitv;
+ short int relsig;
+ short int acqsig;
+ short int frsig;
+};
+
+struct console_font {
+ unsigned int width;
+ unsigned int height;
+ unsigned int charcount;
+ unsigned char *data;
+};
+
+struct uni_pagedir;
+
+struct uni_screen;
+
+struct vc_data {
+ struct tty_port port;
+ short unsigned int vc_num;
+ unsigned int vc_cols;
+ unsigned int vc_rows;
+ unsigned int vc_size_row;
+ unsigned int vc_scan_lines;
+ long unsigned int vc_origin;
+ long unsigned int vc_scr_end;
+ long unsigned int vc_visible_origin;
+ unsigned int vc_top;
+ unsigned int vc_bottom;
+ const struct consw *vc_sw;
+ short unsigned int *vc_screenbuf;
+ unsigned int vc_screenbuf_size;
+ unsigned char vc_mode;
+ unsigned char vc_attr;
+ unsigned char vc_def_color;
+ unsigned char vc_color;
+ unsigned char vc_s_color;
+ unsigned char vc_ulcolor;
+ unsigned char vc_itcolor;
+ unsigned char vc_halfcolor;
+ unsigned int vc_cursor_type;
+ short unsigned int vc_complement_mask;
+ short unsigned int vc_s_complement_mask;
+ unsigned int vc_x;
+ unsigned int vc_y;
+ unsigned int vc_saved_x;
+ unsigned int vc_saved_y;
+ long unsigned int vc_pos;
+ short unsigned int vc_hi_font_mask;
+ struct console_font vc_font;
+ short unsigned int vc_video_erase_char;
+ unsigned int vc_state;
+ unsigned int vc_npar;
+ unsigned int vc_par[16];
+ struct vt_mode vt_mode;
+ struct pid *vt_pid;
+ int vt_newvt;
+ wait_queue_head_t paste_wait;
+ unsigned int vc_charset: 1;
+ unsigned int vc_s_charset: 1;
+ unsigned int vc_disp_ctrl: 1;
+ unsigned int vc_toggle_meta: 1;
+ unsigned int vc_decscnm: 1;
+ unsigned int vc_decom: 1;
+ unsigned int vc_decawm: 1;
+ unsigned int vc_deccm: 1;
+ unsigned int vc_decim: 1;
+ unsigned int vc_intensity: 2;
+ unsigned int vc_italic: 1;
+ unsigned int vc_underline: 1;
+ unsigned int vc_blink: 1;
+ unsigned int vc_reverse: 1;
+ unsigned int vc_s_intensity: 2;
+ unsigned int vc_s_italic: 1;
+ unsigned int vc_s_underline: 1;
+ unsigned int vc_s_blink: 1;
+ unsigned int vc_s_reverse: 1;
+ unsigned int vc_priv: 3;
+ unsigned int vc_need_wrap: 1;
+ unsigned int vc_can_do_color: 1;
+ unsigned int vc_report_mouse: 2;
+ unsigned char vc_utf: 1;
+ unsigned char vc_utf_count;
+ int vc_utf_char;
+ unsigned int vc_tab_stop[8];
+ unsigned char vc_palette[48];
+ short unsigned int *vc_translate;
+ unsigned char vc_G0_charset;
+ unsigned char vc_G1_charset;
+ unsigned char vc_saved_G0;
+ unsigned char vc_saved_G1;
+ unsigned int vc_resize_user;
+ unsigned int vc_bell_pitch;
+ unsigned int vc_bell_duration;
+ short unsigned int vc_cur_blink_ms;
+ struct vc_data **vc_display_fg;
+ struct uni_pagedir *vc_uni_pagedir;
+ struct uni_pagedir **vc_uni_pagedir_loc;
+ struct uni_screen *vc_uni_screen;
+};
+
+struct vc {
+ struct vc_data *d;
+ struct work_struct SAK_work;
+};
+
+struct vt_spawn_console {
+ spinlock_t lock;
+ struct pid *pid;
+ int sig;
+};
+
+enum con_flush_mode {
+ CONSOLE_FLUSH_PENDING = 0,
+ CONSOLE_REPLAY_ALL = 1,
+};
+
+struct warn_args {
+ const char *fmt;
+ va_list args;
+};
+
+enum kobject_action {
+ KOBJ_ADD = 0,
+ KOBJ_REMOVE = 1,
+ KOBJ_CHANGE = 2,
+ KOBJ_MOVE = 3,
+ KOBJ_ONLINE = 4,
+ KOBJ_OFFLINE = 5,
+ KOBJ_BIND = 6,
+ KOBJ_UNBIND = 7,
+ KOBJ_MAX = 8,
+};
+
+struct smp_hotplug_thread {
+ struct task_struct **store;
+ struct list_head list;
+ int (*thread_should_run)(unsigned int);
+ void (*thread_fn)(unsigned int);
+ void (*create)(unsigned int);
+ void (*setup)(unsigned int);
+ void (*cleanup)(unsigned int, bool);
+ void (*park)(unsigned int);
+ void (*unpark)(unsigned int);
+ bool selfparking;
+ const char *thread_comm;
+};
+
+struct trace_event_raw_cpuhp_enter {
+ struct trace_entry ent;
+ unsigned int cpu;
+ int target;
+ int idx;
+ void *fun;
+ char __data[0];
+};
+
+struct trace_event_raw_cpuhp_multi_enter {
+ struct trace_entry ent;
+ unsigned int cpu;
+ int target;
+ int idx;
+ void *fun;
+ char __data[0];
+};
+
+struct trace_event_raw_cpuhp_exit {
+ struct trace_entry ent;
+ unsigned int cpu;
+ int state;
+ int idx;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_cpuhp_enter {};
+
+struct trace_event_data_offsets_cpuhp_multi_enter {};
+
+struct trace_event_data_offsets_cpuhp_exit {};
+
+typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int));
+
+typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *);
+
+typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int);
+
+struct cpuhp_cpu_state {
+ enum cpuhp_state state;
+ enum cpuhp_state target;
+ enum cpuhp_state fail;
+ struct task_struct *thread;
+ bool should_run;
+ bool rollback;
+ bool single;
+ bool bringup;
+ struct hlist_node *node;
+ struct hlist_node *last;
+ enum cpuhp_state cb_state;
+ int result;
+ struct completion done_up;
+ struct completion done_down;
+};
+
+struct cpuhp_step {
+ const char *name;
+ union {
+ int (*single)(unsigned int);
+ int (*multi)(unsigned int, struct hlist_node *);
+ } startup;
+ union {
+ int (*single)(unsigned int);
+ int (*multi)(unsigned int, struct hlist_node *);
+ } teardown;
+ struct hlist_head list;
+ bool cant_stop;
+ bool multi_instance;
+};
+
+enum cpu_mitigations {
+ CPU_MITIGATIONS_OFF = 0,
+ CPU_MITIGATIONS_AUTO = 1,
+ CPU_MITIGATIONS_AUTO_NOSMT = 2,
+};
+
+typedef enum cpuhp_state pto_T_____18;
+
+struct __kernel_old_timeval {
+ __kernel_long_t tv_sec;
+ __kernel_long_t tv_usec;
+};
+
+struct wait_queue_entry;
+
+typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *);
+
+struct wait_queue_entry {
+ unsigned int flags;
+ void *private;
+ wait_queue_func_t func;
+ struct list_head entry;
+};
+
+typedef struct wait_queue_entry wait_queue_entry_t;
+
+struct rusage {
+ struct __kernel_old_timeval ru_utime;
+ struct __kernel_old_timeval ru_stime;
+ __kernel_long_t ru_maxrss;
+ __kernel_long_t ru_ixrss;
+ __kernel_long_t ru_idrss;
+ __kernel_long_t ru_isrss;
+ __kernel_long_t ru_minflt;
+ __kernel_long_t ru_majflt;
+ __kernel_long_t ru_nswap;
+ __kernel_long_t ru_inblock;
+ __kernel_long_t ru_oublock;
+ __kernel_long_t ru_msgsnd;
+ __kernel_long_t ru_msgrcv;
+ __kernel_long_t ru_nsignals;
+ __kernel_long_t ru_nvcsw;
+ __kernel_long_t ru_nivcsw;
+};
+
+struct fd {
+ struct file *file;
+ unsigned int flags;
+};
+
+struct waitid_info {
+ pid_t pid;
+ uid_t uid;
+ int status;
+ int cause;
+};
+
+struct wait_opts {
+ enum pid_type wo_type;
+ int wo_flags;
+ struct pid *wo_pid;
+ struct waitid_info *wo_info;
+ int wo_stat;
+ struct rusage *wo_rusage;
+ wait_queue_entry_t child_wait;
+ int notask_error;
+};
+
+struct softirq_action {
+ void (*action)(struct softirq_action *);
+};
+
+struct tasklet_struct {
+ struct tasklet_struct *next;
+ long unsigned int state;
+ atomic_t count;
+ void (*func)(long unsigned int);
+ long unsigned int data;
+};
+
+enum {
+ TASKLET_STATE_SCHED = 0,
+ TASKLET_STATE_RUN = 1,
+};
+
+struct trace_event_raw_irq_handler_entry {
+ struct trace_entry ent;
+ int irq;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_irq_handler_exit {
+ struct trace_entry ent;
+ int irq;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_softirq {
+ struct trace_entry ent;
+ unsigned int vec;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_irq_handler_entry {
+ u32 name;
+};
+
+struct trace_event_data_offsets_irq_handler_exit {};
+
+struct trace_event_data_offsets_softirq {};
+
+typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *);
+
+typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int);
+
+typedef void (*btf_trace_softirq_entry)(void *, unsigned int);
+
+typedef void (*btf_trace_softirq_exit)(void *, unsigned int);
+
+typedef void (*btf_trace_softirq_raise)(void *, unsigned int);
+
+struct tasklet_head {
+ struct tasklet_struct *head;
+ struct tasklet_struct **tail;
+};
+
+typedef struct tasklet_struct **pto_T_____19;
+
+typedef void (*dr_release_t)(struct device *, void *);
+
+struct resource_entry {
+ struct list_head node;
+ struct resource *res;
+ resource_size_t offset;
+ struct resource __res;
+};
+
+struct resource_constraint {
+ resource_size_t min;
+ resource_size_t max;
+ resource_size_t align;
+ resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t);
+ void *alignf_data;
+};
+
+enum {
+ MAX_IORES_LEVEL = 5,
+};
+
+struct region_devres {
+ struct resource *parent;
+ resource_size_t start;
+ resource_size_t n;
+};
+
+enum sysctl_writes_mode {
+ SYSCTL_WRITES_LEGACY = -1,
+ SYSCTL_WRITES_WARN = 0,
+ SYSCTL_WRITES_STRICT = 1,
+};
+
+struct do_proc_dointvec_minmax_conv_param {
+ int *min;
+ int *max;
+};
+
+struct do_proc_douintvec_minmax_conv_param {
+ unsigned int *min;
+ unsigned int *max;
+};
+
+struct __sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ size_t *oldlenp;
+ void *newval;
+ size_t newlen;
+ long unsigned int __unused[4];
+};
+
+enum {
+ CTL_KERN = 1,
+ CTL_VM = 2,
+ CTL_NET = 3,
+ CTL_PROC = 4,
+ CTL_FS = 5,
+ CTL_DEBUG = 6,
+ CTL_DEV = 7,
+ CTL_BUS = 8,
+ CTL_ABI = 9,
+ CTL_CPU = 10,
+ CTL_ARLAN = 254,
+ CTL_S390DBF = 5677,
+ CTL_SUNRPC = 7249,
+ CTL_PM = 9899,
+ CTL_FRV = 9898,
+};
+
+enum {
+ KERN_OSTYPE = 1,
+ KERN_OSRELEASE = 2,
+ KERN_OSREV = 3,
+ KERN_VERSION = 4,
+ KERN_SECUREMASK = 5,
+ KERN_PROF = 6,
+ KERN_NODENAME = 7,
+ KERN_DOMAINNAME = 8,
+ KERN_PANIC = 15,
+ KERN_REALROOTDEV = 16,
+ KERN_SPARC_REBOOT = 21,
+ KERN_CTLALTDEL = 22,
+ KERN_PRINTK = 23,
+ KERN_NAMETRANS = 24,
+ KERN_PPC_HTABRECLAIM = 25,
+ KERN_PPC_ZEROPAGED = 26,
+ KERN_PPC_POWERSAVE_NAP = 27,
+ KERN_MODPROBE = 28,
+ KERN_SG_BIG_BUFF = 29,
+ KERN_ACCT = 30,
+ KERN_PPC_L2CR = 31,
+ KERN_RTSIGNR = 32,
+ KERN_RTSIGMAX = 33,
+ KERN_SHMMAX = 34,
+ KERN_MSGMAX = 35,
+ KERN_MSGMNB = 36,
+ KERN_MSGPOOL = 37,
+ KERN_SYSRQ = 38,
+ KERN_MAX_THREADS = 39,
+ KERN_RANDOM = 40,
+ KERN_SHMALL = 41,
+ KERN_MSGMNI = 42,
+ KERN_SEM = 43,
+ KERN_SPARC_STOP_A = 44,
+ KERN_SHMMNI = 45,
+ KERN_OVERFLOWUID = 46,
+ KERN_OVERFLOWGID = 47,
+ KERN_SHMPATH = 48,
+ KERN_HOTPLUG = 49,
+ KERN_IEEE_EMULATION_WARNINGS = 50,
+ KERN_S390_USER_DEBUG_LOGGING = 51,
+ KERN_CORE_USES_PID = 52,
+ KERN_TAINTED = 53,
+ KERN_CADPID = 54,
+ KERN_PIDMAX = 55,
+ KERN_CORE_PATTERN = 56,
+ KERN_PANIC_ON_OOPS = 57,
+ KERN_HPPA_PWRSW = 58,
+ KERN_HPPA_UNALIGNED = 59,
+ KERN_PRINTK_RATELIMIT = 60,
+ KERN_PRINTK_RATELIMIT_BURST = 61,
+ KERN_PTY = 62,
+ KERN_NGROUPS_MAX = 63,
+ KERN_SPARC_SCONS_PWROFF = 64,
+ KERN_HZ_TIMER = 65,
+ KERN_UNKNOWN_NMI_PANIC = 66,
+ KERN_BOOTLOADER_TYPE = 67,
+ KERN_RANDOMIZE = 68,
+ KERN_SETUID_DUMPABLE = 69,
+ KERN_SPIN_RETRY = 70,
+ KERN_ACPI_VIDEO_FLAGS = 71,
+ KERN_IA64_UNALIGNED = 72,
+ KERN_COMPAT_LOG = 73,
+ KERN_MAX_LOCK_DEPTH = 74,
+ KERN_NMI_WATCHDOG = 75,
+ KERN_PANIC_ON_NMI = 76,
+ KERN_PANIC_ON_WARN = 77,
+ KERN_PANIC_PRINT = 78,
+};
+
+struct xfs_sysctl_val {
+ int min;
+ int val;
+ int max;
+};
+
+typedef struct xfs_sysctl_val xfs_sysctl_val_t;
+
+struct xfs_param {
+ xfs_sysctl_val_t sgid_inherit;
+ xfs_sysctl_val_t symlink_mode;
+ xfs_sysctl_val_t panic_mask;
+ xfs_sysctl_val_t error_level;
+ xfs_sysctl_val_t syncd_timer;
+ xfs_sysctl_val_t stats_clear;
+ xfs_sysctl_val_t inherit_sync;
+ xfs_sysctl_val_t inherit_nodump;
+ xfs_sysctl_val_t inherit_noatim;
+ xfs_sysctl_val_t xfs_buf_timer;
+ xfs_sysctl_val_t xfs_buf_age;
+ xfs_sysctl_val_t inherit_nosym;
+ xfs_sysctl_val_t rotorstep;
+ xfs_sysctl_val_t inherit_nodfrg;
+ xfs_sysctl_val_t fstrm_timer;
+ xfs_sysctl_val_t eofb_timer;
+ xfs_sysctl_val_t cowb_timer;
+};
+
+typedef struct xfs_param xfs_param_t;
+
+struct xfs_globals {
+ int log_recovery_delay;
+ int mount_delay;
+ bool bug_on_assert;
+ bool always_cow;
+};
+
+enum ethtool_link_mode_bit_indices {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5,
+ ETHTOOL_LINK_MODE_Autoneg_BIT = 6,
+ ETHTOOL_LINK_MODE_TP_BIT = 7,
+ ETHTOOL_LINK_MODE_AUI_BIT = 8,
+ ETHTOOL_LINK_MODE_MII_BIT = 9,
+ ETHTOOL_LINK_MODE_FIBRE_BIT = 10,
+ ETHTOOL_LINK_MODE_BNC_BIT = 11,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12,
+ ETHTOOL_LINK_MODE_Pause_BIT = 13,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15,
+ ETHTOOL_LINK_MODE_Backplane_BIT = 16,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20,
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27,
+ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
+ ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
+ ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
+ ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
+ ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66,
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67,
+ ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
+ __ETHTOOL_LINK_MODE_MASK_NBITS = 75,
+};
+
+enum {
+ NAPI_STATE_SCHED = 0,
+ NAPI_STATE_MISSED = 1,
+ NAPI_STATE_DISABLE = 2,
+ NAPI_STATE_NPSVC = 3,
+ NAPI_STATE_HASHED = 4,
+ NAPI_STATE_NO_BUSY_POLL = 5,
+ NAPI_STATE_IN_BUSY_POLL = 6,
+};
+
+enum {
+ NETIF_MSG_DRV_BIT = 0,
+ NETIF_MSG_PROBE_BIT = 1,
+ NETIF_MSG_LINK_BIT = 2,
+ NETIF_MSG_TIMER_BIT = 3,
+ NETIF_MSG_IFDOWN_BIT = 4,
+ NETIF_MSG_IFUP_BIT = 5,
+ NETIF_MSG_RX_ERR_BIT = 6,
+ NETIF_MSG_TX_ERR_BIT = 7,
+ NETIF_MSG_TX_QUEUED_BIT = 8,
+ NETIF_MSG_INTR_BIT = 9,
+ NETIF_MSG_TX_DONE_BIT = 10,
+ NETIF_MSG_RX_STATUS_BIT = 11,
+ NETIF_MSG_PKTDATA_BIT = 12,
+ NETIF_MSG_HW_BIT = 13,
+ NETIF_MSG_WOL_BIT = 14,
+ NETIF_MSG_CLASS_COUNT = 15,
+};
+
+struct __user_cap_header_struct {
+ __u32 version;
+ int pid;
+};
+
+typedef struct __user_cap_header_struct *cap_user_header_t;
+
+struct __user_cap_data_struct {
+ __u32 effective;
+ __u32 permitted;
+ __u32 inheritable;
+};
+
+typedef struct __user_cap_data_struct *cap_user_data_t;
+
+typedef struct siginfo siginfo_t;
+
+struct sigqueue {
+ struct list_head list;
+ int flags;
+ kernel_siginfo_t info;
+ struct user_struct *user;
+};
+
+struct ptrace_peeksiginfo_args {
+ __u64 off;
+ __u32 flags;
+ __s32 nr;
+};
+
+struct ptrace_syscall_info {
+ __u8 op;
+ __u32 arch;
+ __u64 instruction_pointer;
+ __u64 stack_pointer;
+ union {
+ struct {
+ __u64 nr;
+ __u64 args[6];
+ } entry;
+ struct {
+ __s64 rval;
+ __u8 is_error;
+ } exit;
+ struct {
+ __u64 nr;
+ __u64 args[6];
+ __u32 ret_data;
+ } seccomp;
+ };
+};
+
+typedef long unsigned int old_sigset_t;
+
+enum siginfo_layout {
+ SIL_KILL = 0,
+ SIL_TIMER = 1,
+ SIL_POLL = 2,
+ SIL_FAULT = 3,
+ SIL_FAULT_MCEERR = 4,
+ SIL_FAULT_BNDERR = 5,
+ SIL_FAULT_PKUERR = 6,
+ SIL_CHLD = 7,
+ SIL_RT = 8,
+ SIL_SYS = 9,
+};
+
+enum {
+ TRACE_SIGNAL_DELIVERED = 0,
+ TRACE_SIGNAL_IGNORED = 1,
+ TRACE_SIGNAL_ALREADY_PENDING = 2,
+ TRACE_SIGNAL_OVERFLOW_FAIL = 3,
+ TRACE_SIGNAL_LOSE_INFO = 4,
+};
+
+struct trace_event_raw_signal_generate {
+ struct trace_entry ent;
+ int sig;
+ int errno;
+ int code;
+ char comm[16];
+ pid_t pid;
+ int group;
+ int result;
+ char __data[0];
+};
+
+struct trace_event_raw_signal_deliver {
+ struct trace_entry ent;
+ int sig;
+ int errno;
+ int code;
+ long unsigned int sa_handler;
+ long unsigned int sa_flags;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_signal_generate {};
+
+struct trace_event_data_offsets_signal_deliver {};
+
+typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int);
+
+typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *);
+
+struct sysinfo {
+ __kernel_long_t uptime;
+ __kernel_ulong_t loads[3];
+ __kernel_ulong_t totalram;
+ __kernel_ulong_t freeram;
+ __kernel_ulong_t sharedram;
+ __kernel_ulong_t bufferram;
+ __kernel_ulong_t totalswap;
+ __kernel_ulong_t freeswap;
+ __u16 procs;
+ __u16 pad;
+ __kernel_ulong_t totalhigh;
+ __kernel_ulong_t freehigh;
+ __u32 mem_unit;
+ char _f[0];
+};
+
+enum {
+ PER_LINUX = 0,
+ PER_LINUX_32BIT = 8388608,
+ PER_LINUX_FDPIC = 524288,
+ PER_SVR4 = 68157441,
+ PER_SVR3 = 83886082,
+ PER_SCOSVR3 = 117440515,
+ PER_OSR5 = 100663299,
+ PER_WYSEV386 = 83886084,
+ PER_ISCR4 = 67108869,
+ PER_BSD = 6,
+ PER_SUNOS = 67108870,
+ PER_XENIX = 83886087,
+ PER_LINUX32 = 8,
+ PER_LINUX32_3GB = 134217736,
+ PER_IRIX32 = 67108873,
+ PER_IRIXN32 = 67108874,
+ PER_IRIX64 = 67108875,
+ PER_RISCOS = 12,
+ PER_SOLARIS = 67108877,
+ PER_UW7 = 68157454,
+ PER_OSF4 = 15,
+ PER_HPUX = 16,
+ PER_MASK = 255,
+};
+
+struct rlimit64 {
+ __u64 rlim_cur;
+ __u64 rlim_max;
+};
+
+struct oldold_utsname {
+ char sysname[9];
+ char nodename[9];
+ char release[9];
+ char version[9];
+ char machine[9];
+};
+
+struct old_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+};
+
+enum uts_proc {
+ UTS_PROC_OSTYPE = 0,
+ UTS_PROC_OSRELEASE = 1,
+ UTS_PROC_VERSION = 2,
+ UTS_PROC_HOSTNAME = 3,
+ UTS_PROC_DOMAINNAME = 4,
+};
+
+struct prctl_mm_map {
+ __u64 start_code;
+ __u64 end_code;
+ __u64 start_data;
+ __u64 end_data;
+ __u64 start_brk;
+ __u64 brk;
+ __u64 start_stack;
+ __u64 arg_start;
+ __u64 arg_end;
+ __u64 env_start;
+ __u64 env_end;
+ __u64 *auxv;
+ __u32 auxv_size;
+ __u32 exe_fd;
+};
+
+struct tms {
+ __kernel_clock_t tms_utime;
+ __kernel_clock_t tms_stime;
+ __kernel_clock_t tms_cutime;
+ __kernel_clock_t tms_cstime;
+};
+
+struct getcpu_cache {
+ long unsigned int blob[16];
+};
+
+struct umh_info {
+ const char *cmdline;
+ struct file *pipe_to_umh;
+ struct file *pipe_from_umh;
+ struct list_head list;
+ void (*cleanup)(struct umh_info *);
+ pid_t pid;
+};
+
+struct wq_flusher;
+
+struct worker;
+
+struct workqueue_attrs;
+
+struct pool_workqueue;
+
+struct wq_device;
+
+struct workqueue_struct {
+ struct list_head pwqs;
+ struct list_head list;
+ struct mutex mutex;
+ int work_color;
+ int flush_color;
+ atomic_t nr_pwqs_to_flush;
+ struct wq_flusher *first_flusher;
+ struct list_head flusher_queue;
+ struct list_head flusher_overflow;
+ struct list_head maydays;
+ struct worker *rescuer;
+ int nr_drainers;
+ int saved_max_active;
+ struct workqueue_attrs *unbound_attrs;
+ struct pool_workqueue *dfl_pwq;
+ struct wq_device *wq_dev;
+ char *lock_name;
+ struct lock_class_key key;
+ struct lockdep_map lockdep_map;
+ char name[24];
+ struct callback_head rcu;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ unsigned int flags;
+ struct pool_workqueue *cpu_pwqs;
+ struct pool_workqueue *numa_pwq_tbl[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct workqueue_attrs {
+ int nice;
+ cpumask_var_t cpumask;
+ bool no_numa;
+};
+
+struct execute_work {
+ struct work_struct work;
+};
+
+enum {
+ WQ_UNBOUND = 2,
+ WQ_FREEZABLE = 4,
+ WQ_MEM_RECLAIM = 8,
+ WQ_HIGHPRI = 16,
+ WQ_CPU_INTENSIVE = 32,
+ WQ_SYSFS = 64,
+ WQ_POWER_EFFICIENT = 128,
+ __WQ_DRAINING = 65536,
+ __WQ_ORDERED = 131072,
+ __WQ_LEGACY = 262144,
+ __WQ_ORDERED_EXPLICIT = 524288,
+ WQ_MAX_ACTIVE = 512,
+ WQ_MAX_UNBOUND_PER_CPU = 4,
+ WQ_DFL_ACTIVE = 256,
+};
+
+typedef unsigned int xa_mark_t;
+
+enum xa_lock_type {
+ XA_LOCK_IRQ = 1,
+ XA_LOCK_BH = 2,
+};
+
+struct __una_u32 {
+ u32 x;
+};
+
+struct worker_pool;
+
+struct worker {
+ union {
+ struct list_head entry;
+ struct hlist_node hentry;
+ };
+ struct work_struct *current_work;
+ work_func_t current_func;
+ struct pool_workqueue *current_pwq;
+ struct list_head scheduled;
+ struct task_struct *task;
+ struct worker_pool *pool;
+ struct list_head node;
+ long unsigned int last_active;
+ unsigned int flags;
+ int id;
+ int sleeping;
+ char desc[24];
+ struct workqueue_struct *rescue_wq;
+ work_func_t last_func;
+};
+
+struct pool_workqueue {
+ struct worker_pool *pool;
+ struct workqueue_struct *wq;
+ int work_color;
+ int flush_color;
+ int refcnt;
+ int nr_in_flight[15];
+ int nr_active;
+ int max_active;
+ struct list_head delayed_works;
+ struct list_head pwqs_node;
+ struct list_head mayday_node;
+ struct work_struct unbound_release_work;
+ struct callback_head rcu;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct worker_pool {
+ raw_spinlock_t lock;
+ int cpu;
+ int node;
+ int id;
+ unsigned int flags;
+ long unsigned int watchdog_ts;
+ struct list_head worklist;
+ int nr_workers;
+ int nr_idle;
+ struct list_head idle_list;
+ struct timer_list idle_timer;
+ struct timer_list mayday_timer;
+ struct hlist_head busy_hash[64];
+ struct worker *manager;
+ struct list_head workers;
+ struct completion *detach_completion;
+ struct ida worker_ida;
+ struct workqueue_attrs *attrs;
+ struct hlist_node hash_node;
+ int refcnt;
+ long: 32;
+ long: 64;
+ long: 64;
+ atomic_t nr_running;
+ struct callback_head rcu;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum {
+ POOL_MANAGER_ACTIVE = 1,
+ POOL_DISASSOCIATED = 4,
+ WORKER_DIE = 2,
+ WORKER_IDLE = 4,
+ WORKER_PREP = 8,
+ WORKER_CPU_INTENSIVE = 64,
+ WORKER_UNBOUND = 128,
+ WORKER_REBOUND = 256,
+ WORKER_NOT_RUNNING = 456,
+ NR_STD_WORKER_POOLS = 2,
+ UNBOUND_POOL_HASH_ORDER = 6,
+ BUSY_WORKER_HASH_ORDER = 6,
+ MAX_IDLE_WORKERS_RATIO = 4,
+ IDLE_WORKER_TIMEOUT = 75000,
+ MAYDAY_INITIAL_TIMEOUT = 2,
+ MAYDAY_INTERVAL = 25,
+ CREATE_COOLDOWN = 250,
+ RESCUER_NICE_LEVEL = -20,
+ HIGHPRI_NICE_LEVEL = -20,
+ WQ_NAME_LEN = 24,
+};
+
+struct wq_flusher {
+ struct list_head list;
+ int flush_color;
+ struct completion done;
+};
+
+struct wq_device {
+ struct workqueue_struct *wq;
+ struct device dev;
+};
+
+struct trace_event_raw_workqueue_queue_work {
+ struct trace_entry ent;
+ void *work;
+ void *function;
+ void *workqueue;
+ unsigned int req_cpu;
+ unsigned int cpu;
+ char __data[0];
+};
+
+struct trace_event_raw_workqueue_activate_work {
+ struct trace_entry ent;
+ void *work;
+ char __data[0];
+};
+
+struct trace_event_raw_workqueue_execute_start {
+ struct trace_entry ent;
+ void *work;
+ void *function;
+ char __data[0];
+};
+
+struct trace_event_raw_workqueue_execute_end {
+ struct trace_entry ent;
+ void *work;
+ void *function;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_workqueue_queue_work {};
+
+struct trace_event_data_offsets_workqueue_activate_work {};
+
+struct trace_event_data_offsets_workqueue_execute_start {};
+
+struct trace_event_data_offsets_workqueue_execute_end {};
+
+typedef void (*btf_trace_workqueue_queue_work)(void *, unsigned int, struct pool_workqueue *, struct work_struct *);
+
+typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *);
+
+typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *);
+
+typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t);
+
+struct wq_barrier {
+ struct work_struct work;
+ struct completion done;
+ struct task_struct *task;
+};
+
+struct cwt_wait {
+ wait_queue_entry_t wait;
+ struct work_struct *work;
+};
+
+struct apply_wqattrs_ctx {
+ struct workqueue_struct *wq;
+ struct workqueue_attrs *attrs;
+ struct list_head list;
+ struct pool_workqueue *dfl_pwq;
+ struct pool_workqueue *pwq_tbl[0];
+};
+
+struct work_for_cpu {
+ struct work_struct work;
+ long int (*fn)(void *);
+ void *arg;
+ long int ret;
+};
+
+typedef void (*task_work_func_t)(struct callback_head *);
+
+enum {
+ KERNEL_PARAM_OPS_FL_NOARG = 1,
+};
+
+enum {
+ KERNEL_PARAM_FL_UNSAFE = 1,
+ KERNEL_PARAM_FL_HWPARAM = 2,
+};
+
+struct param_attribute {
+ struct module_attribute mattr;
+ const struct kernel_param *param;
+};
+
+struct module_param_attrs {
+ unsigned int num;
+ struct attribute_group grp;
+ struct param_attribute attrs[0];
+};
+
+struct module_version_attribute {
+ struct module_attribute mattr;
+ const char *module_name;
+ const char *version;
+};
+
+struct kmalloced_param {
+ struct list_head list;
+ char val[0];
+};
+
+struct sched_param {
+ int sched_priority;
+};
+
+enum {
+ __PERCPU_REF_ATOMIC = 1,
+ __PERCPU_REF_DEAD = 2,
+ __PERCPU_REF_ATOMIC_DEAD = 3,
+ __PERCPU_REF_FLAG_BITS = 2,
+};
+
+struct kthread_work;
+
+typedef void (*kthread_work_func_t)(struct kthread_work *);
+
+struct kthread_worker;
+
+struct kthread_work {
+ struct list_head node;
+ kthread_work_func_t func;
+ struct kthread_worker *worker;
+ int canceling;
+};
+
+enum {
+ KTW_FREEZABLE = 1,
+};
+
+struct kthread_worker {
+ unsigned int flags;
+ raw_spinlock_t lock;
+ struct list_head work_list;
+ struct list_head delayed_work_list;
+ struct task_struct *task;
+ struct kthread_work *current_work;
+};
+
+struct kthread_delayed_work {
+ struct kthread_work work;
+ struct timer_list timer;
+};
+
+enum {
+ CSS_NO_REF = 1,
+ CSS_ONLINE = 2,
+ CSS_RELEASED = 4,
+ CSS_VISIBLE = 8,
+ CSS_DYING = 16,
+};
+
+struct kthread_create_info {
+ int (*threadfn)(void *);
+ void *data;
+ int node;
+ struct task_struct *result;
+ struct completion *done;
+ struct list_head list;
+};
+
+struct kthread {
+ long unsigned int flags;
+ unsigned int cpu;
+ int (*threadfn)(void *);
+ void *data;
+ mm_segment_t oldfs;
+ struct completion parked;
+ struct completion exited;
+ struct cgroup_subsys_state *blkcg_css;
+};
+
+enum KTHREAD_BITS {
+ KTHREAD_IS_PER_CPU = 0,
+ KTHREAD_SHOULD_STOP = 1,
+ KTHREAD_SHOULD_PARK = 2,
+};
+
+struct kthread_flush_work {
+ struct kthread_work work;
+ struct completion done;
+};
+
+struct pt_regs___2;
+
+struct ipc_ids {
+ int in_use;
+ short unsigned int seq;
+ struct rw_semaphore rwsem;
+ struct idr ipcs_idr;
+ int max_idx;
+ int last_idx;
+ struct rhashtable key_ht;
+};
+
+struct ipc_namespace {
+ refcount_t count;
+ struct ipc_ids ids[3];
+ int sem_ctls[4];
+ int used_sems;
+ unsigned int msg_ctlmax;
+ unsigned int msg_ctlmnb;
+ unsigned int msg_ctlmni;
+ atomic_t msg_bytes;
+ atomic_t msg_hdrs;
+ size_t shm_ctlmax;
+ size_t shm_ctlall;
+ long unsigned int shm_tot;
+ int shm_ctlmni;
+ int shm_rmid_forced;
+ struct notifier_block ipcns_nb;
+ struct vfsmount *mq_mnt;
+ unsigned int mq_queues_count;
+ unsigned int mq_queues_max;
+ unsigned int mq_msg_max;
+ unsigned int mq_msgsize_max;
+ unsigned int mq_msg_default;
+ unsigned int mq_msgsize_default;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct llist_node mnt_llist;
+ struct ns_common ns;
+};
+
+struct srcu_notifier_head {
+ struct mutex mutex;
+ struct srcu_struct srcu;
+ struct notifier_block *head;
+};
+
+enum what {
+ PROC_EVENT_NONE = 0,
+ PROC_EVENT_FORK = 1,
+ PROC_EVENT_EXEC = 2,
+ PROC_EVENT_UID = 4,
+ PROC_EVENT_GID = 64,
+ PROC_EVENT_SID = 128,
+ PROC_EVENT_PTRACE = 256,
+ PROC_EVENT_COMM = 512,
+ PROC_EVENT_COREDUMP = 1073741824,
+ PROC_EVENT_EXIT = -2147483648,
+};
+
+typedef u64 async_cookie_t;
+
+typedef void (*async_func_t)(void *, async_cookie_t);
+
+struct async_domain {
+ struct list_head pending;
+ unsigned int registered: 1;
+};
+
+struct async_entry {
+ struct list_head domain_list;
+ struct list_head global_list;
+ struct work_struct work;
+ async_cookie_t cookie;
+ async_func_t func;
+ void *data;
+ struct async_domain *domain;
+};
+
+struct smpboot_thread_data {
+ unsigned int cpu;
+ unsigned int status;
+ struct smp_hotplug_thread *ht;
+};
+
+enum {
+ HP_THREAD_NONE = 0,
+ HP_THREAD_ACTIVE = 1,
+ HP_THREAD_PARKED = 2,
+};
+
+struct pin_cookie {
+ unsigned int val;
+};
+
+enum {
+ CSD_FLAG_LOCK = 1,
+ CSD_TYPE_ASYNC = 0,
+ CSD_TYPE_SYNC = 16,
+ CSD_TYPE_IRQ_WORK = 32,
+ CSD_TYPE_TTWU = 48,
+ CSD_FLAG_TYPE_MASK = 240,
+};
+
+struct dl_bw {
+ raw_spinlock_t lock;
+ u64 bw;
+ u64 total_bw;
+};
+
+struct cpudl_item;
+
+struct cpudl {
+ raw_spinlock_t lock;
+ int size;
+ cpumask_var_t free_cpus;
+ struct cpudl_item *elements;
+};
+
+struct cpupri_vec {
+ atomic_t count;
+ cpumask_var_t mask;
+};
+
+struct cpupri {
+ struct cpupri_vec pri_to_cpu[102];
+ int *cpu_to_pri;
+};
+
+struct perf_domain;
+
+struct root_domain___2 {
+ atomic_t refcount;
+ atomic_t rto_count;
+ struct callback_head rcu;
+ cpumask_var_t span;
+ cpumask_var_t online;
+ int overload;
+ int overutilized;
+ cpumask_var_t dlo_mask;
+ atomic_t dlo_count;
+ struct dl_bw dl_bw;
+ struct cpudl cpudl;
+ struct irq_work rto_push_work;
+ raw_spinlock_t rto_lock;
+ int rto_loop;
+ int rto_cpu;
+ atomic_t rto_loop_next;
+ atomic_t rto_loop_start;
+ cpumask_var_t rto_mask;
+ struct cpupri cpupri;
+ long unsigned int max_cpu_capacity;
+ struct perf_domain *pd;
+};
+
+struct cfs_rq {
+ struct load_weight load;
+ unsigned int nr_running;
+ unsigned int h_nr_running;
+ unsigned int idle_h_nr_running;
+ u64 exec_clock;
+ u64 min_vruntime;
+ struct rb_root_cached tasks_timeline;
+ struct sched_entity *curr;
+ struct sched_entity *next;
+ struct sched_entity *last;
+ struct sched_entity *skip;
+ unsigned int nr_spread_over;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct sched_avg avg;
+ struct {
+ raw_spinlock_t lock;
+ int nr;
+ long unsigned int load_avg;
+ long unsigned int util_avg;
+ long unsigned int runnable_avg;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ } removed;
+ long unsigned int tg_load_avg_contrib;
+ long int propagate;
+ long int prop_runnable_sum;
+ long unsigned int h_load;
+ u64 last_h_load_update;
+ struct sched_entity *h_load_next;
+ struct rq *rq;
+ int on_list;
+ struct list_head leaf_cfs_rq_list;
+ struct task_group *tg;
+ int runtime_enabled;
+ s64 runtime_remaining;
+ u64 throttled_clock;
+ u64 throttled_clock_task;
+ u64 throttled_clock_task_time;
+ int throttled;
+ int throttle_count;
+ struct list_head throttled_list;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct cfs_bandwidth {
+ raw_spinlock_t lock;
+ ktime_t period;
+ u64 quota;
+ u64 runtime;
+ s64 hierarchical_quota;
+ u8 idle;
+ u8 period_active;
+ u8 slack_started;
+ struct hrtimer period_timer;
+ struct hrtimer slack_timer;
+ struct list_head throttled_cfs_rq;
+ int nr_periods;
+ int nr_throttled;
+ u64 throttled_time;
+};
+
+struct task_group {
+ struct cgroup_subsys_state css;
+ struct sched_entity **se;
+ struct cfs_rq **cfs_rq;
+ long unsigned int shares;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ atomic_long_t load_avg;
+ struct callback_head rcu;
+ struct list_head list;
+ struct task_group *parent;
+ struct list_head siblings;
+ struct list_head children;
+ struct cfs_bandwidth cfs_bandwidth;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct update_util_data {
+ void (*func)(struct update_util_data *, u64, unsigned int);
+};
+
+enum {
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1,
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2,
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4,
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8,
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16,
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32,
+};
+
+struct sched_group {
+ struct sched_group *next;
+ atomic_t ref;
+ unsigned int group_weight;
+ struct sched_group_capacity *sgc;
+ int asym_prefer_cpu;
+ long unsigned int cpumask[0];
+};
+
+struct sched_group_capacity {
+ atomic_t ref;
+ long unsigned int capacity;
+ long unsigned int min_capacity;
+ long unsigned int max_capacity;
+ long unsigned int next_update;
+ int imbalance;
+ int id;
+ long unsigned int cpumask[0];
+};
+
+struct wake_q_head {
+ struct wake_q_node *first;
+ struct wake_q_node **lastp;
+};
+
+struct sched_attr {
+ __u32 size;
+ __u32 sched_policy;
+ __u64 sched_flags;
+ __s32 sched_nice;
+ __u32 sched_priority;
+ __u64 sched_runtime;
+ __u64 sched_deadline;
+ __u64 sched_period;
+ __u32 sched_util_min;
+ __u32 sched_util_max;
+};
+
+struct cpuidle_driver___2;
+
+struct cpuidle_state {
+ char name[16];
+ char desc[32];
+ u64 exit_latency_ns;
+ u64 target_residency_ns;
+ unsigned int flags;
+ unsigned int exit_latency;
+ int power_usage;
+ unsigned int target_residency;
+ int (*enter)(struct cpuidle_device *, struct cpuidle_driver___2 *, int);
+ int (*enter_dead)(struct cpuidle_device *, int);
+ void (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver___2 *, int);
+};
+
+struct cpuidle_driver___2 {
+ const char *name;
+ struct module *owner;
+ unsigned int bctimer: 1;
+ struct cpuidle_state states[10];
+ int state_count;
+ int safe_state_index;
+ struct cpumask *cpumask;
+ const char *governor;
+};
+
+struct em_cap_state {
+ long unsigned int frequency;
+ long unsigned int power;
+ long unsigned int cost;
+};
+
+struct em_perf_domain {
+ struct em_cap_state *table;
+ int nr_cap_states;
+ long unsigned int cpus[0];
+};
+
+enum {
+ CFTYPE_ONLY_ON_ROOT = 1,
+ CFTYPE_NOT_ON_ROOT = 2,
+ CFTYPE_NS_DELEGATABLE = 4,
+ CFTYPE_NO_PREFIX = 8,
+ CFTYPE_WORLD_WRITABLE = 16,
+ CFTYPE_DEBUG = 32,
+ __CFTYPE_ONLY_ON_DFL = 65536,
+ __CFTYPE_NOT_ON_DFL = 131072,
+};
+
+typedef int (*cpu_stop_fn_t)(void *);
+
+struct cpu_stop_done;
+
+struct cpu_stop_work {
+ struct list_head list;
+ cpu_stop_fn_t fn;
+ void *arg;
+ struct cpu_stop_done *done;
+};
+
+struct cpudl_item {
+ u64 dl;
+ int cpu;
+ int idx;
+};
+
+struct rt_prio_array {
+ long unsigned int bitmap[2];
+ struct list_head queue[100];
+};
+
+struct rt_bandwidth {
+ raw_spinlock_t rt_runtime_lock;
+ ktime_t rt_period;
+ u64 rt_runtime;
+ struct hrtimer rt_period_timer;
+ unsigned int rt_period_active;
+};
+
+struct dl_bandwidth {
+ raw_spinlock_t dl_runtime_lock;
+ u64 dl_runtime;
+ u64 dl_period;
+};
+
+typedef int (*tg_visitor)(struct task_group *, void *);
+
+struct rt_rq {
+ struct rt_prio_array active;
+ unsigned int rt_nr_running;
+ unsigned int rr_nr_running;
+ struct {
+ int curr;
+ int next;
+ } highest_prio;
+ long unsigned int rt_nr_migratory;
+ long unsigned int rt_nr_total;
+ int overloaded;
+ struct plist_head pushable_tasks;
+ int rt_queued;
+ int rt_throttled;
+ u64 rt_time;
+ u64 rt_runtime;
+ raw_spinlock_t rt_runtime_lock;
+};
+
+struct dl_rq {
+ struct rb_root_cached root;
+ long unsigned int dl_nr_running;
+ struct {
+ u64 curr;
+ u64 next;
+ } earliest_dl;
+ long unsigned int dl_nr_migratory;
+ int overloaded;
+ struct rb_root_cached pushable_dl_tasks_root;
+ u64 running_bw;
+ u64 this_bw;
+ u64 extra_bw;
+ u64 bw_ratio;
+};
+
+struct rq {
+ raw_spinlock_t lock;
+ unsigned int nr_running;
+ long unsigned int last_blocked_load_update_tick;
+ unsigned int has_blocked_load;
+ long: 32;
+ long: 64;
+ call_single_data_t nohz_csd;
+ unsigned int nohz_tick_stopped;
+ atomic_t nohz_flags;
+ unsigned int ttwu_pending;
+ u64 nr_switches;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct cfs_rq cfs;
+ struct rt_rq rt;
+ struct dl_rq dl;
+ struct list_head leaf_cfs_rq_list;
+ struct list_head *tmp_alone_branch;
+ long unsigned int nr_uninterruptible;
+ struct task_struct *curr;
+ struct task_struct *idle;
+ struct task_struct *stop;
+ long unsigned int next_balance;
+ struct mm_struct *prev_mm;
+ unsigned int clock_update_flags;
+ u64 clock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ u64 clock_task;
+ u64 clock_pelt;
+ long unsigned int lost_idle_time;
+ atomic_t nr_iowait;
+ int membarrier_state;
+ struct root_domain___2 *rd;
+ struct sched_domain *sd;
+ long unsigned int cpu_capacity;
+ long unsigned int cpu_capacity_orig;
+ struct callback_head *balance_callback;
+ unsigned char nohz_idle_balance;
+ unsigned char idle_balance;
+ long unsigned int misfit_task_load;
+ int active_balance;
+ int push_cpu;
+ struct cpu_stop_work active_balance_work;
+ int cpu;
+ int online;
+ struct list_head cfs_tasks;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct sched_avg avg_rt;
+ struct sched_avg avg_dl;
+ u64 idle_stamp;
+ u64 avg_idle;
+ u64 max_idle_balance_cost;
+ u64 prev_steal_time;
+ long unsigned int calc_load_update;
+ long int calc_load_active;
+ long: 64;
+ long: 64;
+ call_single_data_t hrtick_csd;
+ struct hrtimer hrtick_timer;
+ struct cpuidle_state *idle_state;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct perf_domain {
+ struct em_perf_domain *em_pd;
+ struct perf_domain *next;
+ struct callback_head rcu;
+};
+
+struct rq_flags {
+ long unsigned int flags;
+ struct pin_cookie cookie;
+ unsigned int clock_update_flags;
+};
+
+enum {
+ __SCHED_FEAT_GENTLE_FAIR_SLEEPERS = 0,
+ __SCHED_FEAT_START_DEBIT = 1,
+ __SCHED_FEAT_NEXT_BUDDY = 2,
+ __SCHED_FEAT_LAST_BUDDY = 3,
+ __SCHED_FEAT_CACHE_HOT_BUDDY = 4,
+ __SCHED_FEAT_WAKEUP_PREEMPTION = 5,
+ __SCHED_FEAT_HRTICK = 6,
+ __SCHED_FEAT_DOUBLE_TICK = 7,
+ __SCHED_FEAT_NONTASK_CAPACITY = 8,
+ __SCHED_FEAT_TTWU_QUEUE = 9,
+ __SCHED_FEAT_SIS_AVG_CPU = 10,
+ __SCHED_FEAT_SIS_PROP = 11,
+ __SCHED_FEAT_WARN_DOUBLE_CLOCK = 12,
+ __SCHED_FEAT_RT_PUSH_IPI = 13,
+ __SCHED_FEAT_RT_RUNTIME_SHARE = 14,
+ __SCHED_FEAT_LB_MIN = 15,
+ __SCHED_FEAT_ATTACH_AGE_LOAD = 16,
+ __SCHED_FEAT_WA_IDLE = 17,
+ __SCHED_FEAT_WA_WEIGHT = 18,
+ __SCHED_FEAT_WA_BIAS = 19,
+ __SCHED_FEAT_UTIL_EST = 20,
+ __SCHED_FEAT_UTIL_EST_FASTUP = 21,
+ __SCHED_FEAT_NR = 22,
+};
+
+struct trace_event_raw_sched_kthread_stop {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_kthread_stop_ret {
+ struct trace_entry ent;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_wakeup_template {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ int prio;
+ int success;
+ int target_cpu;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_switch {
+ struct trace_entry ent;
+ char prev_comm[16];
+ pid_t prev_pid;
+ int prev_prio;
+ long int prev_state;
+ char next_comm[16];
+ pid_t next_pid;
+ int next_prio;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_migrate_task {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ int prio;
+ int orig_cpu;
+ int dest_cpu;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_process_template {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ int prio;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_process_wait {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ int prio;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_process_fork {
+ struct trace_entry ent;
+ char parent_comm[16];
+ pid_t parent_pid;
+ char child_comm[16];
+ pid_t child_pid;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_process_exec {
+ struct trace_entry ent;
+ u32 __data_loc_filename;
+ pid_t pid;
+ pid_t old_pid;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_stat_runtime {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ u64 runtime;
+ u64 vruntime;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_pi_setprio {
+ struct trace_entry ent;
+ char comm[16];
+ pid_t pid;
+ int oldprio;
+ int newprio;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_move_numa {
+ struct trace_entry ent;
+ pid_t pid;
+ pid_t tgid;
+ pid_t ngid;
+ int src_cpu;
+ int src_nid;
+ int dst_cpu;
+ int dst_nid;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_numa_pair_template {
+ struct trace_entry ent;
+ pid_t src_pid;
+ pid_t src_tgid;
+ pid_t src_ngid;
+ int src_cpu;
+ int src_nid;
+ pid_t dst_pid;
+ pid_t dst_tgid;
+ pid_t dst_ngid;
+ int dst_cpu;
+ int dst_nid;
+ char __data[0];
+};
+
+struct trace_event_raw_sched_wake_idle_without_ipi {
+ struct trace_entry ent;
+ int cpu;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_sched_kthread_stop {};
+
+struct trace_event_data_offsets_sched_kthread_stop_ret {};
+
+struct trace_event_data_offsets_sched_wakeup_template {};
+
+struct trace_event_data_offsets_sched_switch {};
+
+struct trace_event_data_offsets_sched_migrate_task {};
+
+struct trace_event_data_offsets_sched_process_template {};
+
+struct trace_event_data_offsets_sched_process_wait {};
+
+struct trace_event_data_offsets_sched_process_fork {};
+
+struct trace_event_data_offsets_sched_process_exec {
+ u32 filename;
+};
+
+struct trace_event_data_offsets_sched_stat_runtime {};
+
+struct trace_event_data_offsets_sched_pi_setprio {};
+
+struct trace_event_data_offsets_sched_move_numa {};
+
+struct trace_event_data_offsets_sched_numa_pair_template {};
+
+struct trace_event_data_offsets_sched_wake_idle_without_ipi {};
+
+typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int);
+
+typedef void (*btf_trace_sched_waking)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *);
+
+typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int);
+
+typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *);
+
+typedef void (*btf_trace_sched_process_wait)(void *, struct pid *);
+
+typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *);
+
+typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *);
+
+typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64, u64);
+
+typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *);
+
+typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int);
+
+typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int);
+
+typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int);
+
+typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int);
+
+struct migration_arg {
+ struct task_struct *task;
+ int dest_cpu;
+};
+
+struct tick_work {
+ int cpu;
+ atomic_t state;
+ struct delayed_work work;
+};
+
+struct cfs_schedulable_data {
+ struct task_group *tg;
+ u64 period;
+ u64 quota;
+};
+
+enum {
+ cpuset = 0,
+ possible = 1,
+ fail = 2,
+};
+
+enum tick_dep_bits {
+ TICK_DEP_BIT_POSIX_TIMER = 0,
+ TICK_DEP_BIT_PERF_EVENTS = 1,
+ TICK_DEP_BIT_SCHED = 2,
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
+ TICK_DEP_BIT_RCU = 4,
+ TICK_DEP_BIT_RCU_EXP = 5,
+};
+
+struct sched_clock_data {
+ u64 tick_raw;
+ u64 tick_gtod;
+ u64 clock;
+};
+
+typedef u64 pao_T_____4;
+
+struct idle_timer {
+ struct hrtimer timer;
+ int done;
+};
+
+enum cpu_idle_type {
+ CPU_IDLE = 0,
+ CPU_NOT_IDLE = 1,
+ CPU_NEWLY_IDLE = 2,
+ CPU_MAX_IDLE_TYPES = 3,
+};
+
+enum schedutil_type {
+ FREQUENCY_UTIL = 0,
+ ENERGY_UTIL = 1,
+};
+
+enum fbq_type {
+ regular = 0,
+ remote = 1,
+ all = 2,
+};
+
+enum group_type {
+ group_has_spare = 0,
+ group_fully_busy = 1,
+ group_misfit_task = 2,
+ group_asym_packing = 3,
+ group_imbalanced = 4,
+ group_overloaded = 5,
+};
+
+enum migration_type {
+ migrate_load = 0,
+ migrate_util = 1,
+ migrate_task = 2,
+ migrate_misfit = 3,
+};
+
+struct lb_env {
+ struct sched_domain *sd;
+ struct rq *src_rq;
+ int src_cpu;
+ int dst_cpu;
+ struct rq *dst_rq;
+ struct cpumask *dst_grpmask;
+ int new_dst_cpu;
+ enum cpu_idle_type idle;
+ long int imbalance;
+ struct cpumask *cpus;
+ unsigned int flags;
+ unsigned int loop;
+ unsigned int loop_break;
+ unsigned int loop_max;
+ enum fbq_type fbq_type;
+ enum migration_type migration_type;
+ struct list_head tasks;
+};
+
+struct sg_lb_stats {
+ long unsigned int avg_load;
+ long unsigned int group_load;
+ long unsigned int group_capacity;
+ long unsigned int group_util;
+ long unsigned int group_runnable;
+ unsigned int sum_nr_running;
+ unsigned int sum_h_nr_running;
+ unsigned int idle_cpus;
+ unsigned int group_weight;
+ enum group_type group_type;
+ unsigned int group_asym_packing;
+ long unsigned int group_misfit_task_load;
+};
+
+struct sd_lb_stats {
+ struct sched_group *busiest;
+ struct sched_group *local;
+ long unsigned int total_load;
+ long unsigned int total_capacity;
+ long unsigned int avg_load;
+ unsigned int prefer_sibling;
+ struct sg_lb_stats busiest_stat;
+ struct sg_lb_stats local_stat;
+};
+
+typedef struct rt_rq *rt_rq_iter_t;
+
+struct wait_bit_key {
+ void *flags;
+ int bit_nr;
+ long unsigned int timeout;
+};
+
+struct wait_bit_queue_entry {
+ struct wait_bit_key key;
+ struct wait_queue_entry wq_entry;
+};
+
+typedef int wait_bit_action_f(struct wait_bit_key *, int);
+
+struct sched_domain_attr {
+ int relax_domain_level;
+};
+
+struct s_data {
+ struct sched_domain **sd;
+ struct root_domain___2 *rd;
+};
+
+enum s_alloc {
+ sa_rootdomain = 0,
+ sa_sd = 1,
+ sa_sd_storage = 2,
+ sa_none = 3,
+};
+
+enum cpuacct_stat_index {
+ CPUACCT_STAT_USER = 0,
+ CPUACCT_STAT_SYSTEM = 1,
+ CPUACCT_STAT_NSTATS = 2,
+};
+
+struct cpuacct_usage {
+ u64 usages[2];
+};
+
+struct cpuacct {
+ struct cgroup_subsys_state css;
+ struct cpuacct_usage *cpuusage;
+ struct kernel_cpustat *cpustat;
+};
+
+struct gov_attr_set {
+ struct kobject kobj;
+ struct list_head policy_list;
+ struct mutex update_lock;
+ int usage_count;
+};
+
+struct governor_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct gov_attr_set *, char *);
+ ssize_t (*store)(struct gov_attr_set *, const char *, size_t);
+};
+
+struct sugov_tunables {
+ struct gov_attr_set attr_set;
+ unsigned int rate_limit_us;
+};
+
+struct sugov_policy {
+ struct cpufreq_policy *policy;
+ struct sugov_tunables *tunables;
+ struct list_head tunables_hook;
+ raw_spinlock_t update_lock;
+ u64 last_freq_update_time;
+ s64 freq_update_delay_ns;
+ unsigned int next_freq;
+ unsigned int cached_raw_freq;
+ struct irq_work irq_work;
+ struct kthread_work work;
+ struct mutex work_lock;
+ struct kthread_worker worker;
+ struct task_struct *thread;
+ bool work_in_progress;
+ bool limits_changed;
+ bool need_freq_update;
+};
+
+struct sugov_cpu {
+ struct update_util_data update_util;
+ struct sugov_policy *sg_policy;
+ unsigned int cpu;
+ bool iowait_boost_pending;
+ unsigned int iowait_boost;
+ u64 last_update;
+ long unsigned int bw_dl;
+ long unsigned int max;
+ long unsigned int saved_idle_calls;
+};
+
+enum {
+ MEMBARRIER_FLAG_SYNC_CORE = 1,
+};
+
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_GLOBAL = 1,
+ MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2,
+ MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4,
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8,
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16,
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32,
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64,
+ MEMBARRIER_CMD_SHARED = 1,
+};
+
+struct ww_class;
+
+struct ww_mutex;
+
+struct ww_acquire_ctx {
+ struct task_struct *task;
+ long unsigned int stamp;
+ unsigned int acquired;
+ short unsigned int wounded;
+ short unsigned int is_wait_die;
+ unsigned int done_acquire;
+ struct ww_class *ww_class;
+ struct ww_mutex *contending_lock;
+ struct lockdep_map dep_map;
+ unsigned int deadlock_inject_interval;
+ unsigned int deadlock_inject_countdown;
+};
+
+enum mutex_trylock_recursive_enum {
+ MUTEX_TRYLOCK_FAILED = 0,
+ MUTEX_TRYLOCK_SUCCESS = 1,
+ MUTEX_TRYLOCK_RECURSIVE = 2,
+};
+
+struct ww_class {
+ atomic_long_t stamp;
+ struct lock_class_key acquire_key;
+ struct lock_class_key mutex_key;
+ const char *acquire_name;
+ const char *mutex_name;
+ unsigned int is_wait_die;
+};
+
+struct ww_mutex {
+ struct mutex base;
+ struct ww_acquire_ctx *ctx;
+ struct ww_class *ww_class;
+};
+
+struct semaphore {
+ raw_spinlock_t lock;
+ unsigned int count;
+ struct list_head wait_list;
+};
+
+struct semaphore_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ bool up;
+};
+
+enum rwsem_waiter_type {
+ RWSEM_WAITING_FOR_WRITE = 0,
+ RWSEM_WAITING_FOR_READ = 1,
+};
+
+struct rwsem_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ enum rwsem_waiter_type type;
+ long unsigned int timeout;
+ long unsigned int last_rowner;
+};
+
+enum rwsem_wake_type {
+ RWSEM_WAKE_ANY = 0,
+ RWSEM_WAKE_READERS = 1,
+ RWSEM_WAKE_READ_OWNED = 2,
+};
+
+enum writer_wait_state {
+ WRITER_NOT_FIRST = 0,
+ WRITER_FIRST = 1,
+ WRITER_HANDOFF = 2,
+};
+
+enum owner_state {
+ OWNER_NULL = 1,
+ OWNER_WRITER = 2,
+ OWNER_READER = 4,
+ OWNER_NONSPINNABLE = 8,
+};
+
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct lock_class *links_to;
+ const struct lock_trace *trace;
+ int distance;
+ struct lock_list *parent;
+};
+
+struct lock_chain {
+ unsigned int irq_context: 2;
+ unsigned int depth: 6;
+ unsigned int base: 24;
+ struct hlist_node entry;
+ u64 chain_key;
+};
+
+enum lock_usage_bit {
+ LOCK_USED_IN_HARDIRQ = 0,
+ LOCK_USED_IN_HARDIRQ_READ = 1,
+ LOCK_ENABLED_HARDIRQ = 2,
+ LOCK_ENABLED_HARDIRQ_READ = 3,
+ LOCK_USED_IN_SOFTIRQ = 4,
+ LOCK_USED_IN_SOFTIRQ_READ = 5,
+ LOCK_ENABLED_SOFTIRQ = 6,
+ LOCK_ENABLED_SOFTIRQ_READ = 7,
+ LOCK_USED = 8,
+ LOCK_USAGE_STATES = 9,
+};
+
+struct trace_event_raw_lock_acquire {
+ struct trace_entry ent;
+ unsigned int flags;
+ u32 __data_loc_name;
+ void *lockdep_addr;
+ char __data[0];
+};
+
+struct trace_event_raw_lock {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ void *lockdep_addr;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_lock_acquire {
+ u32 name;
+};
+
+struct trace_event_data_offsets_lock {
+ u32 name;
+};
+
+typedef void (*btf_trace_lock_acquire)(void *, struct lockdep_map *, unsigned int, int, int, int, struct lockdep_map *, long unsigned int);
+
+typedef void (*btf_trace_lock_release)(void *, struct lockdep_map *, long unsigned int);
+
+struct pending_free {
+ struct list_head zapped;
+ long unsigned int lock_chains_being_freed[1024];
+};
+
+struct delayed_free {
+ struct callback_head callback_head;
+ int index;
+ int scheduled;
+ struct pending_free pf[2];
+};
+
+struct circular_queue {
+ struct lock_list *element[4096];
+ unsigned int front;
+ unsigned int rear;
+};
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, enum lock_usage_bit, const char *);
+
+enum {
+ LOCKF_USED_IN_HARDIRQ = 1,
+ LOCKF_USED_IN_HARDIRQ_READ = 2,
+ LOCKF_ENABLED_HARDIRQ = 4,
+ LOCKF_ENABLED_HARDIRQ_READ = 8,
+ LOCKF_USED_IN_SOFTIRQ = 16,
+ LOCKF_USED_IN_SOFTIRQ_READ = 32,
+ LOCKF_ENABLED_SOFTIRQ = 64,
+ LOCKF_ENABLED_SOFTIRQ_READ = 128,
+ LOCKF_USED = 256,
+};
+
+struct optimistic_spin_node {
+ struct optimistic_spin_node *next;
+ struct optimistic_spin_node *prev;
+ int locked;
+ int cpu;
+};
+
+struct mcs_spinlock {
+ struct mcs_spinlock *next;
+ int locked;
+ int count;
+};
+
+struct qnode {
+ struct mcs_spinlock mcs;
+};
+
+struct hrtimer_sleeper {
+ struct hrtimer timer;
+ struct task_struct *task;
+};
+
+struct rt_mutex;
+
+struct rt_mutex_waiter {
+ struct rb_node tree_entry;
+ struct rb_node pi_tree_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
+ long unsigned int ip;
+ struct pid *deadlock_task_pid;
+ struct rt_mutex *deadlock_lock;
+ int prio;
+ u64 deadline;
+};
+
+struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+ int save_state;
+ const char *name;
+ const char *file;
+ int line;
+ void *magic;
+ struct lockdep_map dep_map;
+};
+
+enum rtmutex_chainwalk {
+ RT_MUTEX_MIN_CHAINWALK = 0,
+ RT_MUTEX_FULL_CHAINWALK = 1,
+};
+
+struct pm_qos_request {
+ struct plist_node node;
+ struct pm_qos_constraints *qos;
+};
+
+enum pm_qos_req_action {
+ PM_QOS_ADD_REQ = 0,
+ PM_QOS_UPDATE_REQ = 1,
+ PM_QOS_REMOVE_REQ = 2,
+};
+
+struct miscdevice {
+ int minor;
+ const char *name;
+ const struct file_operations *fops;
+ struct list_head list;
+ struct device *parent;
+ struct device *this_device;
+ const struct attribute_group **groups;
+ const char *nodename;
+ umode_t mode;
+};
+
+struct kmsg_dumper {
+ struct list_head list;
+ void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason);
+ enum kmsg_dump_reason max_reason;
+ bool active;
+ bool registered;
+ u32 cur_idx;
+ u32 next_idx;
+ u64 cur_seq;
+ u64 next_seq;
+};
+
+struct trace_event_raw_console {
+ struct trace_entry ent;
+ u32 __data_loc_msg;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_console {
+ u32 msg;
+};
+
+typedef void (*btf_trace_console)(void *, const char *, size_t);
+
+struct console_cmdline {
+ char name[16];
+ int index;
+ bool user_specified;
+ char *options;
+};
+
+enum devkmsg_log_bits {
+ __DEVKMSG_LOG_BIT_ON = 0,
+ __DEVKMSG_LOG_BIT_OFF = 1,
+ __DEVKMSG_LOG_BIT_LOCK = 2,
+};
+
+enum devkmsg_log_masks {
+ DEVKMSG_LOG_MASK_ON = 1,
+ DEVKMSG_LOG_MASK_OFF = 2,
+ DEVKMSG_LOG_MASK_LOCK = 4,
+};
+
+enum con_msg_format_flags {
+ MSG_FORMAT_DEFAULT = 0,
+ MSG_FORMAT_SYSLOG = 1,
+};
+
+enum log_flags {
+ LOG_NEWLINE = 2,
+ LOG_CONT = 8,
+};
+
+struct printk_log {
+ u64 ts_nsec;
+ u16 len;
+ u16 text_len;
+ u16 dict_len;
+ u8 facility;
+ u8 flags: 5;
+ u8 level: 3;
+};
+
+struct devkmsg_user {
+ u64 seq;
+ u32 idx;
+ struct ratelimit_state rs;
+ struct mutex lock;
+ char buf[8192];
+};
+
+struct cont {
+ char buf[992];
+ size_t len;
+ u32 caller_id;
+ u64 ts_nsec;
+ u8 level;
+ u8 facility;
+ enum log_flags flags;
+};
+
+struct printk_safe_seq_buf {
+ atomic_t len;
+ atomic_t message_lost;
+ struct irq_work work;
+ unsigned char buffer[8160];
+};
+
+enum {
+ IRQS_AUTODETECT = 1,
+ IRQS_SPURIOUS_DISABLED = 2,
+ IRQS_POLL_INPROGRESS = 8,
+ IRQS_ONESHOT = 32,
+ IRQS_REPLAY = 64,
+ IRQS_WAITING = 128,
+ IRQS_PENDING = 512,
+ IRQS_SUSPENDED = 2048,
+ IRQS_TIMINGS = 4096,
+ IRQS_NMI = 8192,
+};
+
+enum {
+ _IRQ_DEFAULT_INIT_FLAGS = 0,
+ _IRQ_PER_CPU = 512,
+ _IRQ_LEVEL = 256,
+ _IRQ_NOPROBE = 1024,
+ _IRQ_NOREQUEST = 2048,
+ _IRQ_NOTHREAD = 65536,
+ _IRQ_NOAUTOEN = 4096,
+ _IRQ_MOVE_PCNTXT = 16384,
+ _IRQ_NO_BALANCING = 8192,
+ _IRQ_NESTED_THREAD = 32768,
+ _IRQ_PER_CPU_DEVID = 131072,
+ _IRQ_IS_POLLED = 262144,
+ _IRQ_DISABLE_UNLAZY = 524288,
+ _IRQF_MODIFY_MASK = 1048335,
+};
+
+enum {
+ IRQTF_RUNTHREAD = 0,
+ IRQTF_WARNED = 1,
+ IRQTF_AFFINITY = 2,
+ IRQTF_FORCED_THREAD = 3,
+};
+
+enum {
+ IRQC_IS_HARDIRQ = 0,
+ IRQC_IS_NESTED = 1,
+};
+
+enum {
+ IRQ_STARTUP_NORMAL = 0,
+ IRQ_STARTUP_MANAGED = 1,
+ IRQ_STARTUP_ABORT = 2,
+};
+
+struct irq_devres {
+ unsigned int irq;
+ void *dev_id;
+};
+
+struct irq_desc_devres {
+ unsigned int from;
+ unsigned int cnt;
+};
+
+struct of_phandle_args {
+ struct device_node *np;
+ int args_count;
+ uint32_t args[16];
+};
+
+struct irqchip_fwid {
+ struct fwnode_handle fwnode;
+ unsigned int type;
+ char *name;
+ phys_addr_t *pa;
+};
+
+enum {
+ AFFINITY = 0,
+ AFFINITY_LIST = 1,
+ EFFECTIVE = 2,
+ EFFECTIVE_LIST = 3,
+};
+
+struct irq_affinity {
+ unsigned int pre_vectors;
+ unsigned int post_vectors;
+ unsigned int nr_sets;
+ unsigned int set_size[4];
+ void (*calc_sets)(struct irq_affinity *, unsigned int);
+ void *priv;
+};
+
+struct node_vectors {
+ unsigned int id;
+ union {
+ unsigned int nvectors;
+ unsigned int ncpus;
+ };
+};
+
+struct cpumap {
+ unsigned int available;
+ unsigned int allocated;
+ unsigned int managed;
+ unsigned int managed_allocated;
+ bool initialized;
+ bool online;
+ long unsigned int alloc_map[4];
+ long unsigned int managed_map[4];
+};
+
+struct irq_matrix___2 {
+ unsigned int matrix_bits;
+ unsigned int alloc_start;
+ unsigned int alloc_end;
+ unsigned int alloc_size;
+ unsigned int global_available;
+ unsigned int global_reserved;
+ unsigned int systembits_inalloc;
+ unsigned int total_allocated;
+ unsigned int online_maps;
+ struct cpumap *maps;
+ long unsigned int scratch_map[4];
+ long unsigned int system_map[4];
+};
+
+struct trace_event_raw_irq_matrix_global {
+ struct trace_entry ent;
+ unsigned int online_maps;
+ unsigned int global_available;
+ unsigned int global_reserved;
+ unsigned int total_allocated;
+ char __data[0];
+};
+
+struct trace_event_raw_irq_matrix_global_update {
+ struct trace_entry ent;
+ int bit;
+ unsigned int online_maps;
+ unsigned int global_available;
+ unsigned int global_reserved;
+ unsigned int total_allocated;
+ char __data[0];
+};
+
+struct trace_event_raw_irq_matrix_cpu {
+ struct trace_entry ent;
+ int bit;
+ unsigned int cpu;
+ bool online;
+ unsigned int available;
+ unsigned int allocated;
+ unsigned int managed;
+ unsigned int online_maps;
+ unsigned int global_available;
+ unsigned int global_reserved;
+ unsigned int total_allocated;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_irq_matrix_global {};
+
+struct trace_event_data_offsets_irq_matrix_global_update {};
+
+struct trace_event_data_offsets_irq_matrix_cpu {};
+
+typedef void (*btf_trace_irq_matrix_online)(void *, struct irq_matrix___2 *);
+
+typedef void (*btf_trace_irq_matrix_offline)(void *, struct irq_matrix___2 *);
+
+typedef void (*btf_trace_irq_matrix_reserve)(void *, struct irq_matrix___2 *);
+
+typedef void (*btf_trace_irq_matrix_remove_reserved)(void *, struct irq_matrix___2 *);
+
+typedef void (*btf_trace_irq_matrix_assign_system)(void *, int, struct irq_matrix___2 *);
+
+typedef void (*btf_trace_irq_matrix_alloc_reserved)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*btf_trace_irq_matrix_reserve_managed)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*btf_trace_irq_matrix_remove_managed)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*btf_trace_irq_matrix_alloc_managed)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*btf_trace_irq_matrix_assign)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*btf_trace_irq_matrix_alloc)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*btf_trace_irq_matrix_free)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *);
+
+typedef void (*rcu_callback_t)(struct callback_head *);
+
+typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t);
+
+struct rcu_synchronize {
+ struct callback_head head;
+ struct completion completion;
+};
+
+struct trace_event_raw_rcu_utilization {
+ struct trace_entry ent;
+ const char *s;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_grace_period {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ const char *gpevent;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_future_grace_period {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ long unsigned int gp_seq_req;
+ u8 level;
+ int grplo;
+ int grphi;
+ const char *gpevent;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_grace_period_init {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ u8 level;
+ int grplo;
+ int grphi;
+ long unsigned int qsmask;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_exp_grace_period {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gpseq;
+ const char *gpevent;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_exp_funnel_lock {
+ struct trace_entry ent;
+ const char *rcuname;
+ u8 level;
+ int grplo;
+ int grphi;
+ const char *gpevent;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_nocb_wake {
+ struct trace_entry ent;
+ const char *rcuname;
+ int cpu;
+ const char *reason;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_preempt_task {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_unlock_preempted_task {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_quiescent_state_report {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ long unsigned int mask;
+ long unsigned int qsmask;
+ u8 level;
+ int grplo;
+ int grphi;
+ u8 gp_tasks;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_fqs {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int gp_seq;
+ int cpu;
+ const char *qsevent;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_dyntick {
+ struct trace_entry ent;
+ const char *polarity;
+ long int oldnesting;
+ long int newnesting;
+ int dynticks;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_callback {
+ struct trace_entry ent;
+ const char *rcuname;
+ void *rhp;
+ void *func;
+ long int qlen;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_kfree_callback {
+ struct trace_entry ent;
+ const char *rcuname;
+ void *rhp;
+ long unsigned int offset;
+ long int qlen;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_batch_start {
+ struct trace_entry ent;
+ const char *rcuname;
+ long int qlen;
+ long int blimit;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_invoke_callback {
+ struct trace_entry ent;
+ const char *rcuname;
+ void *rhp;
+ void *func;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_invoke_kfree_callback {
+ struct trace_entry ent;
+ const char *rcuname;
+ void *rhp;
+ long unsigned int offset;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_invoke_kfree_bulk_callback {
+ struct trace_entry ent;
+ const char *rcuname;
+ long unsigned int nr_records;
+ void **p;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_batch_end {
+ struct trace_entry ent;
+ const char *rcuname;
+ int callbacks_invoked;
+ char cb;
+ char nr;
+ char iit;
+ char risk;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_torture_read {
+ struct trace_entry ent;
+ char rcutorturename[8];
+ struct callback_head *rhp;
+ long unsigned int secs;
+ long unsigned int c_old;
+ long unsigned int c;
+ char __data[0];
+};
+
+struct trace_event_raw_rcu_barrier {
+ struct trace_entry ent;
+ const char *rcuname;
+ const char *s;
+ int cpu;
+ int cnt;
+ long unsigned int done;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_rcu_utilization {};
+
+struct trace_event_data_offsets_rcu_grace_period {};
+
+struct trace_event_data_offsets_rcu_future_grace_period {};
+
+struct trace_event_data_offsets_rcu_grace_period_init {};
+
+struct trace_event_data_offsets_rcu_exp_grace_period {};
+
+struct trace_event_data_offsets_rcu_exp_funnel_lock {};
+
+struct trace_event_data_offsets_rcu_nocb_wake {};
+
+struct trace_event_data_offsets_rcu_preempt_task {};
+
+struct trace_event_data_offsets_rcu_unlock_preempted_task {};
+
+struct trace_event_data_offsets_rcu_quiescent_state_report {};
+
+struct trace_event_data_offsets_rcu_fqs {};
+
+struct trace_event_data_offsets_rcu_dyntick {};
+
+struct trace_event_data_offsets_rcu_callback {};
+
+struct trace_event_data_offsets_rcu_kfree_callback {};
+
+struct trace_event_data_offsets_rcu_batch_start {};
+
+struct trace_event_data_offsets_rcu_invoke_callback {};
+
+struct trace_event_data_offsets_rcu_invoke_kfree_callback {};
+
+struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {};
+
+struct trace_event_data_offsets_rcu_batch_end {};
+
+struct trace_event_data_offsets_rcu_torture_read {};
+
+struct trace_event_data_offsets_rcu_barrier {};
+
+typedef void (*btf_trace_rcu_utilization)(void *, const char *);
+
+typedef void (*btf_trace_rcu_grace_period)(void *, const char *, long unsigned int, const char *);
+
+typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, long unsigned int, long unsigned int, u8, int, int, const char *);
+
+typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, long unsigned int, u8, int, int, long unsigned int);
+
+typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, long unsigned int, const char *);
+
+typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *);
+
+typedef void (*btf_trace_rcu_nocb_wake)(void *, const char *, int, const char *);
+
+typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, long unsigned int);
+
+typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, long unsigned int, int);
+
+typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, long unsigned int, long unsigned int, long unsigned int, u8, int, int, int);
+
+typedef void (*btf_trace_rcu_fqs)(void *, const char *, long unsigned int, int, const char *);
+
+typedef void (*btf_trace_rcu_dyntick)(void *, const char *, long int, long int, int);
+
+typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long int);
+
+typedef void (*btf_trace_rcu_kfree_callback)(void *, const char *, struct callback_head *, long unsigned int, long int);
+
+typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long int, long int);
+
+typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *);
+
+typedef void (*btf_trace_rcu_invoke_kfree_callback)(void *, const char *, struct callback_head *, long unsigned int);
+
+typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, long unsigned int, void **);
+
+typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char);
+
+typedef void (*btf_trace_rcu_torture_read)(void *, const char *, struct callback_head *, long unsigned int, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, long unsigned int);
+
+struct early_boot_kfree_rcu {
+ struct callback_head rh;
+};
+
+enum {
+ GP_IDLE = 0,
+ GP_ENTER = 1,
+ GP_PASSED = 2,
+ GP_EXIT = 3,
+ GP_REPLAY = 4,
+};
+
+struct rcu_cblist {
+ struct callback_head *head;
+ struct callback_head **tail;
+ long int len;
+};
+
+enum rcutorture_type {
+ RCU_FLAVOR = 0,
+ RCU_TASKS_FLAVOR = 1,
+ RCU_TASKS_RUDE_FLAVOR = 2,
+ RCU_TASKS_TRACING_FLAVOR = 3,
+ RCU_TRIVIAL_FLAVOR = 4,
+ SRCU_FLAVOR = 5,
+ INVALID_RCU_FLAVOR = 6,
+};
+
+enum tick_device_mode {
+ TICKDEV_MODE_PERIODIC = 0,
+ TICKDEV_MODE_ONESHOT = 1,
+};
+
+struct tick_device___2 {
+ struct clock_event_device *evtdev;
+ enum tick_device_mode mode;
+};
+
+struct sysrq_key_op {
+ void (* const handler)(int);
+ const char * const help_msg;
+ const char * const action_msg;
+ const int enable_mask;
+};
+
+struct rcu_exp_work {
+ long unsigned int rew_s;
+ struct work_struct rew_work;
+};
+
+struct rcu_node {
+ raw_spinlock_t lock;
+ long unsigned int gp_seq;
+ long unsigned int gp_seq_needed;
+ long unsigned int completedqs;
+ long unsigned int qsmask;
+ long unsigned int rcu_gp_init_mask;
+ long unsigned int qsmaskinit;
+ long unsigned int qsmaskinitnext;
+ long unsigned int expmask;
+ long unsigned int expmaskinit;
+ long unsigned int expmaskinitnext;
+ long unsigned int cbovldmask;
+ long unsigned int ffmask;
+ long unsigned int grpmask;
+ int grplo;
+ int grphi;
+ u8 grpnum;
+ u8 level;
+ bool wait_blkd_tasks;
+ struct rcu_node *parent;
+ struct list_head blkd_tasks;
+ struct list_head *gp_tasks;
+ struct list_head *exp_tasks;
+ struct list_head *boost_tasks;
+ struct rt_mutex boost_mtx;
+ long unsigned int boost_time;
+ struct task_struct *boost_kthread_task;
+ unsigned int boost_kthread_status;
+ struct swait_queue_head nocb_gp_wq[2];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ raw_spinlock_t fqslock;
+ spinlock_t exp_lock;
+ long unsigned int exp_seq_rq;
+ wait_queue_head_t exp_wq[4];
+ struct rcu_exp_work rew;
+ bool exp_need_flush;
+ long: 56;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+union rcu_noqs {
+ struct {
+ u8 norm;
+ u8 exp;
+ } b;
+ u16 s;
+};
+
+struct rcu_data {
+ long unsigned int gp_seq;
+ long unsigned int gp_seq_needed;
+ union rcu_noqs cpu_no_qs;
+ bool core_needs_qs;
+ bool beenonline;
+ bool gpwrap;
+ bool exp_deferred_qs;
+ struct rcu_node *mynode;
+ long unsigned int grpmask;
+ long unsigned int ticks_this_gp;
+ struct irq_work defer_qs_iw;
+ bool defer_qs_iw_pending;
+ struct rcu_segcblist cblist;
+ long int qlen_last_fqs_check;
+ long unsigned int n_force_qs_snap;
+ long int blimit;
+ int dynticks_snap;
+ long int dynticks_nesting;
+ long int dynticks_nmi_nesting;
+ atomic_t dynticks;
+ bool rcu_need_heavy_qs;
+ bool rcu_urgent_qs;
+ bool rcu_forced_tick;
+ bool rcu_forced_tick_exp;
+ struct callback_head barrier_head;
+ int exp_dynticks_snap;
+ struct swait_queue_head nocb_cb_wq;
+ struct task_struct *nocb_gp_kthread;
+ raw_spinlock_t nocb_lock;
+ atomic_t nocb_lock_contended;
+ int nocb_defer_wakeup;
+ struct timer_list nocb_timer;
+ long unsigned int nocb_gp_adv_time;
+ long: 64;
+ long: 64;
+ raw_spinlock_t nocb_bypass_lock;
+ struct rcu_cblist nocb_bypass;
+ long unsigned int nocb_bypass_first;
+ long unsigned int nocb_nobypass_last;
+ int nocb_nobypass_count;
+ long: 32;
+ long: 64;
+ long: 64;
+ raw_spinlock_t nocb_gp_lock;
+ struct timer_list nocb_bypass_timer;
+ u8 nocb_gp_sleep;
+ u8 nocb_gp_bypass;
+ u8 nocb_gp_gp;
+ long unsigned int nocb_gp_seq;
+ long unsigned int nocb_gp_loops;
+ struct swait_queue_head nocb_gp_wq;
+ bool nocb_cb_sleep;
+ struct task_struct *nocb_cb_kthread;
+ struct rcu_data *nocb_next_cb_rdp;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct rcu_data *nocb_gp_rdp;
+ struct task_struct *rcu_cpu_kthread_task;
+ unsigned int rcu_cpu_kthread_status;
+ char rcu_cpu_has_work;
+ unsigned int softirq_snap;
+ struct irq_work rcu_iw;
+ bool rcu_iw_pending;
+ long unsigned int rcu_iw_gp_seq;
+ long unsigned int rcu_ofl_gp_seq;
+ short int rcu_ofl_gp_flags;
+ long unsigned int rcu_onl_gp_seq;
+ short int rcu_onl_gp_flags;
+ long unsigned int last_fqs_resched;
+ int cpu;
+ long: 32;
+ long: 64;
+};
+
+struct rcu_state {
+ struct rcu_node node[16];
+ struct rcu_node *level[3];
+ int ncpus;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ u8 boost;
+ long unsigned int gp_seq;
+ struct task_struct *gp_kthread;
+ struct swait_queue_head gp_wq;
+ short int gp_flags;
+ short int gp_state;
+ long unsigned int gp_wake_time;
+ long unsigned int gp_wake_seq;
+ struct mutex barrier_mutex;
+ atomic_t barrier_cpu_count;
+ struct completion barrier_completion;
+ long unsigned int barrier_sequence;
+ struct mutex exp_mutex;
+ struct mutex exp_wake_mutex;
+ long unsigned int expedited_sequence;
+ atomic_t expedited_need_qs;
+ struct swait_queue_head expedited_wq;
+ int ncpus_snap;
+ u8 cbovld;
+ u8 cbovldnext;
+ long unsigned int jiffies_force_qs;
+ long unsigned int jiffies_kick_kthreads;
+ long unsigned int n_force_qs;
+ long unsigned int gp_start;
+ long unsigned int gp_end;
+ long unsigned int gp_activity;
+ long unsigned int gp_req_activity;
+ long unsigned int jiffies_stall;
+ long unsigned int jiffies_resched;
+ long unsigned int n_force_qs_gpstart;
+ long unsigned int gp_max;
+ const char *name;
+ char abbr;
+ long: 56;
+ long: 64;
+ long: 64;
+ long: 64;
+ raw_spinlock_t ofl_lock;
+};
+
+struct kfree_rcu_bulk_data {
+ long unsigned int nr_records;
+ void *records[509];
+ struct kfree_rcu_bulk_data *next;
+ struct callback_head *head_free_debug;
+};
+
+struct kfree_rcu_cpu;
+
+struct kfree_rcu_cpu_work {
+ struct rcu_work rcu_work;
+ struct callback_head *head_free;
+ struct kfree_rcu_bulk_data *bhead_free;
+ struct kfree_rcu_cpu *krcp;
+};
+
+struct kfree_rcu_cpu {
+ struct callback_head *head;
+ struct kfree_rcu_bulk_data *bhead;
+ struct kfree_rcu_bulk_data *bcached;
+ struct kfree_rcu_cpu_work krw_arr[2];
+ spinlock_t lock;
+ struct delayed_work monitor_work;
+ bool monitor_todo;
+ bool initialized;
+ int count;
+};
+
+typedef u8 pto_T_____20;
+
+typedef char pto_T_____21;
+
+struct dma_devres {
+ size_t size;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ long unsigned int attrs;
+};
+
+enum dma_sync_target {
+ SYNC_FOR_CPU = 0,
+ SYNC_FOR_DEVICE = 1,
+};
+
+struct trace_event_raw_swiotlb_bounced {
+ struct trace_entry ent;
+ u32 __data_loc_dev_name;
+ u64 dma_mask;
+ dma_addr_t dev_addr;
+ size_t size;
+ enum swiotlb_force swiotlb_force;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_swiotlb_bounced {
+ u32 dev_name;
+};
+
+typedef void (*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t, enum swiotlb_force);
+
+enum profile_type {
+ PROFILE_TASK_EXIT = 0,
+ PROFILE_MUNMAP = 1,
+};
+
+struct profile_hit {
+ u32 pc;
+ u32 hits;
+};
+
+struct stacktrace_cookie {
+ long unsigned int *store;
+ unsigned int size;
+ unsigned int skip;
+ unsigned int len;
+};
+
+typedef __kernel_long_t __kernel_suseconds_t;
+
+typedef __kernel_long_t __kernel_old_time_t;
+
+typedef __kernel_suseconds_t suseconds_t;
+
+typedef __kernel_clock_t clock_t;
+
+typedef __u64 timeu64_t;
+
+struct __kernel_itimerspec {
+ struct __kernel_timespec it_interval;
+ struct __kernel_timespec it_value;
+};
+
+struct itimerspec64 {
+ struct timespec64 it_interval;
+ struct timespec64 it_value;
+};
+
+struct old_itimerspec32 {
+ struct old_timespec32 it_interval;
+ struct old_timespec32 it_value;
+};
+
+struct __kernel_timex_timeval {
+ __kernel_time64_t tv_sec;
+ long long int tv_usec;
+};
+
+struct __kernel_timex {
+ unsigned int modes;
+ long long int offset;
+ long long int freq;
+ long long int maxerror;
+ long long int esterror;
+ int status;
+ long long int constant;
+ long long int precision;
+ long long int tolerance;
+ struct __kernel_timex_timeval time;
+ long long int tick;
+ long long int ppsfreq;
+ long long int jitter;
+ int shift;
+ long long int stabil;
+ long long int jitcnt;
+ long long int calcnt;
+ long long int errcnt;
+ long long int stbcnt;
+ int tai;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct trace_event_raw_timer_class {
+ struct trace_entry ent;
+ void *timer;
+ char __data[0];
+};
+
+struct trace_event_raw_timer_start {
+ struct trace_entry ent;
+ void *timer;
+ void *function;
+ long unsigned int expires;
+ long unsigned int now;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_timer_expire_entry {
+ struct trace_entry ent;
+ void *timer;
+ long unsigned int now;
+ void *function;
+ long unsigned int baseclk;
+ char __data[0];
+};
+
+struct trace_event_raw_hrtimer_init {
+ struct trace_entry ent;
+ void *hrtimer;
+ clockid_t clockid;
+ enum hrtimer_mode mode;
+ char __data[0];
+};
+
+struct trace_event_raw_hrtimer_start {
+ struct trace_entry ent;
+ void *hrtimer;
+ void *function;
+ s64 expires;
+ s64 softexpires;
+ enum hrtimer_mode mode;
+ char __data[0];
+};
+
+struct trace_event_raw_hrtimer_expire_entry {
+ struct trace_entry ent;
+ void *hrtimer;
+ s64 now;
+ void *function;
+ char __data[0];
+};
+
+struct trace_event_raw_hrtimer_class {
+ struct trace_entry ent;
+ void *hrtimer;
+ char __data[0];
+};
+
+struct trace_event_raw_itimer_state {
+ struct trace_entry ent;
+ int which;
+ long long unsigned int expires;
+ long int value_sec;
+ long int value_nsec;
+ long int interval_sec;
+ long int interval_nsec;
+ char __data[0];
+};
+
+struct trace_event_raw_itimer_expire {
+ struct trace_entry ent;
+ int which;
+ pid_t pid;
+ long long unsigned int now;
+ char __data[0];
+};
+
+struct trace_event_raw_tick_stop {
+ struct trace_entry ent;
+ int success;
+ int dependency;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_timer_class {};
+
+struct trace_event_data_offsets_timer_start {};
+
+struct trace_event_data_offsets_timer_expire_entry {};
+
+struct trace_event_data_offsets_hrtimer_init {};
+
+struct trace_event_data_offsets_hrtimer_start {};
+
+struct trace_event_data_offsets_hrtimer_expire_entry {};
+
+struct trace_event_data_offsets_hrtimer_class {};
+
+struct trace_event_data_offsets_itimer_state {};
+
+struct trace_event_data_offsets_itimer_expire {};
+
+struct trace_event_data_offsets_tick_stop {};
+
+typedef void (*btf_trace_timer_init)(void *, struct timer_list *);
+
+typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int, unsigned int);
+
+typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int);
+
+typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *);
+
+typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *);
+
+typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode);
+
+typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode);
+
+typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *);
+
+typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *);
+
+typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *);
+
+typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int);
+
+typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int);
+
+typedef void (*btf_trace_tick_stop)(void *, int, int);
+
+struct timer_base {
+ raw_spinlock_t lock;
+ struct timer_list *running_timer;
+ long unsigned int clk;
+ long unsigned int next_expiry;
+ unsigned int cpu;
+ bool is_idle;
+ bool must_forward_clk;
+ long unsigned int pending_map[9];
+ struct hlist_head vectors[576];
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct process_timer {
+ struct timer_list timer;
+ struct task_struct *task;
+};
+
+struct system_time_snapshot {
+ u64 cycles;
+ ktime_t real;
+ ktime_t raw;
+ unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
+};
+
+struct system_device_crosststamp {
+ ktime_t device;
+ ktime_t sys_realtime;
+ ktime_t sys_monoraw;
+};
+
+struct tk_read_base {
+ struct clocksource *clock;
+ u64 mask;
+ u64 cycle_last;
+ u32 mult;
+ u32 shift;
+ u64 xtime_nsec;
+ ktime_t base;
+ u64 base_real;
+};
+
+struct timekeeper {
+ struct tk_read_base tkr_mono;
+ struct tk_read_base tkr_raw;
+ u64 xtime_sec;
+ long unsigned int ktime_sec;
+ struct timespec64 wall_to_monotonic;
+ ktime_t offs_real;
+ ktime_t offs_boot;
+ ktime_t offs_tai;
+ s32 tai_offset;
+ unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
+ ktime_t next_leap_ktime;
+ u64 raw_sec;
+ struct timespec64 monotonic_to_boot;
+ u64 cycle_interval;
+ u64 xtime_interval;
+ s64 xtime_remainder;
+ u64 raw_interval;
+ u64 ntp_tick;
+ s64 ntp_error;
+ u32 ntp_error_shift;
+ u32 ntp_err_mult;
+ u32 skip_second_overflow;
+};
+
+struct audit_ntp_val {
+ long long int oldval;
+ long long int newval;
+};
+
+struct audit_ntp_data {
+ struct audit_ntp_val vals[6];
+};
+
+enum timekeeping_adv_mode {
+ TK_ADV_TICK = 0,
+ TK_ADV_FREQ = 1,
+};
+
+struct tk_fast {
+ seqcount_t seq;
+ struct tk_read_base base[2];
+};
+
+typedef s64 int64_t;
+
+enum tick_nohz_mode {
+ NOHZ_MODE_INACTIVE = 0,
+ NOHZ_MODE_LOWRES = 1,
+ NOHZ_MODE_HIGHRES = 2,
+};
+
+struct tick_sched {
+ struct hrtimer sched_timer;
+ long unsigned int check_clocks;
+ enum tick_nohz_mode nohz_mode;
+ unsigned int inidle: 1;
+ unsigned int tick_stopped: 1;
+ unsigned int idle_active: 1;
+ unsigned int do_timer_last: 1;
+ unsigned int got_idle_tick: 1;
+ ktime_t last_tick;
+ ktime_t next_tick;
+ long unsigned int idle_jiffies;
+ long unsigned int idle_calls;
+ long unsigned int idle_sleeps;
+ ktime_t idle_entrytime;
+ ktime_t idle_waketime;
+ ktime_t idle_exittime;
+ ktime_t idle_sleeptime;
+ ktime_t iowait_sleeptime;
+ long unsigned int last_jiffies;
+ u64 timer_expires;
+ u64 timer_expires_base;
+ u64 next_timer;
+ ktime_t idle_expires;
+ atomic_t tick_dep_mask;
+};
+
+struct timer_list_iter {
+ int cpu;
+ bool second_pass;
+ u64 now;
+};
+
+struct tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ long int tm_year;
+ int tm_wday;
+ int tm_yday;
+};
+
+struct cyclecounter {
+ u64 (*read)(const struct cyclecounter *);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+};
+
+struct timecounter {
+ const struct cyclecounter *cc;
+ u64 cycle_last;
+ u64 nsec;
+ u64 mask;
+ u64 frac;
+};
+
+typedef __kernel_timer_t timer_t;
+
+struct rtc_wkalrm {
+ unsigned char enabled;
+ unsigned char pending;
+ struct rtc_time time;
+};
+
+enum alarmtimer_type {
+ ALARM_REALTIME = 0,
+ ALARM_BOOTTIME = 1,
+ ALARM_NUMTYPE = 2,
+ ALARM_REALTIME_FREEZER = 3,
+ ALARM_BOOTTIME_FREEZER = 4,
+};
+
+enum alarmtimer_restart {
+ ALARMTIMER_NORESTART = 0,
+ ALARMTIMER_RESTART = 1,
+};
+
+struct alarm {
+ struct timerqueue_node node;
+ struct hrtimer timer;
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t);
+ enum alarmtimer_type type;
+ int state;
+ void *data;
+};
+
+struct cpu_timer {
+ struct timerqueue_node node;
+ struct timerqueue_head *head;
+ struct pid *pid;
+ struct list_head elist;
+ int firing;
+};
+
+struct k_clock;
+
+struct k_itimer {
+ struct list_head list;
+ struct hlist_node t_hash;
+ spinlock_t it_lock;
+ const struct k_clock *kclock;
+ clockid_t it_clock;
+ timer_t it_id;
+ int it_active;
+ s64 it_overrun;
+ s64 it_overrun_last;
+ int it_requeue_pending;
+ int it_sigev_notify;
+ ktime_t it_interval;
+ struct signal_struct *it_signal;
+ union {
+ struct pid *it_pid;
+ struct task_struct *it_process;
+ };
+ struct sigqueue *sigq;
+ union {
+ struct {
+ struct hrtimer timer;
+ } real;
+ struct cpu_timer cpu;
+ struct {
+ struct alarm alarmtimer;
+ } alarm;
+ } it;
+ struct callback_head rcu;
+};
+
+struct k_clock {
+ int (*clock_getres)(const clockid_t, struct timespec64 *);
+ int (*clock_set)(const clockid_t, const struct timespec64 *);
+ int (*clock_get_timespec)(const clockid_t, struct timespec64 *);
+ ktime_t (*clock_get_ktime)(const clockid_t);
+ int (*clock_adj)(const clockid_t, struct __kernel_timex *);
+ int (*timer_create)(struct k_itimer *);
+ int (*nsleep)(const clockid_t, int, const struct timespec64 *);
+ int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *);
+ int (*timer_del)(struct k_itimer *);
+ void (*timer_get)(struct k_itimer *, struct itimerspec64 *);
+ void (*timer_rearm)(struct k_itimer *);
+ s64 (*timer_forward)(struct k_itimer *, ktime_t);
+ ktime_t (*timer_remaining)(struct k_itimer *, ktime_t);
+ int (*timer_try_to_cancel)(struct k_itimer *);
+ void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool);
+ void (*timer_wait_running)(struct k_itimer *);
+};
+
+struct rtc_class_ops {
+ int (*ioctl)(struct device *, unsigned int, long unsigned int);
+ int (*read_time)(struct device *, struct rtc_time *);
+ int (*set_time)(struct device *, struct rtc_time *);
+ int (*read_alarm)(struct device *, struct rtc_wkalrm *);
+ int (*set_alarm)(struct device *, struct rtc_wkalrm *);
+ int (*proc)(struct device *, struct seq_file *);
+ int (*alarm_irq_enable)(struct device *, unsigned int);
+ int (*read_offset)(struct device *, long int *);
+ int (*set_offset)(struct device *, long int);
+};
+
+struct rtc_device;
+
+struct rtc_timer {
+ struct timerqueue_node node;
+ ktime_t period;
+ void (*func)(struct rtc_device *);
+ struct rtc_device *rtc;
+ int enabled;
+};
+
+struct rtc_device {
+ struct device dev;
+ struct module *owner;
+ int id;
+ const struct rtc_class_ops *ops;
+ struct mutex ops_lock;
+ struct cdev char_dev;
+ long unsigned int flags;
+ long unsigned int irq_data;
+ spinlock_t irq_lock;
+ wait_queue_head_t irq_queue;
+ struct fasync_struct *async_queue;
+ int irq_freq;
+ int max_user_freq;
+ struct timerqueue_head timerqueue;
+ struct rtc_timer aie_timer;
+ struct rtc_timer uie_rtctimer;
+ struct hrtimer pie_timer;
+ int pie_enabled;
+ struct work_struct irqwork;
+ int uie_unsupported;
+ long int set_offset_nsec;
+ bool registered;
+ bool nvram_old_abi;
+ struct bin_attribute *nvram;
+ time64_t range_min;
+ timeu64_t range_max;
+ time64_t start_secs;
+ time64_t offset_secs;
+ bool set_start_time;
+};
+
+struct platform_driver {
+ int (*probe)(struct platform_device *);
+ int (*remove)(struct platform_device *);
+ void (*shutdown)(struct platform_device *);
+ int (*suspend)(struct platform_device *, pm_message_t);
+ int (*resume)(struct platform_device *);
+ struct device_driver driver;
+ const struct platform_device_id *id_table;
+ bool prevent_deferred_probe;
+};
+
+struct trace_event_raw_alarmtimer_suspend {
+ struct trace_entry ent;
+ s64 expires;
+ unsigned char alarm_type;
+ char __data[0];
+};
+
+struct trace_event_raw_alarm_class {
+ struct trace_entry ent;
+ void *alarm;
+ unsigned char alarm_type;
+ s64 expires;
+ s64 now;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_alarmtimer_suspend {};
+
+struct trace_event_data_offsets_alarm_class {};
+
+typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int);
+
+typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t);
+
+typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t);
+
+typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t);
+
+struct alarm_base {
+ spinlock_t lock;
+ struct timerqueue_head timerqueue;
+ ktime_t (*get_ktime)();
+ void (*get_timespec)(struct timespec64 *);
+ clockid_t base_clockid;
+};
+
+struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[12];
+ int _tid;
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute;
+ } _sigev_thread;
+ } _sigev_un;
+};
+
+typedef struct sigevent sigevent_t;
+
+typedef unsigned int uint;
+
+struct posix_clock;
+
+struct posix_clock_operations {
+ struct module *owner;
+ int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *);
+ int (*clock_gettime)(struct posix_clock *, struct timespec64 *);
+ int (*clock_getres)(struct posix_clock *, struct timespec64 *);
+ int (*clock_settime)(struct posix_clock *, const struct timespec64 *);
+ long int (*ioctl)(struct posix_clock *, unsigned int, long unsigned int);
+ int (*open)(struct posix_clock *, fmode_t);
+ __poll_t (*poll)(struct posix_clock *, struct file *, poll_table *);
+ int (*release)(struct posix_clock *);
+ ssize_t (*read)(struct posix_clock *, uint, char *, size_t);
+};
+
+struct posix_clock {
+ struct posix_clock_operations ops;
+ struct cdev cdev;
+ struct device *dev;
+ struct rw_semaphore rwsem;
+ bool zombie;
+};
+
+struct posix_clock_desc {
+ struct file *fp;
+ struct posix_clock *clk;
+};
+
+struct __kernel_old_itimerval {
+ struct __kernel_old_timeval it_interval;
+ struct __kernel_old_timeval it_value;
+};
+
+struct ce_unbind {
+ struct clock_event_device *ce;
+ int res;
+};
+
+typedef ktime_t pto_T_____22;
+
+struct proc_timens_offset {
+ int clockid;
+ struct timespec64 val;
+};
+
+union futex_key {
+ struct {
+ u64 i_seq;
+ long unsigned int pgoff;
+ unsigned int offset;
+ } shared;
+ struct {
+ union {
+ struct mm_struct *mm;
+ u64 __tmp;
+ };
+ long unsigned int address;
+ unsigned int offset;
+ } private;
+ struct {
+ u64 ptr;
+ long unsigned int word;
+ unsigned int offset;
+ } both;
+};
+
+struct futex_pi_state {
+ struct list_head list;
+ struct rt_mutex pi_mutex;
+ struct task_struct *owner;
+ refcount_t refcount;
+ union futex_key key;
+};
+
+struct futex_q {
+ struct plist_node list;
+ struct task_struct *task;
+ spinlock_t *lock_ptr;
+ union futex_key key;
+ struct futex_pi_state *pi_state;
+ struct rt_mutex_waiter *rt_waiter;
+ union futex_key *requeue_pi_key;
+ u32 bitset;
+};
+
+struct futex_hash_bucket {
+ atomic_t waiters;
+ spinlock_t lock;
+ struct plist_head chain;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum futex_access {
+ FUTEX_READ = 0,
+ FUTEX_WRITE = 1,
+};
+
+struct dma_chan {
+ int lock;
+ const char *device_id;
+};
+
+typedef bool (*smp_cond_func_t)(int, void *);
+
+struct call_function_data {
+ call_single_data_t *csd;
+ cpumask_var_t cpumask;
+ cpumask_var_t cpumask_ipi;
+};
+
+struct smp_call_on_cpu_struct {
+ struct work_struct work;
+ struct completion done;
+ int (*func)(void *);
+ void *data;
+ int ret;
+ int cpu;
+};
+
+struct latch_tree_root {
+ seqcount_t seq;
+ struct rb_root tree[2];
+};
+
+struct latch_tree_ops {
+ bool (*less)(struct latch_tree_node *, struct latch_tree_node *);
+ int (*comp)(void *, struct latch_tree_node *);
+};
+
+struct module_use {
+ struct list_head source_list;
+ struct list_head target_list;
+ struct module *source;
+ struct module *target;
+};
+
+struct module_sect_attr {
+ struct module_attribute mattr;
+ char *name;
+ long unsigned int address;
+};
+
+struct module_sect_attrs {
+ struct attribute_group grp;
+ unsigned int nsections;
+ struct module_sect_attr attrs[0];
+};
+
+struct module_notes_attrs {
+ struct kobject *dir;
+ unsigned int notes;
+ struct bin_attribute attrs[0];
+};
+
+struct symsearch {
+ const struct kernel_symbol *start;
+ const struct kernel_symbol *stop;
+ const s32 *crcs;
+ enum {
+ NOT_GPL_ONLY = 0,
+ GPL_ONLY = 1,
+ WILL_BE_GPL_ONLY = 2,
+ } licence;
+ bool unused;
+};
+
+enum kernel_read_file_id {
+ READING_UNKNOWN = 0,
+ READING_FIRMWARE = 1,
+ READING_FIRMWARE_PREALLOC_BUFFER = 2,
+ READING_FIRMWARE_EFI_EMBEDDED = 3,
+ READING_MODULE = 4,
+ READING_KEXEC_IMAGE = 5,
+ READING_KEXEC_INITRAMFS = 6,
+ READING_POLICY = 7,
+ READING_X509_CERTIFICATE = 8,
+ READING_MAX_ID = 9,
+};
+
+enum kernel_load_data_id {
+ LOADING_UNKNOWN = 0,
+ LOADING_FIRMWARE = 1,
+ LOADING_FIRMWARE_PREALLOC_BUFFER = 2,
+ LOADING_FIRMWARE_EFI_EMBEDDED = 3,
+ LOADING_MODULE = 4,
+ LOADING_KEXEC_IMAGE = 5,
+ LOADING_KEXEC_INITRAMFS = 6,
+ LOADING_POLICY = 7,
+ LOADING_X509_CERTIFICATE = 8,
+ LOADING_MAX_ID = 9,
+};
+
+enum {
+ PROC_ENTRY_PERMANENT = 1,
+};
+
+struct _ddebug {
+ const char *modname;
+ const char *function;
+ const char *filename;
+ const char *format;
+ unsigned int lineno: 18;
+ unsigned int flags: 8;
+};
+
+struct load_info {
+ const char *name;
+ struct module *mod;
+ Elf64_Ehdr *hdr;
+ long unsigned int len;
+ Elf64_Shdr *sechdrs;
+ char *secstrings;
+ char *strtab;
+ long unsigned int symoffs;
+ long unsigned int stroffs;
+ long unsigned int init_typeoffs;
+ long unsigned int core_typeoffs;
+ struct _ddebug *debug;
+ unsigned int num_debug;
+ bool sig_ok;
+ long unsigned int mod_kallsyms_init_off;
+ struct {
+ unsigned int sym;
+ unsigned int str;
+ unsigned int mod;
+ unsigned int vers;
+ unsigned int info;
+ unsigned int pcpu;
+ } index;
+};
+
+struct trace_event_raw_module_load {
+ struct trace_entry ent;
+ unsigned int taints;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_module_free {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_module_refcnt {
+ struct trace_entry ent;
+ long unsigned int ip;
+ int refcnt;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_module_request {
+ struct trace_entry ent;
+ long unsigned int ip;
+ bool wait;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_module_load {
+ u32 name;
+};
+
+struct trace_event_data_offsets_module_free {
+ u32 name;
+};
+
+struct trace_event_data_offsets_module_refcnt {
+ u32 name;
+};
+
+struct trace_event_data_offsets_module_request {
+ u32 name;
+};
+
+typedef void (*btf_trace_module_load)(void *, struct module *);
+
+typedef void (*btf_trace_module_free)(void *, struct module *);
+
+typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int);
+
+typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int);
+
+typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int);
+
+struct mod_tree_root {
+ struct latch_tree_root root;
+ long unsigned int addr_min;
+ long unsigned int addr_max;
+};
+
+struct find_symbol_arg {
+ const char *name;
+ bool gplok;
+ bool warn;
+ struct module *owner;
+ const s32 *crc;
+ const struct kernel_symbol *sym;
+};
+
+struct mod_initfree {
+ struct llist_node node;
+ void *module_init;
+};
+
+struct kallsym_iter {
+ loff_t pos;
+ loff_t pos_arch_end;
+ loff_t pos_mod_end;
+ loff_t pos_ftrace_mod_end;
+ long unsigned int value;
+ unsigned int nameoff;
+ char type;
+ char name[128];
+ char module_name[56];
+ int exported;
+ int show_value;
+};
+
+typedef __kernel_ulong_t __kernel_ino_t;
+
+typedef __kernel_ino_t ino_t;
+
+enum kernfs_node_type {
+ KERNFS_DIR = 1,
+ KERNFS_FILE = 2,
+ KERNFS_LINK = 4,
+};
+
+enum kernfs_root_flag {
+ KERNFS_ROOT_CREATE_DEACTIVATED = 1,
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2,
+ KERNFS_ROOT_SUPPORT_EXPORTOP = 4,
+ KERNFS_ROOT_SUPPORT_USER_XATTR = 8,
+};
+
+struct kernfs_fs_context {
+ struct kernfs_root *root;
+ void *ns_tag;
+ long unsigned int magic;
+ bool new_sb_created;
+};
+
+struct bpf_cgroup_link {
+ struct bpf_link link;
+ struct cgroup *cgroup;
+ enum bpf_attach_type type;
+};
+
+enum {
+ CGRP_NOTIFY_ON_RELEASE = 0,
+ CGRP_CPUSET_CLONE_CHILDREN = 1,
+ CGRP_FREEZE = 2,
+ CGRP_FROZEN = 3,
+};
+
+enum {
+ CGRP_ROOT_NOPREFIX = 2,
+ CGRP_ROOT_XATTR = 4,
+ CGRP_ROOT_NS_DELEGATE = 8,
+ CGRP_ROOT_CPUSET_V2_MODE = 16,
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = 32,
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = 64,
+};
+
+struct cgroup_taskset {
+ struct list_head src_csets;
+ struct list_head dst_csets;
+ int nr_tasks;
+ int ssid;
+ struct list_head *csets;
+ struct css_set *cur_cset;
+ struct task_struct *cur_task;
+};
+
+struct css_task_iter {
+ struct cgroup_subsys *ss;
+ unsigned int flags;
+ struct list_head *cset_pos;
+ struct list_head *cset_head;
+ struct list_head *tcset_pos;
+ struct list_head *tcset_head;
+ struct list_head *task_pos;
+ struct list_head *cur_tasks_head;
+ struct css_set *cur_cset;
+ struct css_set *cur_dcset;
+ struct task_struct *cur_task;
+ struct list_head iters_node;
+};
+
+struct cgroup_fs_context {
+ struct kernfs_fs_context kfc;
+ struct cgroup_root *root;
+ struct cgroup_namespace *ns;
+ unsigned int flags;
+ bool cpuset_clone_children;
+ bool none;
+ bool all_ss;
+ u16 subsys_mask;
+ char *name;
+ char *release_agent;
+};
+
+struct cgrp_cset_link {
+ struct cgroup *cgrp;
+ struct css_set *cset;
+ struct list_head cset_link;
+ struct list_head cgrp_link;
+};
+
+struct cgroup_mgctx {
+ struct list_head preloaded_src_csets;
+ struct list_head preloaded_dst_csets;
+ struct cgroup_taskset tset;
+ u16 ss_mask;
+};
+
+struct trace_event_raw_cgroup_root {
+ struct trace_entry ent;
+ int root;
+ u16 ss_mask;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_cgroup {
+ struct trace_entry ent;
+ int root;
+ int id;
+ int level;
+ u32 __data_loc_path;
+ char __data[0];
+};
+
+struct trace_event_raw_cgroup_migrate {
+ struct trace_entry ent;
+ int dst_root;
+ int dst_id;
+ int dst_level;
+ int pid;
+ u32 __data_loc_dst_path;
+ u32 __data_loc_comm;
+ char __data[0];
+};
+
+struct trace_event_raw_cgroup_event {
+ struct trace_entry ent;
+ int root;
+ int id;
+ int level;
+ u32 __data_loc_path;
+ int val;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_cgroup_root {
+ u32 name;
+};
+
+struct trace_event_data_offsets_cgroup {
+ u32 path;
+};
+
+struct trace_event_data_offsets_cgroup_migrate {
+ u32 dst_path;
+ u32 comm;
+};
+
+struct trace_event_data_offsets_cgroup_event {
+ u32 path;
+};
+
+typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *);
+
+typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *);
+
+typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *);
+
+typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *);
+
+typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *);
+
+typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *);
+
+typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *);
+
+typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *);
+
+typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *);
+
+typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool);
+
+typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool);
+
+typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int);
+
+typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int);
+
+enum cgroup2_param {
+ Opt_nsdelegate = 0,
+ Opt_memory_localevents = 1,
+ Opt_memory_recursiveprot = 2,
+ nr__cgroup2_params = 3,
+};
+
+struct cgroupstats {
+ __u64 nr_sleeping;
+ __u64 nr_running;
+ __u64 nr_stopped;
+ __u64 nr_uninterruptible;
+ __u64 nr_io_wait;
+};
+
+enum cgroup_filetype {
+ CGROUP_FILE_PROCS = 0,
+ CGROUP_FILE_TASKS = 1,
+};
+
+struct cgroup_pidlist {
+ struct {
+ enum cgroup_filetype type;
+ struct pid_namespace *ns;
+ } key;
+ pid_t *list;
+ int length;
+ struct list_head links;
+ struct cgroup *owner;
+ struct delayed_work destroy_dwork;
+};
+
+enum cgroup1_param {
+ Opt_all = 0,
+ Opt_clone_children = 1,
+ Opt_cpuset_v2_mode = 2,
+ Opt_name = 3,
+ Opt_none = 4,
+ Opt_noprefix = 5,
+ Opt_release_agent = 6,
+ Opt_xattr = 7,
+};
+
+enum freezer_state_flags {
+ CGROUP_FREEZER_ONLINE = 1,
+ CGROUP_FREEZING_SELF = 2,
+ CGROUP_FREEZING_PARENT = 4,
+ CGROUP_FROZEN = 8,
+ CGROUP_FREEZING = 6,
+};
+
+struct freezer {
+ struct cgroup_subsys_state css;
+ unsigned int state;
+};
+
+struct pids_cgroup {
+ struct cgroup_subsys_state css;
+ atomic64_t counter;
+ atomic64_t limit;
+ struct cgroup_file events_file;
+ atomic64_t events_limit;
+};
+
+struct fmeter {
+ int cnt;
+ int val;
+ time64_t time;
+ spinlock_t lock;
+};
+
+struct cpuset {
+ struct cgroup_subsys_state css;
+ long unsigned int flags;
+ cpumask_var_t cpus_allowed;
+ nodemask_t mems_allowed;
+ cpumask_var_t effective_cpus;
+ nodemask_t effective_mems;
+ cpumask_var_t subparts_cpus;
+ nodemask_t old_mems_allowed;
+ struct fmeter fmeter;
+ int attach_in_progress;
+ int pn;
+ int relax_domain_level;
+ int nr_subparts_cpus;
+ int partition_root_state;
+ int use_parent_ecpus;
+ int child_ecpus_count;
+};
+
+struct tmpmasks {
+ cpumask_var_t addmask;
+ cpumask_var_t delmask;
+ cpumask_var_t new_cpus;
+};
+
+typedef enum {
+ CS_ONLINE = 0,
+ CS_CPU_EXCLUSIVE = 1,
+ CS_MEM_EXCLUSIVE = 2,
+ CS_MEM_HARDWALL = 3,
+ CS_MEMORY_MIGRATE = 4,
+ CS_SCHED_LOAD_BALANCE = 5,
+ CS_SPREAD_PAGE = 6,
+ CS_SPREAD_SLAB = 7,
+} cpuset_flagbits_t;
+
+enum subparts_cmd {
+ partcmd_enable = 0,
+ partcmd_disable = 1,
+ partcmd_update = 2,
+};
+
+struct cpuset_migrate_mm_work {
+ struct work_struct work;
+ struct mm_struct *mm;
+ nodemask_t from;
+ nodemask_t to;
+};
+
+typedef enum {
+ FILE_MEMORY_MIGRATE = 0,
+ FILE_CPULIST = 1,
+ FILE_MEMLIST = 2,
+ FILE_EFFECTIVE_CPULIST = 3,
+ FILE_EFFECTIVE_MEMLIST = 4,
+ FILE_SUBPARTS_CPULIST = 5,
+ FILE_CPU_EXCLUSIVE = 6,
+ FILE_MEM_EXCLUSIVE = 7,
+ FILE_MEM_HARDWALL = 8,
+ FILE_SCHED_LOAD_BALANCE = 9,
+ FILE_PARTITION_ROOT = 10,
+ FILE_SCHED_RELAX_DOMAIN_LEVEL = 11,
+ FILE_MEMORY_PRESSURE_ENABLED = 12,
+ FILE_MEMORY_PRESSURE = 13,
+ FILE_SPREAD_PAGE = 14,
+ FILE_SPREAD_SLAB = 15,
+} cpuset_filetype_t;
+
+struct idmap_key {
+ bool map_up;
+ u32 id;
+ u32 count;
+};
+
+struct cpu_stop_done {
+ atomic_t nr_todo;
+ int ret;
+ struct completion completion;
+};
+
+struct cpu_stopper {
+ struct task_struct *thread;
+ raw_spinlock_t lock;
+ bool enabled;
+ struct list_head works;
+ struct cpu_stop_work stop_work;
+};
+
+enum multi_stop_state {
+ MULTI_STOP_NONE = 0,
+ MULTI_STOP_PREPARE = 1,
+ MULTI_STOP_DISABLE_IRQ = 2,
+ MULTI_STOP_RUN = 3,
+ MULTI_STOP_EXIT = 4,
+};
+
+struct multi_stop_data {
+ cpu_stop_fn_t fn;
+ void *data;
+ unsigned int num_threads;
+ const struct cpumask *active_cpus;
+ enum multi_stop_state state;
+ atomic_t thread_ack;
+};
+
+typedef int __kernel_mqd_t;
+
+typedef __kernel_mqd_t mqd_t;
+
+enum audit_state {
+ AUDIT_DISABLED = 0,
+ AUDIT_BUILD_CONTEXT = 1,
+ AUDIT_RECORD_CONTEXT = 2,
+};
+
+struct audit_cap_data {
+ kernel_cap_t permitted;
+ kernel_cap_t inheritable;
+ union {
+ unsigned int fE;
+ kernel_cap_t effective;
+ };
+ kernel_cap_t ambient;
+ kuid_t rootid;
+};
+
+struct audit_names {
+ struct list_head list;
+ struct filename *name;
+ int name_len;
+ bool hidden;
+ long unsigned int ino;
+ dev_t dev;
+ umode_t mode;
+ kuid_t uid;
+ kgid_t gid;
+ dev_t rdev;
+ u32 osid;
+ struct audit_cap_data fcap;
+ unsigned int fcap_ver;
+ unsigned char type;
+ bool should_free;
+};
+
+struct mq_attr {
+ __kernel_long_t mq_flags;
+ __kernel_long_t mq_maxmsg;
+ __kernel_long_t mq_msgsize;
+ __kernel_long_t mq_curmsgs;
+ __kernel_long_t __reserved[4];
+};
+
+struct audit_proctitle {
+ int len;
+ char *value;
+};
+
+struct audit_aux_data;
+
+struct audit_tree_refs;
+
+struct audit_context {
+ int dummy;
+ int in_syscall;
+ enum audit_state state;
+ enum audit_state current_state;
+ unsigned int serial;
+ int major;
+ struct timespec64 ctime;
+ long unsigned int argv[4];
+ long int return_code;
+ u64 prio;
+ int return_valid;
+ struct audit_names preallocated_names[5];
+ int name_count;
+ struct list_head names_list;
+ char *filterkey;
+ struct path pwd;
+ struct audit_aux_data *aux;
+ struct audit_aux_data *aux_pids;
+ struct __kernel_sockaddr_storage *sockaddr;
+ size_t sockaddr_len;
+ pid_t pid;
+ pid_t ppid;
+ kuid_t uid;
+ kuid_t euid;
+ kuid_t suid;
+ kuid_t fsuid;
+ kgid_t gid;
+ kgid_t egid;
+ kgid_t sgid;
+ kgid_t fsgid;
+ long unsigned int personality;
+ int arch;
+ pid_t target_pid;
+ kuid_t target_auid;
+ kuid_t target_uid;
+ unsigned int target_sessionid;
+ u32 target_sid;
+ char target_comm[16];
+ struct audit_tree_refs *trees;
+ struct audit_tree_refs *first_trees;
+ struct list_head killed_trees;
+ int tree_count;
+ int type;
+ union {
+ struct {
+ int nargs;
+ long int args[6];
+ } socketcall;
+ struct {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+ u32 osid;
+ int has_perm;
+ uid_t perm_uid;
+ gid_t perm_gid;
+ umode_t perm_mode;
+ long unsigned int qbytes;
+ } ipc;
+ struct {
+ mqd_t mqdes;
+ struct mq_attr mqstat;
+ } mq_getsetattr;
+ struct {
+ mqd_t mqdes;
+ int sigev_signo;
+ } mq_notify;
+ struct {
+ mqd_t mqdes;
+ size_t msg_len;
+ unsigned int msg_prio;
+ struct timespec64 abs_timeout;
+ } mq_sendrecv;
+ struct {
+ int oflag;
+ umode_t mode;
+ struct mq_attr attr;
+ } mq_open;
+ struct {
+ pid_t pid;
+ struct audit_cap_data cap;
+ } capset;
+ struct {
+ int fd;
+ int flags;
+ } mmap;
+ struct {
+ int argc;
+ } execve;
+ struct {
+ char *name;
+ } module;
+ };
+ int fds[2];
+ struct audit_proctitle proctitle;
+};
+
+enum audit_nlgrps {
+ AUDIT_NLGRP_NONE = 0,
+ AUDIT_NLGRP_READLOG = 1,
+ __AUDIT_NLGRP_MAX = 2,
+};
+
+struct audit_status {
+ __u32 mask;
+ __u32 enabled;
+ __u32 failure;
+ __u32 pid;
+ __u32 rate_limit;
+ __u32 backlog_limit;
+ __u32 lost;
+ __u32 backlog;
+ union {
+ __u32 version;
+ __u32 feature_bitmap;
+ };
+ __u32 backlog_wait_time;
+};
+
+struct audit_features {
+ __u32 vers;
+ __u32 mask;
+ __u32 features;
+ __u32 lock;
+};
+
+struct audit_tty_status {
+ __u32 enabled;
+ __u32 log_passwd;
+};
+
+struct audit_sig_info {
+ uid_t uid;
+ pid_t pid;
+ char ctx[0];
+};
+
+struct net_generic {
+ union {
+ struct {
+ unsigned int len;
+ struct callback_head rcu;
+ } s;
+ void *ptr[0];
+ };
+};
+
+struct scm_creds {
+ u32 pid;
+ kuid_t uid;
+ kgid_t gid;
+};
+
+struct netlink_skb_parms {
+ struct scm_creds creds;
+ __u32 portid;
+ __u32 dst_group;
+ __u32 flags;
+ struct sock *sk;
+ bool nsid_is_set;
+ int nsid;
+};
+
+struct netlink_kernel_cfg {
+ unsigned int groups;
+ unsigned int flags;
+ void (*input)(struct sk_buff *);
+ struct mutex *cb_mutex;
+ int (*bind)(struct net *, int);
+ void (*unbind)(struct net *, int);
+ bool (*compare)(struct net *, struct sock *);
+};
+
+struct audit_netlink_list {
+ __u32 portid;
+ struct net *net;
+ struct sk_buff_head q;
+};
+
+struct audit_net {
+ struct sock *sk;
+};
+
+struct auditd_connection {
+ struct pid *pid;
+ u32 portid;
+ struct net *net;
+ struct callback_head rcu;
+};
+
+struct audit_ctl_mutex {
+ struct mutex lock;
+ void *owner;
+};
+
+struct audit_buffer {
+ struct sk_buff *skb;
+ struct audit_context *ctx;
+ gfp_t gfp_mask;
+};
+
+struct audit_reply {
+ __u32 portid;
+ struct net *net;
+ struct sk_buff *skb;
+};
+
+enum {
+ Audit_equal = 0,
+ Audit_not_equal = 1,
+ Audit_bitmask = 2,
+ Audit_bittest = 3,
+ Audit_lt = 4,
+ Audit_gt = 5,
+ Audit_le = 6,
+ Audit_ge = 7,
+ Audit_bad = 8,
+};
+
+struct audit_rule_data {
+ __u32 flags;
+ __u32 action;
+ __u32 field_count;
+ __u32 mask[64];
+ __u32 fields[64];
+ __u32 values[64];
+ __u32 fieldflags[64];
+ __u32 buflen;
+ char buf[0];
+};
+
+struct audit_field;
+
+struct audit_watch;
+
+struct audit_tree;
+
+struct audit_fsnotify_mark;
+
+struct audit_krule {
+ u32 pflags;
+ u32 flags;
+ u32 listnr;
+ u32 action;
+ u32 mask[64];
+ u32 buflen;
+ u32 field_count;
+ char *filterkey;
+ struct audit_field *fields;
+ struct audit_field *arch_f;
+ struct audit_field *inode_f;
+ struct audit_watch *watch;
+ struct audit_tree *tree;
+ struct audit_fsnotify_mark *exe;
+ struct list_head rlist;
+ struct list_head list;
+ u64 prio;
+};
+
+struct audit_field {
+ u32 type;
+ union {
+ u32 val;
+ kuid_t uid;
+ kgid_t gid;
+ struct {
+ char *lsm_str;
+ void *lsm_rule;
+ };
+ };
+ u32 op;
+};
+
+struct audit_entry {
+ struct list_head list;
+ struct callback_head rcu;
+ struct audit_krule rule;
+};
+
+struct audit_buffer___2;
+
+typedef int __kernel_key_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+typedef __kernel_key_t key_t;
+
+struct cpu_vfs_cap_data {
+ __u32 magic_etc;
+ kernel_cap_t permitted;
+ kernel_cap_t inheritable;
+ kuid_t rootid;
+};
+
+struct kern_ipc_perm {
+ spinlock_t lock;
+ bool deleted;
+ int id;
+ key_t key;
+ kuid_t uid;
+ kgid_t gid;
+ kuid_t cuid;
+ kgid_t cgid;
+ umode_t mode;
+ long unsigned int seq;
+ void *security;
+ struct rhash_head khtnode;
+ struct callback_head rcu;
+ refcount_t refcount;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef struct fsnotify_mark_connector *fsnotify_connp_t;
+
+struct fsnotify_mark_connector {
+ spinlock_t lock;
+ short unsigned int type;
+ short unsigned int flags;
+ __kernel_fsid_t fsid;
+ union {
+ fsnotify_connp_t *obj;
+ struct fsnotify_mark_connector *destroy_next;
+ };
+ struct hlist_head list;
+};
+
+enum audit_nfcfgop {
+ AUDIT_XT_OP_REGISTER = 0,
+ AUDIT_XT_OP_REPLACE = 1,
+ AUDIT_XT_OP_UNREGISTER = 2,
+};
+
+enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_INODE = 0,
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1,
+ FSNOTIFY_OBJ_TYPE_SB = 2,
+ FSNOTIFY_OBJ_TYPE_COUNT = 3,
+ FSNOTIFY_OBJ_TYPE_DETACHED = 3,
+};
+
+struct audit_aux_data {
+ struct audit_aux_data *next;
+ int type;
+};
+
+struct audit_chunk;
+
+struct audit_tree_refs {
+ struct audit_tree_refs *next;
+ struct audit_chunk *c[31];
+};
+
+struct audit_aux_data_pids {
+ struct audit_aux_data d;
+ pid_t target_pid[16];
+ kuid_t target_auid[16];
+ kuid_t target_uid[16];
+ unsigned int target_sessionid[16];
+ u32 target_sid[16];
+ char target_comm[256];
+ int pid_count;
+};
+
+struct audit_aux_data_bprm_fcaps {
+ struct audit_aux_data d;
+ struct audit_cap_data fcap;
+ unsigned int fcap_ver;
+ struct audit_cap_data old_pcap;
+ struct audit_cap_data new_pcap;
+};
+
+struct audit_nfcfgop_tab {
+ enum audit_nfcfgop op;
+ const char *s;
+};
+
+struct audit_parent;
+
+struct audit_watch {
+ refcount_t count;
+ dev_t dev;
+ char *path;
+ long unsigned int ino;
+ struct audit_parent *parent;
+ struct list_head wlist;
+ struct list_head rules;
+};
+
+struct fsnotify_group;
+
+struct fsnotify_iter_info;
+
+struct fsnotify_mark;
+
+struct fsnotify_event;
+
+struct fsnotify_ops {
+ int (*handle_event)(struct fsnotify_group *, struct inode *, u32, const void *, int, const struct qstr *, u32, struct fsnotify_iter_info *);
+ void (*free_group_priv)(struct fsnotify_group *);
+ void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *);
+ void (*free_event)(struct fsnotify_event *);
+ void (*free_mark)(struct fsnotify_mark *);
+};
+
+struct inotify_group_private_data {
+ spinlock_t idr_lock;
+ struct idr idr;
+ struct ucounts *ucounts;
+};
+
+struct fanotify_group_private_data {
+ struct list_head access_list;
+ wait_queue_head_t access_waitq;
+ int flags;
+ int f_flags;
+ unsigned int max_marks;
+ struct user_struct *user;
+};
+
+struct fsnotify_group {
+ const struct fsnotify_ops *ops;
+ refcount_t refcnt;
+ spinlock_t notification_lock;
+ struct list_head notification_list;
+ wait_queue_head_t notification_waitq;
+ unsigned int q_len;
+ unsigned int max_events;
+ unsigned int priority;
+ bool shutdown;
+ struct mutex mark_mutex;
+ atomic_t num_marks;
+ atomic_t user_waits;
+ struct list_head marks_list;
+ struct fasync_struct *fsn_fa;
+ struct fsnotify_event *overflow_event;
+ struct mem_cgroup *memcg;
+ union {
+ void *private;
+ struct inotify_group_private_data inotify_data;
+ struct fanotify_group_private_data fanotify_data;
+ };
+};
+
+struct fsnotify_iter_info {
+ struct fsnotify_mark *marks[3];
+ unsigned int report_mask;
+ int srcu_idx;
+};
+
+struct fsnotify_mark {
+ __u32 mask;
+ refcount_t refcnt;
+ struct fsnotify_group *group;
+ struct list_head g_list;
+ spinlock_t lock;
+ struct hlist_node obj_list;
+ struct fsnotify_mark_connector *connector;
+ __u32 ignored_mask;
+ unsigned int flags;
+};
+
+struct fsnotify_event {
+ struct list_head list;
+ long unsigned int objectid;
+};
+
+enum fsnotify_data_type {
+ FSNOTIFY_EVENT_NONE = 0,
+ FSNOTIFY_EVENT_PATH = 1,
+ FSNOTIFY_EVENT_INODE = 2,
+};
+
+struct audit_parent {
+ struct list_head watches;
+ struct fsnotify_mark mark;
+};
+
+struct audit_fsnotify_mark {
+ dev_t dev;
+ long unsigned int ino;
+ char *path;
+ struct fsnotify_mark mark;
+ struct audit_krule *rule;
+};
+
+struct audit_chunk___2;
+
+struct audit_tree {
+ refcount_t count;
+ int goner;
+ struct audit_chunk___2 *root;
+ struct list_head chunks;
+ struct list_head rules;
+ struct list_head list;
+ struct list_head same_root;
+ struct callback_head head;
+ char pathname[0];
+};
+
+struct node___2 {
+ struct list_head list;
+ struct audit_tree *owner;
+ unsigned int index;
+};
+
+struct audit_chunk___2 {
+ struct list_head hash;
+ long unsigned int key;
+ struct fsnotify_mark *mark;
+ struct list_head trees;
+ int count;
+ atomic_long_t refs;
+ struct callback_head head;
+ struct node___2 owners[0];
+};
+
+struct audit_tree_mark {
+ struct fsnotify_mark mark;
+ struct audit_chunk___2 *chunk;
+};
+
+enum {
+ HASH_SIZE = 128,
+};
+
+struct kprobe_blacklist_entry {
+ struct list_head list;
+ long unsigned int start_addr;
+ long unsigned int end_addr;
+};
+
+struct kprobe_insn_page {
+ struct list_head list;
+ kprobe_opcode_t *insns;
+ struct kprobe_insn_cache *cache;
+ int nused;
+ int ngarbage;
+ char slot_used[0];
+};
+
+enum kprobe_slot_state {
+ SLOT_CLEAN = 0,
+ SLOT_DIRTY = 1,
+ SLOT_USED = 2,
+};
+
+struct seccomp_notif_sizes {
+ __u16 seccomp_notif;
+ __u16 seccomp_notif_resp;
+ __u16 seccomp_data;
+};
+
+struct seccomp_notif {
+ __u64 id;
+ __u32 pid;
+ __u32 flags;
+ struct seccomp_data data;
+};
+
+struct seccomp_notif_resp {
+ __u64 id;
+ __s64 val;
+ __s32 error;
+ __u32 flags;
+};
+
+struct notification;
+
+struct seccomp_filter {
+ refcount_t usage;
+ bool log;
+ struct seccomp_filter *prev;
+ struct bpf_prog *prog;
+ struct notification *notif;
+ struct mutex notify_lock;
+};
+
+struct ctl_path {
+ const char *procname;
+};
+
+struct sock_fprog {
+ short unsigned int len;
+ struct sock_filter *filter;
+};
+
+enum notify_state {
+ SECCOMP_NOTIFY_INIT = 0,
+ SECCOMP_NOTIFY_SENT = 1,
+ SECCOMP_NOTIFY_REPLIED = 2,
+};
+
+struct seccomp_knotif {
+ struct task_struct *task;
+ u64 id;
+ const struct seccomp_data *data;
+ enum notify_state state;
+ int error;
+ long int val;
+ u32 flags;
+ struct completion ready;
+ struct list_head list;
+};
+
+struct notification {
+ struct semaphore request;
+ u64 next_id;
+ struct list_head notifications;
+ wait_queue_head_t wqh;
+};
+
+struct seccomp_log_name {
+ u32 log;
+ const char *name;
+};
+
+struct tp_module {
+ struct list_head list;
+ struct module *mod;
+};
+
+struct tp_probes {
+ struct callback_head rcu;
+ struct tracepoint_func probes[0];
+};
+
+enum ring_buffer_type {
+ RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
+ RINGBUF_TYPE_PADDING = 29,
+ RINGBUF_TYPE_TIME_EXTEND = 30,
+ RINGBUF_TYPE_TIME_STAMP = 31,
+};
+
+enum ring_buffer_flags {
+ RB_FL_OVERWRITE = 1,
+};
+
+struct ring_buffer_per_cpu;
+
+struct buffer_page;
+
+struct ring_buffer_iter {
+ struct ring_buffer_per_cpu *cpu_buffer;
+ long unsigned int head;
+ long unsigned int next_event;
+ struct buffer_page *head_page;
+ struct buffer_page *cache_reader_page;
+ long unsigned int cache_read;
+ u64 read_stamp;
+ u64 page_stamp;
+ struct ring_buffer_event *event;
+ int missed_events;
+};
+
+struct rb_irq_work {
+ struct irq_work work;
+ wait_queue_head_t waiters;
+ wait_queue_head_t full_waiters;
+ bool waiters_pending;
+ bool full_waiters_pending;
+ bool wakeup_full;
+};
+
+struct trace_buffer___2 {
+ unsigned int flags;
+ int cpus;
+ atomic_t record_disabled;
+ cpumask_var_t cpumask;
+ struct lock_class_key *reader_lock_key;
+ struct mutex mutex;
+ struct ring_buffer_per_cpu **buffers;
+ struct hlist_node node;
+ u64 (*clock)();
+ struct rb_irq_work irq_work;
+ bool time_stamp_abs;
+};
+
+enum {
+ RB_LEN_TIME_EXTEND = 8,
+ RB_LEN_TIME_STAMP = 8,
+};
+
+struct buffer_data_page {
+ u64 time_stamp;
+ local_t commit;
+ unsigned char data[0];
+};
+
+struct buffer_page {
+ struct list_head list;
+ local_t write;
+ unsigned int read;
+ local_t entries;
+ long unsigned int real_end;
+ struct buffer_data_page *page;
+};
+
+struct rb_event_info {
+ u64 ts;
+ u64 delta;
+ long unsigned int length;
+ struct buffer_page *tail_page;
+ int add_timestamp;
+};
+
+enum {
+ RB_CTX_NMI = 0,
+ RB_CTX_IRQ = 1,
+ RB_CTX_SOFTIRQ = 2,
+ RB_CTX_NORMAL = 3,
+ RB_CTX_MAX = 4,
+};
+
+struct ring_buffer_per_cpu {
+ int cpu;
+ atomic_t record_disabled;
+ atomic_t resize_disabled;
+ struct trace_buffer___2 *buffer;
+ raw_spinlock_t reader_lock;
+ arch_spinlock_t lock;
+ struct lock_class_key lock_key;
+ struct buffer_data_page *free_page;
+ long unsigned int nr_pages;
+ unsigned int current_context;
+ struct list_head *pages;
+ struct buffer_page *head_page;
+ struct buffer_page *tail_page;
+ struct buffer_page *commit_page;
+ struct buffer_page *reader_page;
+ long unsigned int lost_events;
+ long unsigned int last_overrun;
+ long unsigned int nest;
+ local_t entries_bytes;
+ local_t entries;
+ local_t overrun;
+ local_t commit_overrun;
+ local_t dropped_events;
+ local_t committing;
+ local_t commits;
+ local_t pages_touched;
+ local_t pages_read;
+ long int last_pages_touch;
+ size_t shortest_full;
+ long unsigned int read;
+ long unsigned int read_bytes;
+ u64 write_stamp;
+ u64 read_stamp;
+ long int nr_pages_to_update;
+ struct list_head new_pages;
+ struct work_struct update_pages_work;
+ struct completion update_done;
+ struct rb_irq_work irq_work;
+};
+
+struct partial_page {
+ unsigned int offset;
+ unsigned int len;
+ long unsigned int private;
+};
+
+struct splice_pipe_desc {
+ struct page **pages;
+ struct partial_page *partial;
+ int nr_pages;
+ unsigned int nr_pages_max;
+ const struct pipe_buf_operations *ops;
+ void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+};
+
+struct trace_export {
+ struct trace_export *next;
+ void (*write)(struct trace_export *, const void *, unsigned int);
+};
+
+struct prog_entry;
+
+struct event_filter {
+ struct prog_entry *prog;
+ char *filter_string;
+};
+
+struct trace_array_cpu;
+
+struct array_buffer {
+ struct trace_array *tr;
+ struct trace_buffer *buffer;
+ struct trace_array_cpu *data;
+ u64 time_start;
+ int cpu;
+};
+
+struct trace_pid_list;
+
+struct trace_options;
+
+struct trace_array {
+ struct list_head list;
+ char *name;
+ struct array_buffer array_buffer;
+ struct trace_pid_list *filtered_pids;
+ struct trace_pid_list *filtered_no_pids;
+ arch_spinlock_t max_lock;
+ int buffer_disabled;
+ int stop_count;
+ int clock_id;
+ int nr_topts;
+ bool clear_trace;
+ int buffer_percent;
+ unsigned int n_err_log_entries;
+ struct tracer *current_trace;
+ unsigned int trace_flags;
+ unsigned char trace_flags_index[32];
+ unsigned int flags;
+ raw_spinlock_t start_lock;
+ struct list_head err_log;
+ struct dentry *dir;
+ struct dentry *options;
+ struct dentry *percpu_dir;
+ struct dentry *event_dir;
+ struct trace_options *topts;
+ struct list_head systems;
+ struct list_head events;
+ struct trace_event_file *trace_marker_file;
+ cpumask_var_t tracing_cpumask;
+ int ref;
+ int time_stamp_abs_ref;
+ struct list_head hist_vars;
+};
+
+struct tracer_flags;
+
+struct tracer {
+ const char *name;
+ int (*init)(struct trace_array *);
+ void (*reset)(struct trace_array *);
+ void (*start)(struct trace_array *);
+ void (*stop)(struct trace_array *);
+ int (*update_thresh)(struct trace_array *);
+ void (*open)(struct trace_iterator *);
+ void (*pipe_open)(struct trace_iterator *);
+ void (*close)(struct trace_iterator *);
+ void (*pipe_close)(struct trace_iterator *);
+ ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *);
+ ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+ void (*print_header)(struct seq_file *);
+ enum print_line_t (*print_line)(struct trace_iterator *);
+ int (*set_flag)(struct trace_array *, u32, u32, int);
+ int (*flag_changed)(struct trace_array *, u32, int);
+ struct tracer *next;
+ struct tracer_flags *flags;
+ int enabled;
+ int ref;
+ bool print_max;
+ bool allow_instances;
+ bool noboot;
+};
+
+enum trace_iter_flags {
+ TRACE_FILE_LAT_FMT = 1,
+ TRACE_FILE_ANNOTATE = 2,
+ TRACE_FILE_TIME_IN_NS = 4,
+};
+
+struct event_subsystem;
+
+struct trace_subsystem_dir {
+ struct list_head list;
+ struct event_subsystem *subsystem;
+ struct trace_array *tr;
+ struct dentry *entry;
+ int ref_count;
+ int nr_events;
+};
+
+enum event_trigger_type {
+ ETT_NONE = 0,
+ ETT_TRACE_ONOFF = 1,
+ ETT_SNAPSHOT = 2,
+ ETT_STACKTRACE = 4,
+ ETT_EVENT_ENABLE = 8,
+ ETT_EVENT_HIST = 16,
+ ETT_HIST_ENABLE = 32,
+};
+
+enum trace_type {
+ __TRACE_FIRST_TYPE = 0,
+ TRACE_FN = 1,
+ TRACE_CTX = 2,
+ TRACE_WAKE = 3,
+ TRACE_STACK = 4,
+ TRACE_PRINT = 5,
+ TRACE_BPRINT = 6,
+ TRACE_MMIO_RW = 7,
+ TRACE_MMIO_MAP = 8,
+ TRACE_BRANCH = 9,
+ TRACE_GRAPH_RET = 10,
+ TRACE_GRAPH_ENT = 11,
+ TRACE_USER_STACK = 12,
+ TRACE_BLK = 13,
+ TRACE_BPUTS = 14,
+ TRACE_HWLAT = 15,
+ TRACE_RAW_DATA = 16,
+ __TRACE_LAST_TYPE = 17,
+};
+
+struct ftrace_entry {
+ struct trace_entry ent;
+ long unsigned int ip;
+ long unsigned int parent_ip;
+};
+
+struct stack_entry {
+ struct trace_entry ent;
+ int size;
+ long unsigned int caller[8];
+};
+
+struct userstack_entry {
+ struct trace_entry ent;
+ unsigned int tgid;
+ long unsigned int caller[8];
+};
+
+struct bprint_entry {
+ struct trace_entry ent;
+ long unsigned int ip;
+ const char *fmt;
+ u32 buf[0];
+};
+
+struct print_entry {
+ struct trace_entry ent;
+ long unsigned int ip;
+ char buf[0];
+};
+
+struct raw_data_entry {
+ struct trace_entry ent;
+ unsigned int id;
+ char buf[0];
+};
+
+struct bputs_entry {
+ struct trace_entry ent;
+ long unsigned int ip;
+ const char *str;
+};
+
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 1,
+ TRACE_FLAG_IRQS_NOSUPPORT = 2,
+ TRACE_FLAG_NEED_RESCHED = 4,
+ TRACE_FLAG_HARDIRQ = 8,
+ TRACE_FLAG_SOFTIRQ = 16,
+ TRACE_FLAG_PREEMPT_RESCHED = 32,
+ TRACE_FLAG_NMI = 64,
+};
+
+struct trace_array_cpu {
+ atomic_t disabled;
+ void *buffer_page;
+ long unsigned int entries;
+ long unsigned int saved_latency;
+ long unsigned int critical_start;
+ long unsigned int critical_end;
+ long unsigned int critical_sequence;
+ long unsigned int nice;
+ long unsigned int policy;
+ long unsigned int rt_priority;
+ long unsigned int skipped_entries;
+ u64 preempt_timestamp;
+ pid_t pid;
+ kuid_t uid;
+ char comm[16];
+ bool ignore_pid;
+};
+
+struct trace_option_dentry;
+
+struct trace_options {
+ struct tracer *tracer;
+ struct trace_option_dentry *topts;
+};
+
+struct tracer_opt;
+
+struct trace_option_dentry {
+ struct tracer_opt *opt;
+ struct tracer_flags *flags;
+ struct trace_array *tr;
+ struct dentry *entry;
+};
+
+struct trace_pid_list {
+ int pid_max;
+ long unsigned int *pids;
+};
+
+typedef bool (*cond_update_fn_t)(struct trace_array *, void *);
+
+enum {
+ TRACE_ARRAY_FL_GLOBAL = 1,
+};
+
+struct tracer_opt {
+ const char *name;
+ u32 bit;
+};
+
+struct tracer_flags {
+ u32 val;
+ struct tracer_opt *opts;
+ struct tracer *trace;
+};
+
+struct trace_parser {
+ bool cont;
+ char *buffer;
+ unsigned int idx;
+ unsigned int size;
+};
+
+enum trace_iterator_bits {
+ TRACE_ITER_PRINT_PARENT_BIT = 0,
+ TRACE_ITER_SYM_OFFSET_BIT = 1,
+ TRACE_ITER_SYM_ADDR_BIT = 2,
+ TRACE_ITER_VERBOSE_BIT = 3,
+ TRACE_ITER_RAW_BIT = 4,
+ TRACE_ITER_HEX_BIT = 5,
+ TRACE_ITER_BIN_BIT = 6,
+ TRACE_ITER_BLOCK_BIT = 7,
+ TRACE_ITER_PRINTK_BIT = 8,
+ TRACE_ITER_ANNOTATE_BIT = 9,
+ TRACE_ITER_USERSTACKTRACE_BIT = 10,
+ TRACE_ITER_SYM_USEROBJ_BIT = 11,
+ TRACE_ITER_PRINTK_MSGONLY_BIT = 12,
+ TRACE_ITER_CONTEXT_INFO_BIT = 13,
+ TRACE_ITER_LATENCY_FMT_BIT = 14,
+ TRACE_ITER_RECORD_CMD_BIT = 15,
+ TRACE_ITER_RECORD_TGID_BIT = 16,
+ TRACE_ITER_OVERWRITE_BIT = 17,
+ TRACE_ITER_STOP_ON_FREE_BIT = 18,
+ TRACE_ITER_IRQ_INFO_BIT = 19,
+ TRACE_ITER_MARKERS_BIT = 20,
+ TRACE_ITER_EVENT_FORK_BIT = 21,
+ TRACE_ITER_PAUSE_ON_TRACE_BIT = 22,
+ TRACE_ITER_STACKTRACE_BIT = 23,
+ TRACE_ITER_LAST_BIT = 24,
+};
+
+enum trace_iterator_flags {
+ TRACE_ITER_PRINT_PARENT = 1,
+ TRACE_ITER_SYM_OFFSET = 2,
+ TRACE_ITER_SYM_ADDR = 4,
+ TRACE_ITER_VERBOSE = 8,
+ TRACE_ITER_RAW = 16,
+ TRACE_ITER_HEX = 32,
+ TRACE_ITER_BIN = 64,
+ TRACE_ITER_BLOCK = 128,
+ TRACE_ITER_PRINTK = 256,
+ TRACE_ITER_ANNOTATE = 512,
+ TRACE_ITER_USERSTACKTRACE = 1024,
+ TRACE_ITER_SYM_USEROBJ = 2048,
+ TRACE_ITER_PRINTK_MSGONLY = 4096,
+ TRACE_ITER_CONTEXT_INFO = 8192,
+ TRACE_ITER_LATENCY_FMT = 16384,
+ TRACE_ITER_RECORD_CMD = 32768,
+ TRACE_ITER_RECORD_TGID = 65536,
+ TRACE_ITER_OVERWRITE = 131072,
+ TRACE_ITER_STOP_ON_FREE = 262144,
+ TRACE_ITER_IRQ_INFO = 524288,
+ TRACE_ITER_MARKERS = 1048576,
+ TRACE_ITER_EVENT_FORK = 2097152,
+ TRACE_ITER_PAUSE_ON_TRACE = 4194304,
+ TRACE_ITER_STACKTRACE = 8388608,
+};
+
+struct event_subsystem {
+ struct list_head list;
+ const char *name;
+ struct event_filter *filter;
+ int ref_count;
+};
+
+struct saved_cmdlines_buffer {
+ unsigned int map_pid_to_cmdline[32769];
+ unsigned int *map_cmdline_to_pid;
+ unsigned int cmdline_num;
+ int cmdline_idx;
+ char *saved_cmdlines;
+};
+
+struct ftrace_stack {
+ long unsigned int calls[1024];
+};
+
+struct ftrace_stacks {
+ struct ftrace_stack stacks[4];
+};
+
+struct trace_buffer_struct {
+ int nesting;
+ char buffer[4096];
+};
+
+struct ftrace_buffer_info {
+ struct trace_iterator iter;
+ void *spare;
+ unsigned int spare_cpu;
+ unsigned int read;
+};
+
+struct err_info {
+ const char **errs;
+ u8 type;
+ u8 pos;
+ u64 ts;
+};
+
+struct tracing_log_err {
+ struct list_head list;
+ struct err_info info;
+ char loc[128];
+ char cmd[256];
+};
+
+struct buffer_ref {
+ struct trace_buffer *buffer;
+ void *page;
+ int cpu;
+ refcount_t refcount;
+};
+
+struct ctx_switch_entry {
+ struct trace_entry ent;
+ unsigned int prev_pid;
+ unsigned int next_pid;
+ unsigned int next_cpu;
+ unsigned char prev_prio;
+ unsigned char prev_state;
+ unsigned char next_prio;
+ unsigned char next_state;
+};
+
+struct hwlat_entry {
+ struct trace_entry ent;
+ u64 duration;
+ u64 outer_duration;
+ u64 nmi_total_ts;
+ struct timespec64 timestamp;
+ unsigned int nmi_count;
+ unsigned int seqnum;
+ unsigned int count;
+};
+
+struct trace_mark {
+ long long unsigned int val;
+ char sym;
+};
+
+struct tracer_stat {
+ const char *name;
+ void * (*stat_start)(struct tracer_stat *);
+ void * (*stat_next)(void *, int);
+ cmp_func_t stat_cmp;
+ int (*stat_show)(struct seq_file *, void *);
+ void (*stat_release)(void *);
+ int (*stat_headers)(struct seq_file *);
+};
+
+struct stat_node {
+ struct rb_node node;
+ void *stat;
+};
+
+struct stat_session {
+ struct list_head session_list;
+ struct tracer_stat *ts;
+ struct rb_root stat_root;
+ struct mutex stat_mutex;
+ struct dentry *file;
+};
+
+struct trace_bprintk_fmt {
+ struct list_head list;
+ const char *fmt;
+};
+
+typedef int (*tracing_map_cmp_fn_t)(void *, void *);
+
+struct tracing_map_field {
+ tracing_map_cmp_fn_t cmp_fn;
+ union {
+ atomic64_t sum;
+ unsigned int offset;
+ };
+};
+
+struct tracing_map;
+
+struct tracing_map_elt {
+ struct tracing_map *map;
+ struct tracing_map_field *fields;
+ atomic64_t *vars;
+ bool *var_set;
+ void *key;
+ void *private_data;
+};
+
+struct tracing_map_sort_key {
+ unsigned int field_idx;
+ bool descending;
+};
+
+struct tracing_map_array;
+
+struct tracing_map_ops;
+
+struct tracing_map {
+ unsigned int key_size;
+ unsigned int map_bits;
+ unsigned int map_size;
+ unsigned int max_elts;
+ atomic_t next_elt;
+ struct tracing_map_array *elts;
+ struct tracing_map_array *map;
+ const struct tracing_map_ops *ops;
+ void *private_data;
+ struct tracing_map_field fields[6];
+ unsigned int n_fields;
+ int key_idx[3];
+ unsigned int n_keys;
+ struct tracing_map_sort_key sort_key;
+ unsigned int n_vars;
+ atomic64_t hits;
+ atomic64_t drops;
+};
+
+struct tracing_map_entry {
+ u32 key;
+ struct tracing_map_elt *val;
+};
+
+struct tracing_map_sort_entry {
+ void *key;
+ struct tracing_map_elt *elt;
+ bool elt_copied;
+ bool dup;
+};
+
+struct tracing_map_array {
+ unsigned int entries_per_page;
+ unsigned int entry_size_shift;
+ unsigned int entry_shift;
+ unsigned int entry_mask;
+ unsigned int n_pages;
+ void **pages;
+};
+
+struct tracing_map_ops {
+ int (*elt_alloc)(struct tracing_map_elt *);
+ void (*elt_free)(struct tracing_map_elt *);
+ void (*elt_clear)(struct tracing_map_elt *);
+ void (*elt_init)(struct tracing_map_elt *);
+};
+
+struct trace_event_raw_preemptirq_template {
+ struct trace_entry ent;
+ s32 caller_offs;
+ s32 parent_offs;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_preemptirq_template {};
+
+typedef void (*btf_trace_irq_disable)(void *, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_irq_enable)(void *, long unsigned int, long unsigned int);
+
+enum {
+ TRACE_NOP_OPT_ACCEPT = 1,
+ TRACE_NOP_OPT_REFUSE = 2,
+};
+
+struct trace_mmiotrace_rw {
+ struct trace_entry ent;
+ struct mmiotrace_rw rw;
+};
+
+struct trace_mmiotrace_map {
+ struct trace_entry ent;
+ struct mmiotrace_map map;
+};
+
+struct header_iter {
+ struct pci_dev *dev;
+};
+
+typedef __u32 blk_mq_req_flags_t;
+
+enum req_opf {
+ REQ_OP_READ = 0,
+ REQ_OP_WRITE = 1,
+ REQ_OP_FLUSH = 2,
+ REQ_OP_DISCARD = 3,
+ REQ_OP_SECURE_ERASE = 5,
+ REQ_OP_ZONE_RESET = 6,
+ REQ_OP_WRITE_SAME = 7,
+ REQ_OP_ZONE_RESET_ALL = 8,
+ REQ_OP_WRITE_ZEROES = 9,
+ REQ_OP_ZONE_OPEN = 10,
+ REQ_OP_ZONE_CLOSE = 11,
+ REQ_OP_ZONE_FINISH = 12,
+ REQ_OP_ZONE_APPEND = 13,
+ REQ_OP_SCSI_IN = 32,
+ REQ_OP_SCSI_OUT = 33,
+ REQ_OP_DRV_IN = 34,
+ REQ_OP_DRV_OUT = 35,
+ REQ_OP_LAST = 36,
+};
+
+enum req_flag_bits {
+ __REQ_FAILFAST_DEV = 8,
+ __REQ_FAILFAST_TRANSPORT = 9,
+ __REQ_FAILFAST_DRIVER = 10,
+ __REQ_SYNC = 11,
+ __REQ_META = 12,
+ __REQ_PRIO = 13,
+ __REQ_NOMERGE = 14,
+ __REQ_IDLE = 15,
+ __REQ_INTEGRITY = 16,
+ __REQ_FUA = 17,
+ __REQ_PREFLUSH = 18,
+ __REQ_RAHEAD = 19,
+ __REQ_BACKGROUND = 20,
+ __REQ_NOWAIT = 21,
+ __REQ_CGROUP_PUNT = 22,
+ __REQ_NOUNMAP = 23,
+ __REQ_HIPRI = 24,
+ __REQ_DRV = 25,
+ __REQ_SWAP = 26,
+ __REQ_NR_BITS = 27,
+};
+
+struct disk_stats {
+ u64 nsecs[4];
+ long unsigned int sectors[4];
+ long unsigned int ios[4];
+ long unsigned int merges[4];
+ long unsigned int io_ticks;
+ local_t in_flight[2];
+};
+
+struct blk_mq_ctxs;
+
+struct blk_mq_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head rq_lists[3];
+ long: 64;
+ long: 64;
+ };
+ unsigned int cpu;
+ short unsigned int index_hw[3];
+ struct blk_mq_hw_ctx *hctxs[3];
+ long unsigned int rq_dispatched[2];
+ long unsigned int rq_merged;
+ long unsigned int rq_completed[2];
+ struct request_queue *queue;
+ struct blk_mq_ctxs *ctxs;
+ struct kobject kobj;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct sbitmap_word;
+
+struct sbitmap {
+ unsigned int depth;
+ unsigned int shift;
+ unsigned int map_nr;
+ struct sbitmap_word *map;
+};
+
+struct blk_mq_tags;
+
+struct blk_mq_hw_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head dispatch;
+ long unsigned int state;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ struct delayed_work run_work;
+ cpumask_var_t cpumask;
+ int next_cpu;
+ int next_cpu_batch;
+ long unsigned int flags;
+ void *sched_data;
+ struct request_queue *queue;
+ struct blk_flush_queue *fq;
+ void *driver_data;
+ struct sbitmap ctx_map;
+ struct blk_mq_ctx *dispatch_from;
+ unsigned int dispatch_busy;
+ short unsigned int type;
+ short unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+ spinlock_t dispatch_wait_lock;
+ wait_queue_entry_t dispatch_wait;
+ atomic_t wait_index;
+ struct blk_mq_tags *tags;
+ struct blk_mq_tags *sched_tags;
+ long unsigned int queued;
+ long unsigned int run;
+ long unsigned int dispatched[7];
+ unsigned int numa_node;
+ unsigned int queue_num;
+ atomic_t nr_active;
+ struct hlist_node cpuhp_online;
+ struct hlist_node cpuhp_dead;
+ struct kobject kobj;
+ long unsigned int poll_considered;
+ long unsigned int poll_invoked;
+ long unsigned int poll_success;
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+ struct list_head hctx_list;
+ struct srcu_struct srcu[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct blk_mq_alloc_data {
+ struct request_queue *q;
+ blk_mq_req_flags_t flags;
+ unsigned int shallow_depth;
+ unsigned int cmd_flags;
+ struct blk_mq_ctx *ctx;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+struct blk_stat_callback {
+ struct list_head list;
+ struct timer_list timer;
+ struct blk_rq_stat *cpu_stat;
+ int (*bucket_fn)(const struct request *);
+ unsigned int buckets;
+ struct blk_rq_stat *stat;
+ void (*timer_fn)(struct blk_stat_callback *);
+ void *data;
+ struct callback_head rcu;
+};
+
+struct blk_flush_queue {
+ unsigned int flush_pending_idx: 1;
+ unsigned int flush_running_idx: 1;
+ blk_status_t rq_status;
+ long unsigned int flush_pending_since;
+ struct list_head flush_queue[2];
+ struct list_head flush_data_in_flight;
+ struct request *flush_rq;
+ struct request *orig_rq;
+ struct lock_class_key key;
+ spinlock_t mq_flush_lock;
+};
+
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+ unsigned int queue_offset;
+};
+
+struct blk_mq_tag_set {
+ struct blk_mq_queue_map map[3];
+ unsigned int nr_maps;
+ const struct blk_mq_ops *ops;
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth;
+ unsigned int reserved_tags;
+ unsigned int cmd_size;
+ int numa_node;
+ unsigned int timeout;
+ unsigned int flags;
+ void *driver_data;
+ struct blk_mq_tags **tags;
+ struct mutex tag_list_lock;
+ struct list_head tag_list;
+};
+
+struct sbitmap_word {
+ long unsigned int depth;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long unsigned int word;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long unsigned int cleared;
+ spinlock_t swap_lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct sbq_wait_state {
+ atomic_t wait_cnt;
+ wait_queue_head_t wait;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct sbitmap_queue {
+ struct sbitmap sb;
+ unsigned int *alloc_hint;
+ unsigned int wake_batch;
+ atomic_t wake_index;
+ struct sbq_wait_state *ws;
+ atomic_t ws_active;
+ bool round_robin;
+ unsigned int min_shallow_depth;
+};
+
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+ atomic_t active_queues;
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+};
+
+struct blk_mq_queue_data {
+ struct request *rq;
+ bool last;
+};
+
+struct blk_crypto_mode {
+ const char *cipher_str;
+ unsigned int keysize;
+ unsigned int ivsize;
+};
+
+struct blk_mq_ctxs {
+ struct kobject kobj;
+ struct blk_mq_ctx *queue_ctx;
+};
+
+enum {
+ TRACE_PIDS = 1,
+ TRACE_NO_PIDS = 2,
+};
+
+struct ftrace_event_field {
+ struct list_head link;
+ const char *name;
+ const char *type;
+ int filter_type;
+ int offset;
+ int size;
+ int is_signed;
+};
+
+enum {
+ FORMAT_HEADER = 1,
+ FORMAT_FIELD_SEPERATOR = 2,
+ FORMAT_PRINTFMT = 3,
+};
+
+typedef long unsigned int perf_trace_t[256];
+
+struct filter_pred;
+
+struct prog_entry {
+ int target;
+ int when_to_branch;
+ struct filter_pred *pred;
+};
+
+typedef int (*filter_pred_fn_t)(struct filter_pred *, void *);
+
+struct regex;
+
+typedef int (*regex_match_func)(char *, struct regex *, int);
+
+struct regex {
+ char pattern[256];
+ int len;
+ int field_len;
+ regex_match_func match;
+};
+
+struct filter_pred {
+ filter_pred_fn_t fn;
+ u64 val;
+ struct regex regex;
+ short unsigned int *ops;
+ struct ftrace_event_field *field;
+ int offset;
+ int not;
+ int op;
+};
+
+enum regex_type {
+ MATCH_FULL = 0,
+ MATCH_FRONT_ONLY = 1,
+ MATCH_MIDDLE_ONLY = 2,
+ MATCH_END_ONLY = 3,
+ MATCH_GLOB = 4,
+ MATCH_INDEX = 5,
+};
+
+enum filter_op_ids {
+ OP_GLOB = 0,
+ OP_NE = 1,
+ OP_EQ = 2,
+ OP_LE = 3,
+ OP_LT = 4,
+ OP_GE = 5,
+ OP_GT = 6,
+ OP_BAND = 7,
+ OP_MAX = 8,
+};
+
+enum {
+ FILT_ERR_NONE = 0,
+ FILT_ERR_INVALID_OP = 1,
+ FILT_ERR_TOO_MANY_OPEN = 2,
+ FILT_ERR_TOO_MANY_CLOSE = 3,
+ FILT_ERR_MISSING_QUOTE = 4,
+ FILT_ERR_OPERAND_TOO_LONG = 5,
+ FILT_ERR_EXPECT_STRING = 6,
+ FILT_ERR_EXPECT_DIGIT = 7,
+ FILT_ERR_ILLEGAL_FIELD_OP = 8,
+ FILT_ERR_FIELD_NOT_FOUND = 9,
+ FILT_ERR_ILLEGAL_INTVAL = 10,
+ FILT_ERR_BAD_SUBSYS_FILTER = 11,
+ FILT_ERR_TOO_MANY_PREDS = 12,
+ FILT_ERR_INVALID_FILTER = 13,
+ FILT_ERR_IP_FIELD_ONLY = 14,
+ FILT_ERR_INVALID_VALUE = 15,
+ FILT_ERR_ERRNO = 16,
+ FILT_ERR_NO_FILTER = 17,
+};
+
+struct filter_parse_error {
+ int lasterr;
+ int lasterr_pos;
+};
+
+typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **);
+
+enum {
+ INVERT = 1,
+ PROCESS_AND = 2,
+ PROCESS_OR = 4,
+};
+
+enum {
+ TOO_MANY_CLOSE = -1,
+ TOO_MANY_OPEN = -2,
+ MISSING_QUOTE = -3,
+};
+
+struct filter_list {
+ struct list_head list;
+ struct event_filter *filter;
+};
+
+struct event_trigger_ops;
+
+struct event_command;
+
+struct event_trigger_data {
+ long unsigned int count;
+ int ref;
+ struct event_trigger_ops *ops;
+ struct event_command *cmd_ops;
+ struct event_filter *filter;
+ char *filter_str;
+ void *private_data;
+ bool paused;
+ bool paused_tmp;
+ struct list_head list;
+ char *name;
+ struct list_head named_list;
+ struct event_trigger_data *named_data;
+};
+
+struct event_trigger_ops {
+ void (*func)(struct event_trigger_data *, void *, struct ring_buffer_event *);
+ int (*init)(struct event_trigger_ops *, struct event_trigger_data *);
+ void (*free)(struct event_trigger_ops *, struct event_trigger_data *);
+ int (*print)(struct seq_file *, struct event_trigger_ops *, struct event_trigger_data *);
+};
+
+struct event_command {
+ struct list_head list;
+ char *name;
+ enum event_trigger_type trigger_type;
+ int flags;
+ int (*func)(struct event_command *, struct trace_event_file *, char *, char *, char *);
+ int (*reg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *);
+ void (*unreg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *);
+ void (*unreg_all)(struct trace_event_file *);
+ int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *);
+ struct event_trigger_ops * (*get_trigger_ops)(char *, char *);
+};
+
+struct enable_trigger_data {
+ struct trace_event_file *file;
+ bool enable;
+ bool hist;
+};
+
+enum event_command_flags {
+ EVENT_CMD_FL_POST_TRIGGER = 1,
+ EVENT_CMD_FL_NEEDS_REC = 2,
+};
+
+enum dynevent_type {
+ DYNEVENT_TYPE_SYNTH = 1,
+ DYNEVENT_TYPE_KPROBE = 2,
+ DYNEVENT_TYPE_NONE = 3,
+};
+
+struct dynevent_cmd;
+
+typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *);
+
+struct dynevent_cmd {
+ struct seq_buf seq;
+ const char *event_name;
+ unsigned int n_fields;
+ enum dynevent_type type;
+ dynevent_create_fn_t run_command;
+ void *private_data;
+};
+
+struct synth_field_desc {
+ const char *type;
+ const char *name;
+};
+
+struct synth_trace_event;
+
+struct synth_event;
+
+struct synth_event_trace_state {
+ struct trace_event_buffer fbuffer;
+ struct synth_trace_event *entry;
+ struct trace_buffer *buffer;
+ struct synth_event *event;
+ unsigned int cur_field;
+ unsigned int n_u64;
+ bool disabled;
+ bool add_next;
+ bool add_name;
+};
+
+struct synth_trace_event {
+ struct trace_entry ent;
+ u64 fields[0];
+};
+
+struct dyn_event_operations;
+
+struct dyn_event {
+ struct list_head list;
+ struct dyn_event_operations *ops;
+};
+
+struct synth_field;
+
+struct synth_event {
+ struct dyn_event devent;
+ int ref;
+ char *name;
+ struct synth_field **fields;
+ unsigned int n_fields;
+ unsigned int n_u64;
+ struct trace_event_class class;
+ struct trace_event_call call;
+ struct tracepoint *tp;
+ struct module *mod;
+};
+
+struct dyn_event_operations {
+ struct list_head list;
+ int (*create)(int, const char **);
+ int (*show)(struct seq_file *, struct dyn_event *);
+ bool (*is_busy)(struct dyn_event *);
+ int (*free)(struct dyn_event *);
+ bool (*match)(const char *, const char *, int, const char **, struct dyn_event *);
+};
+
+struct dynevent_arg {
+ const char *str;
+ char separator;
+};
+
+struct dynevent_arg_pair {
+ const char *lhs;
+ const char *rhs;
+ char operator;
+ char separator;
+};
+
+struct synth_field {
+ char *type;
+ char *name;
+ size_t size;
+ unsigned int offset;
+ bool is_signed;
+ bool is_string;
+};
+
+enum {
+ HIST_ERR_NONE = 0,
+ HIST_ERR_DUPLICATE_VAR = 1,
+ HIST_ERR_VAR_NOT_UNIQUE = 2,
+ HIST_ERR_TOO_MANY_VARS = 3,
+ HIST_ERR_MALFORMED_ASSIGNMENT = 4,
+ HIST_ERR_NAMED_MISMATCH = 5,
+ HIST_ERR_TRIGGER_EEXIST = 6,
+ HIST_ERR_TRIGGER_ENOENT_CLEAR = 7,
+ HIST_ERR_SET_CLOCK_FAIL = 8,
+ HIST_ERR_BAD_FIELD_MODIFIER = 9,
+ HIST_ERR_TOO_MANY_SUBEXPR = 10,
+ HIST_ERR_TIMESTAMP_MISMATCH = 11,
+ HIST_ERR_TOO_MANY_FIELD_VARS = 12,
+ HIST_ERR_EVENT_FILE_NOT_FOUND = 13,
+ HIST_ERR_HIST_NOT_FOUND = 14,
+ HIST_ERR_HIST_CREATE_FAIL = 15,
+ HIST_ERR_SYNTH_VAR_NOT_FOUND = 16,
+ HIST_ERR_SYNTH_EVENT_NOT_FOUND = 17,
+ HIST_ERR_SYNTH_TYPE_MISMATCH = 18,
+ HIST_ERR_SYNTH_COUNT_MISMATCH = 19,
+ HIST_ERR_FIELD_VAR_PARSE_FAIL = 20,
+ HIST_ERR_VAR_CREATE_FIND_FAIL = 21,
+ HIST_ERR_ONX_NOT_VAR = 22,
+ HIST_ERR_ONX_VAR_NOT_FOUND = 23,
+ HIST_ERR_ONX_VAR_CREATE_FAIL = 24,
+ HIST_ERR_FIELD_VAR_CREATE_FAIL = 25,
+ HIST_ERR_TOO_MANY_PARAMS = 26,
+ HIST_ERR_PARAM_NOT_FOUND = 27,
+ HIST_ERR_INVALID_PARAM = 28,
+ HIST_ERR_ACTION_NOT_FOUND = 29,
+ HIST_ERR_NO_SAVE_PARAMS = 30,
+ HIST_ERR_TOO_MANY_SAVE_ACTIONS = 31,
+ HIST_ERR_ACTION_MISMATCH = 32,
+ HIST_ERR_NO_CLOSING_PAREN = 33,
+ HIST_ERR_SUBSYS_NOT_FOUND = 34,
+ HIST_ERR_INVALID_SUBSYS_EVENT = 35,
+ HIST_ERR_INVALID_REF_KEY = 36,
+ HIST_ERR_VAR_NOT_FOUND = 37,
+ HIST_ERR_FIELD_NOT_FOUND = 38,
+ HIST_ERR_EMPTY_ASSIGNMENT = 39,
+ HIST_ERR_INVALID_SORT_MODIFIER = 40,
+ HIST_ERR_EMPTY_SORT_FIELD = 41,
+ HIST_ERR_TOO_MANY_SORT_FIELDS = 42,
+ HIST_ERR_INVALID_SORT_FIELD = 43,
+};
+
+struct hist_field;
+
+typedef u64 (*hist_field_fn_t)(struct hist_field *, struct tracing_map_elt *, struct ring_buffer_event *, void *);
+
+struct hist_trigger_data;
+
+struct hist_var {
+ char *name;
+ struct hist_trigger_data *hist_data;
+ unsigned int idx;
+};
+
+enum field_op_id {
+ FIELD_OP_NONE = 0,
+ FIELD_OP_PLUS = 1,
+ FIELD_OP_MINUS = 2,
+ FIELD_OP_UNARY_MINUS = 3,
+};
+
+struct hist_field {
+ struct ftrace_event_field *field;
+ long unsigned int flags;
+ hist_field_fn_t fn;
+ unsigned int ref;
+ unsigned int size;
+ unsigned int offset;
+ unsigned int is_signed;
+ const char *type;
+ struct hist_field *operands[2];
+ struct hist_trigger_data *hist_data;
+ struct hist_var var;
+ enum field_op_id operator;
+ char *system;
+ char *event_name;
+ char *name;
+ unsigned int var_ref_idx;
+ bool read_once;
+};
+
+struct hist_trigger_attrs;
+
+struct action_data;
+
+struct field_var;
+
+struct field_var_hist;
+
+struct hist_trigger_data {
+ struct hist_field *fields[22];
+ unsigned int n_vals;
+ unsigned int n_keys;
+ unsigned int n_fields;
+ unsigned int n_vars;
+ unsigned int key_size;
+ struct tracing_map_sort_key sort_keys[2];
+ unsigned int n_sort_keys;
+ struct trace_event_file *event_file;
+ struct hist_trigger_attrs *attrs;
+ struct tracing_map *map;
+ bool enable_timestamps;
+ bool remove;
+ struct hist_field *var_refs[16];
+ unsigned int n_var_refs;
+ struct action_data *actions[8];
+ unsigned int n_actions;
+ struct field_var *field_vars[32];
+ unsigned int n_field_vars;
+ unsigned int n_field_var_str;
+ struct field_var_hist *field_var_hists[32];
+ unsigned int n_field_var_hists;
+ struct field_var *save_vars[32];
+ unsigned int n_save_vars;
+ unsigned int n_save_var_str;
+};
+
+enum hist_field_flags {
+ HIST_FIELD_FL_HITCOUNT = 1,
+ HIST_FIELD_FL_KEY = 2,
+ HIST_FIELD_FL_STRING = 4,
+ HIST_FIELD_FL_HEX = 8,
+ HIST_FIELD_FL_SYM = 16,
+ HIST_FIELD_FL_SYM_OFFSET = 32,
+ HIST_FIELD_FL_EXECNAME = 64,
+ HIST_FIELD_FL_SYSCALL = 128,
+ HIST_FIELD_FL_STACKTRACE = 256,
+ HIST_FIELD_FL_LOG2 = 512,
+ HIST_FIELD_FL_TIMESTAMP = 1024,
+ HIST_FIELD_FL_TIMESTAMP_USECS = 2048,
+ HIST_FIELD_FL_VAR = 4096,
+ HIST_FIELD_FL_EXPR = 8192,
+ HIST_FIELD_FL_VAR_REF = 16384,
+ HIST_FIELD_FL_CPU = 32768,
+ HIST_FIELD_FL_ALIAS = 65536,
+};
+
+struct var_defs {
+ unsigned int n_vars;
+ char *name[16];
+ char *expr[16];
+};
+
+struct hist_trigger_attrs {
+ char *keys_str;
+ char *vals_str;
+ char *sort_key_str;
+ char *name;
+ char *clock;
+ bool pause;
+ bool cont;
+ bool clear;
+ bool ts_in_usecs;
+ unsigned int map_bits;
+ char *assignment_str[16];
+ unsigned int n_assignments;
+ char *action_str[8];
+ unsigned int n_actions;
+ struct var_defs var_defs;
+};
+
+struct field_var {
+ struct hist_field *var;
+ struct hist_field *val;
+};
+
+struct field_var_hist {
+ struct hist_trigger_data *hist_data;
+ char *cmd;
+};
+
+enum handler_id {
+ HANDLER_ONMATCH = 1,
+ HANDLER_ONMAX = 2,
+ HANDLER_ONCHANGE = 3,
+};
+
+enum action_id {
+ ACTION_SAVE = 1,
+ ACTION_TRACE = 2,
+ ACTION_SNAPSHOT = 3,
+};
+
+typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, void *, struct ring_buffer_event *, void *, struct action_data *, u64 *);
+
+typedef bool (*check_track_val_fn_t)(u64, u64);
+
+struct action_data {
+ enum handler_id handler;
+ enum action_id action;
+ char *action_name;
+ action_fn_t fn;
+ unsigned int n_params;
+ char *params[32];
+ unsigned int var_ref_idx[16];
+ struct synth_event *synth_event;
+ bool use_trace_keyword;
+ char *synth_event_name;
+ union {
+ struct {
+ char *event;
+ char *event_system;
+ } match_data;
+ struct {
+ char *var_str;
+ struct hist_field *var_ref;
+ struct hist_field *track_var;
+ check_track_val_fn_t check_val;
+ action_fn_t save_data;
+ } track_data;
+ };
+};
+
+struct track_data {
+ u64 track_val;
+ bool updated;
+ unsigned int key_len;
+ void *key;
+ struct tracing_map_elt elt;
+ struct action_data *action_data;
+ struct hist_trigger_data *hist_data;
+};
+
+struct hist_elt_data {
+ char *comm;
+ u64 *var_ref_vals;
+ char *field_var_str[32];
+};
+
+typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int *);
+
+struct hist_var_data {
+ struct list_head list;
+ struct hist_trigger_data *hist_data;
+};
+
+enum {
+ BPF_F_INDEX_MASK = 0xffffffffULL,
+ BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
+ BPF_F_CTXLEN_MASK = 0,
+};
+
+enum {
+ BPF_F_GET_BRANCH_RECORDS_SIZE = 1,
+};
+
+struct bpf_perf_event_value {
+ __u64 counter;
+ __u64 enabled;
+ __u64 running;
+};
+
+struct bpf_raw_tracepoint_args {
+ __u64 args[0];
+};
+
+enum bpf_task_fd_type {
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0,
+ BPF_FD_TYPE_TRACEPOINT = 1,
+ BPF_FD_TYPE_KPROBE = 2,
+ BPF_FD_TYPE_KRETPROBE = 3,
+ BPF_FD_TYPE_UPROBE = 4,
+ BPF_FD_TYPE_URETPROBE = 5,
+};
+
+struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+ struct file *map_file;
+ struct callback_head rcu;
+};
+
+typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int);
+
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+struct bpf_perf_event_data {
+ bpf_user_pt_regs_t regs;
+ __u64 sample_period;
+ __u64 addr;
+};
+
+struct perf_event_query_bpf {
+ __u32 ids_len;
+ __u32 prog_cnt;
+ __u32 ids[0];
+};
+
+struct bpf_perf_event_data_kern {
+ bpf_user_pt_regs_t *regs;
+ struct perf_sample_data *data;
+ struct perf_event *event;
+};
+
+struct bpf_trace_module {
+ struct module *module;
+ struct list_head list;
+};
+
+typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *);
+
+typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *);
+
+typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *);
+
+typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *);
+
+typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *);
+
+typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *);
+
+typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32);
+
+typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64);
+
+struct bpf_seq_printf_buf {
+ char buf[768];
+};
+
+typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32);
+
+typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32);
+
+typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64);
+
+typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32);
+
+struct bpf_trace_sample_data {
+ struct perf_sample_data sds[3];
+};
+
+typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64);
+
+struct bpf_nested_pt_regs {
+ struct pt_regs regs[3];
+};
+
+typedef u64 (*btf_bpf_get_current_task)();
+
+typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32);
+
+struct send_signal_irq_work {
+ struct irq_work irq_work;
+ struct task_struct *task;
+ u32 sig;
+ enum pid_type type;
+};
+
+typedef u64 (*btf_bpf_send_signal)(u32);
+
+typedef u64 (*btf_bpf_send_signal_thread)(u32);
+
+typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64);
+
+typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64);
+
+typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64);
+
+typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32);
+
+typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64);
+
+struct bpf_raw_tp_regs {
+ struct pt_regs regs[3];
+};
+
+typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64);
+
+typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64);
+
+typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64);
+
+typedef struct bpf_cgroup_storage *pto_T_____23;
+
+struct kprobe_trace_entry_head {
+ struct trace_entry ent;
+ long unsigned int ip;
+};
+
+struct kretprobe_trace_entry_head {
+ struct trace_entry ent;
+ long unsigned int func;
+ long unsigned int ret_ip;
+};
+
+typedef int (*print_type_func_t)(struct trace_seq *, void *, void *);
+
+enum fetch_op {
+ FETCH_OP_NOP = 0,
+ FETCH_OP_REG = 1,
+ FETCH_OP_STACK = 2,
+ FETCH_OP_STACKP = 3,
+ FETCH_OP_RETVAL = 4,
+ FETCH_OP_IMM = 5,
+ FETCH_OP_COMM = 6,
+ FETCH_OP_ARG = 7,
+ FETCH_OP_FOFFS = 8,
+ FETCH_OP_DATA = 9,
+ FETCH_OP_DEREF = 10,
+ FETCH_OP_UDEREF = 11,
+ FETCH_OP_ST_RAW = 12,
+ FETCH_OP_ST_MEM = 13,
+ FETCH_OP_ST_UMEM = 14,
+ FETCH_OP_ST_STRING = 15,
+ FETCH_OP_ST_USTRING = 16,
+ FETCH_OP_MOD_BF = 17,
+ FETCH_OP_LP_ARRAY = 18,
+ FETCH_OP_END = 19,
+ FETCH_NOP_SYMBOL = 20,
+};
+
+struct fetch_insn {
+ enum fetch_op op;
+ union {
+ unsigned int param;
+ struct {
+ unsigned int size;
+ int offset;
+ };
+ struct {
+ unsigned char basesize;
+ unsigned char lshift;
+ unsigned char rshift;
+ };
+ long unsigned int immediate;
+ void *data;
+ };
+};
+
+struct fetch_type {
+ const char *name;
+ size_t size;
+ int is_signed;
+ print_type_func_t print;
+ const char *fmt;
+ const char *fmttype;
+};
+
+struct probe_arg {
+ struct fetch_insn *code;
+ bool dynamic;
+ unsigned int offset;
+ unsigned int count;
+ const char *name;
+ const char *comm;
+ char *fmt;
+ const struct fetch_type *type;
+};
+
+struct trace_uprobe_filter {
+ rwlock_t rwlock;
+ int nr_systemwide;
+ struct list_head perf_events;
+};
+
+struct trace_probe_event {
+ unsigned int flags;
+ struct trace_event_class class;
+ struct trace_event_call call;
+ struct list_head files;
+ struct list_head probes;
+ struct trace_uprobe_filter filter[0];
+};
+
+struct trace_probe {
+ struct list_head list;
+ struct trace_probe_event *event;
+ ssize_t size;
+ unsigned int nr_args;
+ struct probe_arg args[0];
+};
+
+struct event_file_link {
+ struct trace_event_file *file;
+ struct list_head list;
+};
+
+enum {
+ TP_ERR_FILE_NOT_FOUND = 0,
+ TP_ERR_NO_REGULAR_FILE = 1,
+ TP_ERR_BAD_REFCNT = 2,
+ TP_ERR_REFCNT_OPEN_BRACE = 3,
+ TP_ERR_BAD_REFCNT_SUFFIX = 4,
+ TP_ERR_BAD_UPROBE_OFFS = 5,
+ TP_ERR_MAXACT_NO_KPROBE = 6,
+ TP_ERR_BAD_MAXACT = 7,
+ TP_ERR_MAXACT_TOO_BIG = 8,
+ TP_ERR_BAD_PROBE_ADDR = 9,
+ TP_ERR_BAD_RETPROBE = 10,
+ TP_ERR_NO_GROUP_NAME = 11,
+ TP_ERR_GROUP_TOO_LONG = 12,
+ TP_ERR_BAD_GROUP_NAME = 13,
+ TP_ERR_NO_EVENT_NAME = 14,
+ TP_ERR_EVENT_TOO_LONG = 15,
+ TP_ERR_BAD_EVENT_NAME = 16,
+ TP_ERR_RETVAL_ON_PROBE = 17,
+ TP_ERR_BAD_STACK_NUM = 18,
+ TP_ERR_BAD_ARG_NUM = 19,
+ TP_ERR_BAD_VAR = 20,
+ TP_ERR_BAD_REG_NAME = 21,
+ TP_ERR_BAD_MEM_ADDR = 22,
+ TP_ERR_BAD_IMM = 23,
+ TP_ERR_IMMSTR_NO_CLOSE = 24,
+ TP_ERR_FILE_ON_KPROBE = 25,
+ TP_ERR_BAD_FILE_OFFS = 26,
+ TP_ERR_SYM_ON_UPROBE = 27,
+ TP_ERR_TOO_MANY_OPS = 28,
+ TP_ERR_DEREF_NEED_BRACE = 29,
+ TP_ERR_BAD_DEREF_OFFS = 30,
+ TP_ERR_DEREF_OPEN_BRACE = 31,
+ TP_ERR_COMM_CANT_DEREF = 32,
+ TP_ERR_BAD_FETCH_ARG = 33,
+ TP_ERR_ARRAY_NO_CLOSE = 34,
+ TP_ERR_BAD_ARRAY_SUFFIX = 35,
+ TP_ERR_BAD_ARRAY_NUM = 36,
+ TP_ERR_ARRAY_TOO_BIG = 37,
+ TP_ERR_BAD_TYPE = 38,
+ TP_ERR_BAD_STRING = 39,
+ TP_ERR_BAD_BITFIELD = 40,
+ TP_ERR_ARG_NAME_TOO_LONG = 41,
+ TP_ERR_NO_ARG_NAME = 42,
+ TP_ERR_BAD_ARG_NAME = 43,
+ TP_ERR_USED_ARG_NAME = 44,
+ TP_ERR_ARG_TOO_LONG = 45,
+ TP_ERR_NO_ARG_BODY = 46,
+ TP_ERR_BAD_INSN_BNDRY = 47,
+ TP_ERR_FAIL_REG_PROBE = 48,
+ TP_ERR_DIFF_PROBE_TYPE = 49,
+ TP_ERR_DIFF_ARG_TYPE = 50,
+ TP_ERR_SAME_PROBE = 51,
+};
+
+struct trace_kprobe {
+ struct dyn_event devent;
+ struct kretprobe rp;
+ long unsigned int *nhit;
+ const char *symbol;
+ struct trace_probe tp;
+};
+
+struct trace_event_raw_cpu {
+ struct trace_entry ent;
+ u32 state;
+ u32 cpu_id;
+ char __data[0];
+};
+
+struct trace_event_raw_powernv_throttle {
+ struct trace_entry ent;
+ int chip_id;
+ u32 __data_loc_reason;
+ int pmax;
+ char __data[0];
+};
+
+struct trace_event_raw_pstate_sample {
+ struct trace_entry ent;
+ u32 core_busy;
+ u32 scaled_busy;
+ u32 from;
+ u32 to;
+ u64 mperf;
+ u64 aperf;
+ u64 tsc;
+ u32 freq;
+ u32 io_boost;
+ char __data[0];
+};
+
+struct trace_event_raw_cpu_frequency_limits {
+ struct trace_entry ent;
+ u32 min_freq;
+ u32 max_freq;
+ u32 cpu_id;
+ char __data[0];
+};
+
+struct trace_event_raw_device_pm_callback_start {
+ struct trace_entry ent;
+ u32 __data_loc_device;
+ u32 __data_loc_driver;
+ u32 __data_loc_parent;
+ u32 __data_loc_pm_ops;
+ int event;
+ char __data[0];
+};
+
+struct trace_event_raw_device_pm_callback_end {
+ struct trace_entry ent;
+ u32 __data_loc_device;
+ u32 __data_loc_driver;
+ int error;
+ char __data[0];
+};
+
+struct trace_event_raw_suspend_resume {
+ struct trace_entry ent;
+ const char *action;
+ int val;
+ bool start;
+ char __data[0];
+};
+
+struct trace_event_raw_wakeup_source {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ u64 state;
+ char __data[0];
+};
+
+struct trace_event_raw_clock {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ u64 state;
+ u64 cpu_id;
+ char __data[0];
+};
+
+struct trace_event_raw_power_domain {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ u64 state;
+ u64 cpu_id;
+ char __data[0];
+};
+
+struct trace_event_raw_cpu_latency_qos_request {
+ struct trace_entry ent;
+ s32 value;
+ char __data[0];
+};
+
+struct trace_event_raw_pm_qos_update {
+ struct trace_entry ent;
+ enum pm_qos_req_action action;
+ int prev_value;
+ int curr_value;
+ char __data[0];
+};
+
+struct trace_event_raw_dev_pm_qos_request {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ enum dev_pm_qos_req_type type;
+ s32 new_value;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_cpu {};
+
+struct trace_event_data_offsets_powernv_throttle {
+ u32 reason;
+};
+
+struct trace_event_data_offsets_pstate_sample {};
+
+struct trace_event_data_offsets_cpu_frequency_limits {};
+
+struct trace_event_data_offsets_device_pm_callback_start {
+ u32 device;
+ u32 driver;
+ u32 parent;
+ u32 pm_ops;
+};
+
+struct trace_event_data_offsets_device_pm_callback_end {
+ u32 device;
+ u32 driver;
+};
+
+struct trace_event_data_offsets_suspend_resume {};
+
+struct trace_event_data_offsets_wakeup_source {
+ u32 name;
+};
+
+struct trace_event_data_offsets_clock {
+ u32 name;
+};
+
+struct trace_event_data_offsets_power_domain {
+ u32 name;
+};
+
+struct trace_event_data_offsets_cpu_latency_qos_request {};
+
+struct trace_event_data_offsets_pm_qos_update {};
+
+struct trace_event_data_offsets_dev_pm_qos_request {
+ u32 name;
+};
+
+typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int);
+
+typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32);
+
+typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *);
+
+typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int);
+
+typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int);
+
+typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool);
+
+typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int);
+
+typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int);
+
+typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_pm_qos_add_request)(void *, s32);
+
+typedef void (*btf_trace_pm_qos_update_request)(void *, s32);
+
+typedef void (*btf_trace_pm_qos_remove_request)(void *, s32);
+
+typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int);
+
+typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int);
+
+typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32);
+
+typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32);
+
+typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32);
+
+typedef int (*dynevent_check_arg_fn_t)(void *);
+
+struct trace_probe_log {
+ const char *subsystem;
+ const char **argv;
+ int argc;
+ int index;
+};
+
+enum uprobe_filter_ctx {
+ UPROBE_FILTER_REGISTER = 0,
+ UPROBE_FILTER_UNREGISTER = 1,
+ UPROBE_FILTER_MMAP = 2,
+};
+
+struct uprobe_consumer {
+ int (*handler)(struct uprobe_consumer *, struct pt_regs *);
+ int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *);
+ bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *);
+ struct uprobe_consumer *next;
+};
+
+struct uprobe_trace_entry_head {
+ struct trace_entry ent;
+ long unsigned int vaddr[0];
+};
+
+struct trace_uprobe {
+ struct dyn_event devent;
+ struct uprobe_consumer consumer;
+ struct path path;
+ struct inode *inode;
+ char *filename;
+ long unsigned int offset;
+ long unsigned int ref_ctr_offset;
+ long unsigned int nhit;
+ struct trace_probe tp;
+};
+
+struct uprobe_dispatch_data {
+ struct trace_uprobe *tu;
+ long unsigned int bp_addr;
+};
+
+struct uprobe_cpu_buffer {
+ struct mutex mutex;
+ void *buf;
+};
+
+typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *);
+
+struct rnd_state {
+ __u32 s1;
+ __u32 s2;
+ __u32 s3;
+ __u32 s4;
+};
+
+enum xdp_action {
+ XDP_ABORTED = 0,
+ XDP_DROP = 1,
+ XDP_PASS = 2,
+ XDP_TX = 3,
+ XDP_REDIRECT = 4,
+};
+
+enum xdp_mem_type {
+ MEM_TYPE_PAGE_SHARED = 0,
+ MEM_TYPE_PAGE_ORDER0 = 1,
+ MEM_TYPE_PAGE_POOL = 2,
+ MEM_TYPE_XSK_BUFF_POOL = 3,
+ MEM_TYPE_MAX = 4,
+};
+
+typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int);
+
+struct bpf_prog_dummy {
+ struct bpf_prog prog;
+};
+
+typedef u64 (*btf_bpf_user_rnd_u32)();
+
+typedef u64 (*btf_bpf_get_raw_cpu_id)();
+
+struct _bpf_dtab_netdev {
+ struct net_device *dev;
+};
+
+struct zero_copy_allocator;
+
+struct page_pool;
+
+struct xdp_mem_allocator {
+ struct xdp_mem_info mem;
+ union {
+ void *allocator;
+ struct page_pool *page_pool;
+ struct zero_copy_allocator *zc_alloc;
+ };
+ struct rhash_head node;
+ struct callback_head rcu;
+};
+
+struct trace_event_raw_xdp_exception {
+ struct trace_entry ent;
+ int prog_id;
+ u32 act;
+ int ifindex;
+ char __data[0];
+};
+
+struct trace_event_raw_xdp_bulk_tx {
+ struct trace_entry ent;
+ int ifindex;
+ u32 act;
+ int drops;
+ int sent;
+ int err;
+ char __data[0];
+};
+
+struct trace_event_raw_xdp_redirect_template {
+ struct trace_entry ent;
+ int prog_id;
+ u32 act;
+ int ifindex;
+ int err;
+ int to_ifindex;
+ u32 map_id;
+ int map_index;
+ char __data[0];
+};
+
+struct trace_event_raw_xdp_cpumap_kthread {
+ struct trace_entry ent;
+ int map_id;
+ u32 act;
+ int cpu;
+ unsigned int drops;
+ unsigned int processed;
+ int sched;
+ char __data[0];
+};
+
+struct trace_event_raw_xdp_cpumap_enqueue {
+ struct trace_entry ent;
+ int map_id;
+ u32 act;
+ int cpu;
+ unsigned int drops;
+ unsigned int processed;
+ int to_cpu;
+ char __data[0];
+};
+
+struct trace_event_raw_xdp_devmap_xmit {
+ struct trace_entry ent;
+ int from_ifindex;
+ u32 act;
+ int to_ifindex;
+ int drops;
+ int sent;
+ int err;
+ char __data[0];
+};
+
+struct trace_event_raw_mem_disconnect {
+ struct trace_entry ent;
+ const struct xdp_mem_allocator *xa;
+ u32 mem_id;
+ u32 mem_type;
+ const void *allocator;
+ char __data[0];
+};
+
+struct trace_event_raw_mem_connect {
+ struct trace_entry ent;
+ const struct xdp_mem_allocator *xa;
+ u32 mem_id;
+ u32 mem_type;
+ const void *allocator;
+ const struct xdp_rxq_info *rxq;
+ int ifindex;
+ char __data[0];
+};
+
+struct trace_event_raw_mem_return_failed {
+ struct trace_entry ent;
+ const struct page *page;
+ u32 mem_id;
+ u32 mem_type;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_xdp_exception {};
+
+struct trace_event_data_offsets_xdp_bulk_tx {};
+
+struct trace_event_data_offsets_xdp_redirect_template {};
+
+struct trace_event_data_offsets_xdp_cpumap_kthread {};
+
+struct trace_event_data_offsets_xdp_cpumap_enqueue {};
+
+struct trace_event_data_offsets_xdp_devmap_xmit {};
+
+struct trace_event_data_offsets_mem_disconnect {};
+
+struct trace_event_data_offsets_mem_connect {};
+
+struct trace_event_data_offsets_mem_return_failed {};
+
+typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32);
+
+typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int);
+
+typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32);
+
+typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32);
+
+typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32);
+
+typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32);
+
+typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int);
+
+typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int);
+
+typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int);
+
+typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *);
+
+typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *);
+
+typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *);
+
+enum bpf_cmd {
+ BPF_MAP_CREATE = 0,
+ BPF_MAP_LOOKUP_ELEM = 1,
+ BPF_MAP_UPDATE_ELEM = 2,
+ BPF_MAP_DELETE_ELEM = 3,
+ BPF_MAP_GET_NEXT_KEY = 4,
+ BPF_PROG_LOAD = 5,
+ BPF_OBJ_PIN = 6,
+ BPF_OBJ_GET = 7,
+ BPF_PROG_ATTACH = 8,
+ BPF_PROG_DETACH = 9,
+ BPF_PROG_TEST_RUN = 10,
+ BPF_PROG_GET_NEXT_ID = 11,
+ BPF_MAP_GET_NEXT_ID = 12,
+ BPF_PROG_GET_FD_BY_ID = 13,
+ BPF_MAP_GET_FD_BY_ID = 14,
+ BPF_OBJ_GET_INFO_BY_FD = 15,
+ BPF_PROG_QUERY = 16,
+ BPF_RAW_TRACEPOINT_OPEN = 17,
+ BPF_BTF_LOAD = 18,
+ BPF_BTF_GET_FD_BY_ID = 19,
+ BPF_TASK_FD_QUERY = 20,
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21,
+ BPF_MAP_FREEZE = 22,
+ BPF_BTF_GET_NEXT_ID = 23,
+ BPF_MAP_LOOKUP_BATCH = 24,
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25,
+ BPF_MAP_UPDATE_BATCH = 26,
+ BPF_MAP_DELETE_BATCH = 27,
+ BPF_LINK_CREATE = 28,
+ BPF_LINK_UPDATE = 29,
+ BPF_LINK_GET_FD_BY_ID = 30,
+ BPF_LINK_GET_NEXT_ID = 31,
+ BPF_ENABLE_STATS = 32,
+ BPF_ITER_CREATE = 33,
+};
+
+enum {
+ BPF_ANY = 0,
+ BPF_NOEXIST = 1,
+ BPF_EXIST = 2,
+ BPF_F_LOCK = 4,
+};
+
+enum {
+ BPF_F_NO_PREALLOC = 1,
+ BPF_F_NO_COMMON_LRU = 2,
+ BPF_F_NUMA_NODE = 4,
+ BPF_F_RDONLY = 8,
+ BPF_F_WRONLY = 16,
+ BPF_F_STACK_BUILD_ID = 32,
+ BPF_F_ZERO_SEED = 64,
+ BPF_F_RDONLY_PROG = 128,
+ BPF_F_WRONLY_PROG = 256,
+ BPF_F_CLONE = 512,
+ BPF_F_MMAPABLE = 1024,
+};
+
+enum bpf_stats_type {
+ BPF_STATS_RUN_TIME = 0,
+};
+
+struct bpf_prog_info {
+ __u32 type;
+ __u32 id;
+ __u8 tag[8];
+ __u32 jited_prog_len;
+ __u32 xlated_prog_len;
+ __u64 jited_prog_insns;
+ __u64 xlated_prog_insns;
+ __u64 load_time;
+ __u32 created_by_uid;
+ __u32 nr_map_ids;
+ __u64 map_ids;
+ char name[16];
+ __u32 ifindex;
+ __u32 gpl_compatible: 1;
+ __u64 netns_dev;
+ __u64 netns_ino;
+ __u32 nr_jited_ksyms;
+ __u32 nr_jited_func_lens;
+ __u64 jited_ksyms;
+ __u64 jited_func_lens;
+ __u32 btf_id;
+ __u32 func_info_rec_size;
+ __u64 func_info;
+ __u32 nr_func_info;
+ __u32 nr_line_info;
+ __u64 line_info;
+ __u64 jited_line_info;
+ __u32 nr_jited_line_info;
+ __u32 line_info_rec_size;
+ __u32 jited_line_info_rec_size;
+ __u32 nr_prog_tags;
+ __u64 prog_tags;
+ __u64 run_time_ns;
+ __u64 run_cnt;
+};
+
+struct bpf_map_info {
+ __u32 type;
+ __u32 id;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+ char name[16];
+ __u32 ifindex;
+ __u32 btf_vmlinux_value_type_id;
+ __u64 netns_dev;
+ __u64 netns_ino;
+ __u32 btf_id;
+ __u32 btf_key_type_id;
+ __u32 btf_value_type_id;
+};
+
+struct bpf_btf_info {
+ __u64 btf;
+ __u32 btf_size;
+ __u32 id;
+};
+
+struct bpf_spin_lock {
+ __u32 val;
+};
+
+struct bpf_link_primer {
+ struct bpf_link *link;
+ struct file *file;
+ int fd;
+ u32 id;
+};
+
+enum perf_bpf_event_type {
+ PERF_BPF_EVENT_UNKNOWN = 0,
+ PERF_BPF_EVENT_PROG_LOAD = 1,
+ PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ PERF_BPF_EVENT_MAX = 3,
+};
+
+struct security_hook_heads {
+ struct hlist_head binder_set_context_mgr;
+ struct hlist_head binder_transaction;
+ struct hlist_head binder_transfer_binder;
+ struct hlist_head binder_transfer_file;
+ struct hlist_head ptrace_access_check;
+ struct hlist_head ptrace_traceme;
+ struct hlist_head capget;
+ struct hlist_head capset;
+ struct hlist_head capable;
+ struct hlist_head quotactl;
+ struct hlist_head quota_on;
+ struct hlist_head syslog;
+ struct hlist_head settime;
+ struct hlist_head vm_enough_memory;
+ struct hlist_head bprm_creds_for_exec;
+ struct hlist_head bprm_creds_from_file;
+ struct hlist_head bprm_check_security;
+ struct hlist_head bprm_committing_creds;
+ struct hlist_head bprm_committed_creds;
+ struct hlist_head fs_context_dup;
+ struct hlist_head fs_context_parse_param;
+ struct hlist_head sb_alloc_security;
+ struct hlist_head sb_free_security;
+ struct hlist_head sb_free_mnt_opts;
+ struct hlist_head sb_eat_lsm_opts;
+ struct hlist_head sb_remount;
+ struct hlist_head sb_kern_mount;
+ struct hlist_head sb_show_options;
+ struct hlist_head sb_statfs;
+ struct hlist_head sb_mount;
+ struct hlist_head sb_umount;
+ struct hlist_head sb_pivotroot;
+ struct hlist_head sb_set_mnt_opts;
+ struct hlist_head sb_clone_mnt_opts;
+ struct hlist_head sb_add_mnt_opt;
+ struct hlist_head move_mount;
+ struct hlist_head dentry_init_security;
+ struct hlist_head dentry_create_files_as;
+ struct hlist_head path_notify;
+ struct hlist_head inode_alloc_security;
+ struct hlist_head inode_free_security;
+ struct hlist_head inode_init_security;
+ struct hlist_head inode_create;
+ struct hlist_head inode_link;
+ struct hlist_head inode_unlink;
+ struct hlist_head inode_symlink;
+ struct hlist_head inode_mkdir;
+ struct hlist_head inode_rmdir;
+ struct hlist_head inode_mknod;
+ struct hlist_head inode_rename;
+ struct hlist_head inode_readlink;
+ struct hlist_head inode_follow_link;
+ struct hlist_head inode_permission;
+ struct hlist_head inode_setattr;
+ struct hlist_head inode_getattr;
+ struct hlist_head inode_setxattr;
+ struct hlist_head inode_post_setxattr;
+ struct hlist_head inode_getxattr;
+ struct hlist_head inode_listxattr;
+ struct hlist_head inode_removexattr;
+ struct hlist_head inode_need_killpriv;
+ struct hlist_head inode_killpriv;
+ struct hlist_head inode_getsecurity;
+ struct hlist_head inode_setsecurity;
+ struct hlist_head inode_listsecurity;
+ struct hlist_head inode_getsecid;
+ struct hlist_head inode_copy_up;
+ struct hlist_head inode_copy_up_xattr;
+ struct hlist_head kernfs_init_security;
+ struct hlist_head file_permission;
+ struct hlist_head file_alloc_security;
+ struct hlist_head file_free_security;
+ struct hlist_head file_ioctl;
+ struct hlist_head mmap_addr;
+ struct hlist_head mmap_file;
+ struct hlist_head file_mprotect;
+ struct hlist_head file_lock;
+ struct hlist_head file_fcntl;
+ struct hlist_head file_set_fowner;
+ struct hlist_head file_send_sigiotask;
+ struct hlist_head file_receive;
+ struct hlist_head file_open;
+ struct hlist_head task_alloc;
+ struct hlist_head task_free;
+ struct hlist_head cred_alloc_blank;
+ struct hlist_head cred_free;
+ struct hlist_head cred_prepare;
+ struct hlist_head cred_transfer;
+ struct hlist_head cred_getsecid;
+ struct hlist_head kernel_act_as;
+ struct hlist_head kernel_create_files_as;
+ struct hlist_head kernel_module_request;
+ struct hlist_head kernel_load_data;
+ struct hlist_head kernel_read_file;
+ struct hlist_head kernel_post_read_file;
+ struct hlist_head task_fix_setuid;
+ struct hlist_head task_fix_setgid;
+ struct hlist_head task_setpgid;
+ struct hlist_head task_getpgid;
+ struct hlist_head task_getsid;
+ struct hlist_head task_getsecid;
+ struct hlist_head task_setnice;
+ struct hlist_head task_setioprio;
+ struct hlist_head task_getioprio;
+ struct hlist_head task_prlimit;
+ struct hlist_head task_setrlimit;
+ struct hlist_head task_setscheduler;
+ struct hlist_head task_getscheduler;
+ struct hlist_head task_movememory;
+ struct hlist_head task_kill;
+ struct hlist_head task_prctl;
+ struct hlist_head task_to_inode;
+ struct hlist_head ipc_permission;
+ struct hlist_head ipc_getsecid;
+ struct hlist_head msg_msg_alloc_security;
+ struct hlist_head msg_msg_free_security;
+ struct hlist_head msg_queue_alloc_security;
+ struct hlist_head msg_queue_free_security;
+ struct hlist_head msg_queue_associate;
+ struct hlist_head msg_queue_msgctl;
+ struct hlist_head msg_queue_msgsnd;
+ struct hlist_head msg_queue_msgrcv;
+ struct hlist_head shm_alloc_security;
+ struct hlist_head shm_free_security;
+ struct hlist_head shm_associate;
+ struct hlist_head shm_shmctl;
+ struct hlist_head shm_shmat;
+ struct hlist_head sem_alloc_security;
+ struct hlist_head sem_free_security;
+ struct hlist_head sem_associate;
+ struct hlist_head sem_semctl;
+ struct hlist_head sem_semop;
+ struct hlist_head netlink_send;
+ struct hlist_head d_instantiate;
+ struct hlist_head getprocattr;
+ struct hlist_head setprocattr;
+ struct hlist_head ismaclabel;
+ struct hlist_head secid_to_secctx;
+ struct hlist_head secctx_to_secid;
+ struct hlist_head release_secctx;
+ struct hlist_head inode_invalidate_secctx;
+ struct hlist_head inode_notifysecctx;
+ struct hlist_head inode_setsecctx;
+ struct hlist_head inode_getsecctx;
+ struct hlist_head audit_rule_init;
+ struct hlist_head audit_rule_known;
+ struct hlist_head audit_rule_match;
+ struct hlist_head audit_rule_free;
+ struct hlist_head bpf;
+ struct hlist_head bpf_map;
+ struct hlist_head bpf_prog;
+ struct hlist_head bpf_map_alloc_security;
+ struct hlist_head bpf_map_free_security;
+ struct hlist_head bpf_prog_alloc_security;
+ struct hlist_head bpf_prog_free_security;
+ struct hlist_head locked_down;
+ struct hlist_head perf_event_open;
+ struct hlist_head perf_event_alloc;
+ struct hlist_head perf_event_free;
+ struct hlist_head perf_event_read;
+ struct hlist_head perf_event_write;
+};
+
+struct lsm_blob_sizes {
+ int lbs_cred;
+ int lbs_file;
+ int lbs_inode;
+ int lbs_ipc;
+ int lbs_msg_msg;
+ int lbs_task;
+};
+
+enum lsm_order {
+ LSM_ORDER_FIRST = -1,
+ LSM_ORDER_MUTABLE = 0,
+};
+
+struct lsm_info {
+ const char *name;
+ enum lsm_order order;
+ long unsigned int flags;
+ int *enabled;
+ int (*init)();
+ struct lsm_blob_sizes *blobs;
+};
+
+enum bpf_audit {
+ BPF_AUDIT_LOAD = 0,
+ BPF_AUDIT_UNLOAD = 1,
+ BPF_AUDIT_MAX = 2,
+};
+
+struct bpf_tracing_link {
+ struct bpf_link link;
+ enum bpf_attach_type attach_type;
+};
+
+struct bpf_raw_tp_link {
+ struct bpf_link link;
+ struct bpf_raw_event_map *btp;
+};
+
+struct btf_member {
+ __u32 name_off;
+ __u32 type;
+ __u32 offset;
+};
+
+enum btf_func_linkage {
+ BTF_FUNC_STATIC = 0,
+ BTF_FUNC_GLOBAL = 1,
+ BTF_FUNC_EXTERN = 2,
+};
+
+struct bpf_verifier_log {
+ u32 level;
+ char kbuf[1024];
+ char *ubuf;
+ u32 len_used;
+ u32 len_total;
+};
+
+struct bpf_subprog_info {
+ u32 start;
+ u32 linfo_idx;
+ u16 stack_depth;
+};
+
+struct bpf_verifier_stack_elem;
+
+struct bpf_verifier_state;
+
+struct bpf_verifier_state_list;
+
+struct bpf_insn_aux_data;
+
+struct bpf_verifier_env {
+ u32 insn_idx;
+ u32 prev_insn_idx;
+ struct bpf_prog *prog;
+ const struct bpf_verifier_ops *ops;
+ struct bpf_verifier_stack_elem *head;
+ int stack_size;
+ bool strict_alignment;
+ bool test_state_freq;
+ struct bpf_verifier_state *cur_state;
+ struct bpf_verifier_state_list **explored_states;
+ struct bpf_verifier_state_list *free_list;
+ struct bpf_map *used_maps[64];
+ u32 used_map_cnt;
+ u32 id_gen;
+ bool allow_ptr_leaks;
+ bool allow_ptr_to_map_access;
+ bool bpf_capable;
+ bool bypass_spec_v1;
+ bool bypass_spec_v4;
+ bool seen_direct_write;
+ struct bpf_insn_aux_data *insn_aux_data;
+ const struct bpf_line_info *prev_linfo;
+ struct bpf_verifier_log log;
+ struct bpf_subprog_info subprog_info[257];
+ struct {
+ int *insn_state;
+ int *insn_stack;
+ int cur_stack;
+ } cfg;
+ u32 pass_cnt;
+ u32 subprog_cnt;
+ u32 prev_insn_processed;
+ u32 insn_processed;
+ u32 prev_jmps_processed;
+ u32 jmps_processed;
+ u64 verification_time;
+ u32 max_states_per_insn;
+ u32 total_states;
+ u32 peak_states;
+ u32 longest_mark_read_walk;
+};
+
+struct bpf_struct_ops {
+ const struct bpf_verifier_ops *verifier_ops;
+ int (*init)(struct btf *);
+ int (*check_member)(const struct btf_type *, const struct btf_member *);
+ int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *);
+ int (*reg)(void *);
+ void (*unreg)(void *);
+ const struct btf_type *type;
+ const struct btf_type *value_type;
+ const char *name;
+ struct btf_func_model func_models[64];
+ u32 type_id;
+ u32 value_id;
+};
+
+typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *);
+
+struct tnum {
+ u64 value;
+ u64 mask;
+};
+
+enum bpf_reg_liveness {
+ REG_LIVE_NONE = 0,
+ REG_LIVE_READ32 = 1,
+ REG_LIVE_READ64 = 2,
+ REG_LIVE_READ = 3,
+ REG_LIVE_WRITTEN = 4,
+ REG_LIVE_DONE = 8,
+};
+
+struct bpf_reg_state {
+ enum bpf_reg_type type;
+ union {
+ u16 range;
+ struct bpf_map *map_ptr;
+ u32 btf_id;
+ u32 mem_size;
+ long unsigned int raw;
+ };
+ s32 off;
+ u32 id;
+ u32 ref_obj_id;
+ struct tnum var_off;
+ s64 smin_value;
+ s64 smax_value;
+ u64 umin_value;
+ u64 umax_value;
+ s32 s32_min_value;
+ s32 s32_max_value;
+ u32 u32_min_value;
+ u32 u32_max_value;
+ struct bpf_reg_state *parent;
+ u32 frameno;
+ s32 subreg_def;
+ enum bpf_reg_liveness live;
+ bool precise;
+};
+
+enum bpf_stack_slot_type {
+ STACK_INVALID = 0,
+ STACK_SPILL = 1,
+ STACK_MISC = 2,
+ STACK_ZERO = 3,
+};
+
+struct bpf_stack_state {
+ struct bpf_reg_state spilled_ptr;
+ u8 slot_type[8];
+};
+
+struct bpf_reference_state {
+ int id;
+ int insn_idx;
+};
+
+struct bpf_func_state {
+ struct bpf_reg_state regs[11];
+ int callsite;
+ u32 frameno;
+ u32 subprogno;
+ int acquired_refs;
+ struct bpf_reference_state *refs;
+ int allocated_stack;
+ struct bpf_stack_state *stack;
+};
+
+struct bpf_idx_pair {
+ u32 prev_idx;
+ u32 idx;
+};
+
+struct bpf_verifier_state {
+ struct bpf_func_state *frame[8];
+ struct bpf_verifier_state *parent;
+ u32 branches;
+ u32 insn_idx;
+ u32 curframe;
+ u32 active_spin_lock;
+ bool speculative;
+ u32 first_insn_idx;
+ u32 last_insn_idx;
+ struct bpf_idx_pair *jmp_history;
+ u32 jmp_history_cnt;
+};
+
+struct bpf_verifier_state_list {
+ struct bpf_verifier_state state;
+ struct bpf_verifier_state_list *next;
+ int miss_cnt;
+ int hit_cnt;
+};
+
+struct bpf_insn_aux_data {
+ union {
+ enum bpf_reg_type ptr_type;
+ long unsigned int map_ptr_state;
+ s32 call_imm;
+ u32 alu_limit;
+ struct {
+ u32 map_index;
+ u32 map_off;
+ };
+ };
+ u64 map_key_state;
+ int ctx_field_size;
+ int sanitize_stack_off;
+ u32 seen;
+ bool zext_dst;
+ u8 alu_state;
+ unsigned int orig_idx;
+ bool prune_point;
+};
+
+struct bpf_verifier_stack_elem {
+ struct bpf_verifier_state st;
+ int insn_idx;
+ int prev_insn_idx;
+ struct bpf_verifier_stack_elem *next;
+ u32 log_pos;
+};
+
+typedef void (*bpf_insn_print_t)(void *, const char *, ...);
+
+typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *);
+
+typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64);
+
+struct bpf_insn_cbs {
+ bpf_insn_print_t cb_print;
+ bpf_insn_revmap_call_t cb_call;
+ bpf_insn_print_imm_t cb_imm;
+ void *private_data;
+};
+
+struct bpf_call_arg_meta {
+ struct bpf_map *map_ptr;
+ bool raw_mode;
+ bool pkt_access;
+ int regno;
+ int access_size;
+ int mem_size;
+ u64 msize_max_value;
+ int ref_obj_id;
+ int func_id;
+ u32 btf_id;
+};
+
+enum reg_arg_type {
+ SRC_OP = 0,
+ DST_OP = 1,
+ DST_OP_NO_MARK = 2,
+};
+
+enum {
+ DISCOVERED = 16,
+ EXPLORED = 32,
+ FALLTHROUGH = 1,
+ BRANCH = 2,
+};
+
+struct idpair {
+ u32 old;
+ u32 cur;
+};
+
+struct tree_descr {
+ const char *name;
+ const struct file_operations *ops;
+ int mode;
+};
+
+enum bpf_type {
+ BPF_TYPE_UNSPEC = 0,
+ BPF_TYPE_PROG = 1,
+ BPF_TYPE_MAP = 2,
+ BPF_TYPE_LINK = 3,
+};
+
+struct map_iter {
+ void *key;
+ bool done;
+};
+
+enum {
+ OPT_MODE = 0,
+};
+
+struct bpf_mount_opts {
+ umode_t mode;
+};
+
+struct bpf_pidns_info {
+ __u32 pid;
+ __u32 tgid;
+};
+
+typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *);
+
+typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64);
+
+typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *);
+
+typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64);
+
+typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *);
+
+typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *);
+
+typedef u64 (*btf_bpf_get_smp_processor_id)();
+
+typedef u64 (*btf_bpf_get_numa_node_id)();
+
+typedef u64 (*btf_bpf_ktime_get_ns)();
+
+typedef u64 (*btf_bpf_ktime_get_boot_ns)();
+
+typedef u64 (*btf_bpf_get_current_pid_tgid)();
+
+typedef u64 (*btf_bpf_get_current_uid_gid)();
+
+typedef u64 (*btf_bpf_get_current_comm)(char *, u32);
+
+typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *);
+
+typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *);
+
+typedef u64 (*btf_bpf_jiffies64)();
+
+typedef u64 (*btf_bpf_get_current_cgroup_id)();
+
+typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int);
+
+typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64);
+
+typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, long int *);
+
+typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, long unsigned int *);
+
+typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32);
+
+typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64);
+
+typedef int (*bpf_iter_init_seq_priv_t)(void *);
+
+typedef void (*bpf_iter_fini_seq_priv_t)(void *);
+
+struct bpf_iter_reg {
+ const char *target;
+ const struct seq_operations *seq_ops;
+ bpf_iter_init_seq_priv_t init_seq_private;
+ bpf_iter_fini_seq_priv_t fini_seq_private;
+ u32 seq_priv_size;
+ u32 ctx_arg_info_size;
+ struct bpf_ctx_arg_aux ctx_arg_info[2];
+};
+
+struct bpf_iter_meta {
+ union {
+ struct seq_file *seq;
+ };
+ u64 session_id;
+ u64 seq_num;
+};
+
+struct bpf_iter_target_info {
+ struct list_head list;
+ const struct bpf_iter_reg *reg_info;
+ u32 btf_id;
+};
+
+struct bpf_iter_link {
+ struct bpf_link link;
+ struct bpf_iter_target_info *tinfo;
+};
+
+struct bpf_iter_priv_data {
+ struct bpf_iter_target_info *tinfo;
+ struct bpf_prog *prog;
+ u64 session_id;
+ u64 seq_num;
+ bool done_stop;
+ long: 56;
+ u8 target_private[0];
+};
+
+struct bpf_iter_seq_map_info {
+ u32 mid;
+};
+
+struct bpf_iter__bpf_map {
+ union {
+ struct bpf_iter_meta *meta;
+ };
+ union {
+ struct bpf_map *map;
+ };
+};
+
+struct bpf_iter_seq_task_common {
+ struct pid_namespace *ns;
+};
+
+struct bpf_iter_seq_task_info {
+ struct bpf_iter_seq_task_common common;
+ u32 tid;
+};
+
+struct bpf_iter__task {
+ union {
+ struct bpf_iter_meta *meta;
+ };
+ union {
+ struct task_struct *task;
+ };
+};
+
+struct bpf_iter_seq_task_file_info {
+ struct bpf_iter_seq_task_common common;
+ struct task_struct *task;
+ struct files_struct *files;
+ u32 tid;
+ u32 fd;
+};
+
+struct bpf_iter__task_file {
+ union {
+ struct bpf_iter_meta *meta;
+ };
+ union {
+ struct task_struct *task;
+ };
+ u32 fd;
+ union {
+ struct file *file;
+ };
+};
+
+struct pcpu_freelist_node;
+
+struct pcpu_freelist_head {
+ struct pcpu_freelist_node *first;
+ raw_spinlock_t lock;
+};
+
+struct pcpu_freelist_node {
+ struct pcpu_freelist_node *next;
+};
+
+struct pcpu_freelist {
+ struct pcpu_freelist_head *freelist;
+};
+
+struct bpf_lru_node {
+ struct list_head list;
+ u16 cpu;
+ u8 type;
+ u8 ref;
+};
+
+struct bpf_lru_list {
+ struct list_head lists[3];
+ unsigned int counts[2];
+ struct list_head *next_inactive_rotation;
+ raw_spinlock_t lock;
+};
+
+struct bpf_lru_locallist {
+ struct list_head lists[2];
+ u16 next_steal;
+ raw_spinlock_t lock;
+};
+
+struct bpf_common_lru {
+ struct bpf_lru_list lru_list;
+ struct bpf_lru_locallist *local_list;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *);
+
+struct bpf_lru {
+ union {
+ struct bpf_common_lru common_lru;
+ struct bpf_lru_list *percpu_lru;
+ };
+ del_from_htab_func del_from_htab;
+ void *del_arg;
+ unsigned int hash_offset;
+ unsigned int nr_scans;
+ bool percpu;
+ long: 56;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct bucket {
+ struct hlist_nulls_head head;
+ union {
+ raw_spinlock_t raw_lock;
+ spinlock_t lock;
+ };
+};
+
+struct htab_elem;
+
+struct bpf_htab {
+ struct bpf_map map;
+ struct bucket *buckets;
+ void *elems;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ union {
+ struct pcpu_freelist freelist;
+ struct bpf_lru lru;
+ };
+ struct htab_elem **extra_elems;
+ atomic_t count;
+ u32 n_buckets;
+ u32 elem_size;
+ u32 hashrnd;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct htab_elem {
+ union {
+ struct hlist_nulls_node hash_node;
+ struct {
+ void *padding;
+ union {
+ struct bpf_htab *htab;
+ struct pcpu_freelist_node fnode;
+ struct htab_elem *batch_flink;
+ };
+ };
+ };
+ union {
+ struct callback_head rcu;
+ struct bpf_lru_node lru_node;
+ };
+ u32 hash;
+ int: 32;
+ char key[0];
+};
+
+struct prog_poke_elem {
+ struct list_head list;
+ struct bpf_prog_aux *aux;
+};
+
+enum bpf_lru_list_type {
+ BPF_LRU_LIST_T_ACTIVE = 0,
+ BPF_LRU_LIST_T_INACTIVE = 1,
+ BPF_LRU_LIST_T_FREE = 2,
+ BPF_LRU_LOCAL_LIST_T_FREE = 3,
+ BPF_LRU_LOCAL_LIST_T_PENDING = 4,
+};
+
+struct bpf_lpm_trie_key {
+ __u32 prefixlen;
+ __u8 data[0];
+};
+
+struct lpm_trie_node {
+ struct callback_head rcu;
+ struct lpm_trie_node *child[2];
+ u32 prefixlen;
+ u32 flags;
+ u8 data[0];
+};
+
+struct lpm_trie {
+ struct bpf_map map;
+ struct lpm_trie_node *root;
+ size_t n_entries;
+ size_t max_prefixlen;
+ size_t data_size;
+ spinlock_t lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct bpf_cgroup_storage_map {
+ struct bpf_map map;
+ spinlock_t lock;
+ struct bpf_prog_aux *aux;
+ struct rb_root root;
+ struct list_head list;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct bpf_queue_stack {
+ struct bpf_map map;
+ raw_spinlock_t lock;
+ u32 head;
+ u32 tail;
+ u32 size;
+ int: 32;
+ char elements[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum {
+ BPF_RB_NO_WAKEUP = 1,
+ BPF_RB_FORCE_WAKEUP = 2,
+};
+
+enum {
+ BPF_RB_AVAIL_DATA = 0,
+ BPF_RB_RING_SIZE = 1,
+ BPF_RB_CONS_POS = 2,
+ BPF_RB_PROD_POS = 3,
+};
+
+enum {
+ BPF_RINGBUF_BUSY_BIT = -2147483648,
+ BPF_RINGBUF_DISCARD_BIT = 1073741824,
+ BPF_RINGBUF_HDR_SZ = 8,
+};
+
+struct bpf_ringbuf {
+ wait_queue_head_t waitq;
+ struct irq_work work;
+ u64 mask;
+ struct page **pages;
+ int nr_pages;
+ spinlock_t spinlock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long unsigned int consumer_pos;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long unsigned int producer_pos;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ char data[0];
+};
+
+struct bpf_ringbuf_map {
+ struct bpf_map map;
+ struct bpf_map_memory memory;
+ struct bpf_ringbuf *rb;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct bpf_ringbuf_hdr {
+ u32 len;
+ u32 pg_off;
+};
+
+typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64);
+
+typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64);
+
+typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64);
+
+typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64);
+
+typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64);
+
+enum perf_record_ksymbol_type {
+ PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
+ PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ PERF_RECORD_KSYMBOL_TYPE_MAX = 2,
+};
+
+struct btf_enum {
+ __u32 name_off;
+ __s32 val;
+};
+
+struct btf_array {
+ __u32 type;
+ __u32 index_type;
+ __u32 nelems;
+};
+
+struct btf_param {
+ __u32 name_off;
+ __u32 type;
+};
+
+enum {
+ BTF_VAR_STATIC = 0,
+ BTF_VAR_GLOBAL_ALLOCATED = 1,
+ BTF_VAR_GLOBAL_EXTERN = 2,
+};
+
+struct btf_var {
+ __u32 linkage;
+};
+
+struct btf_var_secinfo {
+ __u32 type;
+ __u32 offset;
+ __u32 size;
+};
+
+struct bpf_flow_keys {
+ __u16 nhoff;
+ __u16 thoff;
+ __u16 addr_proto;
+ __u8 is_frag;
+ __u8 is_first_frag;
+ __u8 is_encap;
+ __u8 ip_proto;
+ __be16 n_proto;
+ __be16 sport;
+ __be16 dport;
+ union {
+ struct {
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+ };
+ struct {
+ __u32 ipv6_src[4];
+ __u32 ipv6_dst[4];
+ };
+ };
+ __u32 flags;
+ __be32 flow_label;
+};
+
+struct bpf_sock {
+ __u32 bound_dev_if;
+ __u32 family;
+ __u32 type;
+ __u32 protocol;
+ __u32 mark;
+ __u32 priority;
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port;
+ __u32 dst_port;
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+ __s32 rx_queue_mapping;
+};
+
+struct __sk_buff {
+ __u32 len;
+ __u32 pkt_type;
+ __u32 mark;
+ __u32 queue_mapping;
+ __u32 protocol;
+ __u32 vlan_present;
+ __u32 vlan_tci;
+ __u32 vlan_proto;
+ __u32 priority;
+ __u32 ingress_ifindex;
+ __u32 ifindex;
+ __u32 tc_index;
+ __u32 cb[5];
+ __u32 hash;
+ __u32 tc_classid;
+ __u32 data;
+ __u32 data_end;
+ __u32 napi_id;
+ __u32 family;
+ __u32 remote_ip4;
+ __u32 local_ip4;
+ __u32 remote_ip6[4];
+ __u32 local_ip6[4];
+ __u32 remote_port;
+ __u32 local_port;
+ __u32 data_meta;
+ union {
+ struct bpf_flow_keys *flow_keys;
+ };
+ __u64 tstamp;
+ __u32 wire_len;
+ __u32 gso_segs;
+ union {
+ struct bpf_sock *sk;
+ };
+ __u32 gso_size;
+};
+
+struct xdp_md {
+ __u32 data;
+ __u32 data_end;
+ __u32 data_meta;
+ __u32 ingress_ifindex;
+ __u32 rx_queue_index;
+ __u32 egress_ifindex;
+};
+
+struct sk_msg_md {
+ union {
+ void *data;
+ };
+ union {
+ void *data_end;
+ };
+ __u32 family;
+ __u32 remote_ip4;
+ __u32 local_ip4;
+ __u32 remote_ip6[4];
+ __u32 local_ip6[4];
+ __u32 remote_port;
+ __u32 local_port;
+ __u32 size;
+ union {
+ struct bpf_sock *sk;
+ };
+};
+
+struct sk_reuseport_md {
+ union {
+ void *data;
+ };
+ union {
+ void *data_end;
+ };
+ __u32 len;
+ __u32 eth_protocol;
+ __u32 ip_protocol;
+ __u32 bind_inany;
+ __u32 hash;
+};
+
+struct bpf_sock_addr {
+ __u32 user_family;
+ __u32 user_ip4;
+ __u32 user_ip6[4];
+ __u32 user_port;
+ __u32 family;
+ __u32 type;
+ __u32 protocol;
+ __u32 msg_src_ip4;
+ __u32 msg_src_ip6[4];
+ union {
+ struct bpf_sock *sk;
+ };
+};
+
+struct bpf_sock_ops {
+ __u32 op;
+ union {
+ __u32 args[4];
+ __u32 reply;
+ __u32 replylong[4];
+ };
+ __u32 family;
+ __u32 remote_ip4;
+ __u32 local_ip4;
+ __u32 remote_ip6[4];
+ __u32 local_ip6[4];
+ __u32 remote_port;
+ __u32 local_port;
+ __u32 is_fullsock;
+ __u32 snd_cwnd;
+ __u32 srtt_us;
+ __u32 bpf_sock_ops_cb_flags;
+ __u32 state;
+ __u32 rtt_min;
+ __u32 snd_ssthresh;
+ __u32 rcv_nxt;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 mss_cache;
+ __u32 ecn_flags;
+ __u32 rate_delivered;
+ __u32 rate_interval_us;
+ __u32 packets_out;
+ __u32 retrans_out;
+ __u32 total_retrans;
+ __u32 segs_in;
+ __u32 data_segs_in;
+ __u32 segs_out;
+ __u32 data_segs_out;
+ __u32 lost_out;
+ __u32 sacked_out;
+ __u32 sk_txhash;
+ __u64 bytes_received;
+ __u64 bytes_acked;
+ union {
+ struct bpf_sock *sk;
+ };
+};
+
+struct bpf_cgroup_dev_ctx {
+ __u32 access_type;
+ __u32 major;
+ __u32 minor;
+};
+
+struct bpf_sysctl {
+ __u32 write;
+ __u32 file_pos;
+};
+
+struct bpf_sockopt {
+ union {
+ struct bpf_sock *sk;
+ };
+ union {
+ void *optval;
+ };
+ union {
+ void *optval_end;
+ };
+ __s32 level;
+ __s32 optname;
+ __s32 optlen;
+ __s32 retval;
+};
+
+struct sk_reuseport_kern {
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct sock *selected_sk;
+ void *data_end;
+ u32 hash;
+ u32 reuseport_id;
+ bool bind_inany;
+};
+
+struct bpf_flow_dissector {
+ struct bpf_flow_keys *flow_keys;
+ const struct sk_buff *skb;
+ void *data;
+ void *data_end;
+};
+
+struct flowi4 {
+ struct flowi_common __fl_common;
+ __be32 saddr;
+ __be32 daddr;
+ union flowi_uli uli;
+};
+
+struct flowidn {
+ struct flowi_common __fl_common;
+ __le16 daddr;
+ __le16 saddr;
+ union flowi_uli uli;
+};
+
+struct flowi {
+ union {
+ struct flowi_common __fl_common;
+ struct flowi4 ip4;
+ struct flowi6 ip6;
+ struct flowidn dn;
+ } u;
+};
+
+struct inet_listen_hashbucket {
+ spinlock_t lock;
+ unsigned int count;
+ union {
+ struct hlist_head head;
+ struct hlist_nulls_head nulls_head;
+ };
+};
+
+struct inet_ehash_bucket;
+
+struct inet_bind_hashbucket;
+
+struct inet_hashinfo {
+ struct inet_ehash_bucket *ehash;
+ spinlock_t *ehash_locks;
+ unsigned int ehash_mask;
+ unsigned int ehash_locks_mask;
+ struct kmem_cache *bind_bucket_cachep;
+ struct inet_bind_hashbucket *bhash;
+ unsigned int bhash_size;
+ unsigned int lhash2_mask;
+ struct inet_listen_hashbucket *lhash2;
+ long: 64;
+ struct inet_listen_hashbucket listening_hash[32];
+};
+
+struct ip_ra_chain {
+ struct ip_ra_chain *next;
+ struct sock *sk;
+ union {
+ void (*destructor)(struct sock *);
+ struct sock *saved_sk;
+ };
+ struct callback_head rcu;
+};
+
+struct inet_peer_base {
+ struct rb_root rb_root;
+ seqlock_t lock;
+ int total;
+};
+
+struct tcp_fastopen_context {
+ siphash_key_t key[2];
+ int num;
+ struct callback_head rcu;
+};
+
+struct in_addr {
+ __be32 s_addr;
+};
+
+struct xdp_txq_info {
+ struct net_device *dev;
+};
+
+struct xdp_buff {
+ void *data;
+ void *data_end;
+ void *data_meta;
+ void *data_hard_start;
+ long unsigned int handle;
+ struct xdp_rxq_info *rxq;
+ struct xdp_txq_info *txq;
+ u32 frame_sz;
+};
+
+struct bpf_sock_addr_kern {
+ struct sock *sk;
+ struct sockaddr *uaddr;
+ u64 tmp_reg;
+ void *t_ctx;
+};
+
+struct bpf_sock_ops_kern {
+ struct sock *sk;
+ u32 op;
+ union {
+ u32 args[4];
+ u32 reply;
+ u32 replylong[4];
+ };
+ u32 is_fullsock;
+ u64 temp;
+};
+
+struct bpf_sysctl_kern {
+ struct ctl_table_header *head;
+ struct ctl_table *table;
+ void *cur_val;
+ size_t cur_len;
+ void *new_val;
+ size_t new_len;
+ int new_updated;
+ int write;
+ loff_t *ppos;
+ u64 tmp_reg;
+};
+
+struct bpf_sockopt_kern {
+ struct sock *sk;
+ u8 *optval;
+ u8 *optval_end;
+ s32 level;
+ s32 optname;
+ s32 optlen;
+ s32 retval;
+};
+
+struct sock_reuseport {
+ struct callback_head rcu;
+ u16 max_socks;
+ u16 num_socks;
+ unsigned int synq_overflow_ts;
+ unsigned int reuseport_id;
+ unsigned int bind_inany: 1;
+ unsigned int has_conns: 1;
+ struct bpf_prog *prog;
+ struct sock *socks[0];
+};
+
+struct tcp_fastopen_cookie {
+ __le64 val[2];
+ s8 len;
+ bool exp;
+};
+
+enum tcp_synack_type {
+ TCP_SYNACK_NORMAL = 0,
+ TCP_SYNACK_FASTOPEN = 1,
+ TCP_SYNACK_COOKIE = 2,
+};
+
+struct tcp_md5sig_key;
+
+struct tcp_request_sock_ops {
+ u16 mss_clamp;
+ struct tcp_md5sig_key * (*req_md5_lookup)(const struct sock *, const struct sock *);
+ int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *);
+ void (*init_req)(struct request_sock *, const struct sock *, struct sk_buff *);
+ __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *);
+ struct dst_entry * (*route_req)(const struct sock *, struct flowi *, const struct request_sock *);
+ u32 (*init_seq)(const struct sk_buff *);
+ u32 (*init_ts_off)(const struct net *, const struct sk_buff *);
+ int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type);
+};
+
+union tcp_md5_addr {
+ struct in_addr a4;
+ struct in6_addr a6;
+};
+
+struct tcp_md5sig_key {
+ struct hlist_node node;
+ u8 keylen;
+ u8 family;
+ u8 prefixlen;
+ union tcp_md5_addr addr;
+ int l3index;
+ u8 key[80];
+ struct callback_head rcu;
+};
+
+struct ip_rt_acct {
+ __u32 o_bytes;
+ __u32 o_packets;
+ __u32 i_bytes;
+ __u32 i_packets;
+};
+
+struct inet_ehash_bucket {
+ struct hlist_nulls_head chain;
+};
+
+struct inet_bind_hashbucket {
+ spinlock_t lock;
+ struct hlist_head chain;
+};
+
+struct ack_sample {
+ u32 pkts_acked;
+ s32 rtt_us;
+ u32 in_flight;
+};
+
+struct rate_sample {
+ u64 prior_mstamp;
+ u32 prior_delivered;
+ s32 delivered;
+ long int interval_us;
+ u32 snd_interval_us;
+ u32 rcv_interval_us;
+ long int rtt_us;
+ int losses;
+ u32 acked_sacked;
+ u32 prior_in_flight;
+ bool is_app_limited;
+ bool is_retrans;
+ bool is_ack_delayed;
+};
+
+struct sk_msg_sg {
+ u32 start;
+ u32 curr;
+ u32 end;
+ u32 size;
+ u32 copybreak;
+ long unsigned int copy;
+ struct scatterlist data[19];
+};
+
+struct sk_msg {
+ struct sk_msg_sg sg;
+ void *data;
+ void *data_end;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 flags;
+ struct sk_buff *skb;
+ struct sock *sk_redir;
+ struct sock *sk;
+ struct list_head list;
+};
+
+enum verifier_phase {
+ CHECK_META = 0,
+ CHECK_TYPE = 1,
+};
+
+struct resolve_vertex {
+ const struct btf_type *t;
+ u32 type_id;
+ u16 next_member;
+};
+
+enum visit_state {
+ NOT_VISITED = 0,
+ VISITED = 1,
+ RESOLVED = 2,
+};
+
+enum resolve_mode {
+ RESOLVE_TBD = 0,
+ RESOLVE_PTR = 1,
+ RESOLVE_STRUCT_OR_ARRAY = 2,
+};
+
+struct btf_sec_info {
+ u32 off;
+ u32 len;
+};
+
+struct btf_verifier_env {
+ struct btf *btf;
+ u8 *visit_states;
+ struct resolve_vertex stack[32];
+ struct bpf_verifier_log log;
+ u32 log_type_id;
+ u32 top_stack;
+ enum verifier_phase phase;
+ enum resolve_mode resolve_mode;
+};
+
+struct btf_kind_operations {
+ s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32);
+ int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *);
+ int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *);
+ int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *);
+ void (*log_details)(struct btf_verifier_env *, const struct btf_type *);
+ void (*seq_show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct seq_file *);
+};
+
+struct bpf_ctx_convert {
+ struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
+ struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern;
+ struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog;
+ struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern;
+ struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog;
+ struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern;
+ struct xdp_md BPF_PROG_TYPE_XDP_prog;
+ struct xdp_buff BPF_PROG_TYPE_XDP_kern;
+ struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog;
+ struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern;
+ struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog;
+ struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern;
+ struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog;
+ struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern;
+ struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog;
+ struct sk_buff BPF_PROG_TYPE_LWT_IN_kern;
+ struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog;
+ struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern;
+ struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog;
+ struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern;
+ struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog;
+ struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern;
+ struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog;
+ struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern;
+ struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog;
+ struct sk_buff BPF_PROG_TYPE_SK_SKB_kern;
+ struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog;
+ struct sk_msg BPF_PROG_TYPE_SK_MSG_kern;
+ struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog;
+ struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern;
+ bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog;
+ struct pt_regs BPF_PROG_TYPE_KPROBE_kern;
+ __u64 BPF_PROG_TYPE_TRACEPOINT_prog;
+ u64 BPF_PROG_TYPE_TRACEPOINT_kern;
+ struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog;
+ struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern;
+ struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog;
+ u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern;
+ struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog;
+ u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern;
+ void *BPF_PROG_TYPE_TRACING_prog;
+ void *BPF_PROG_TYPE_TRACING_kern;
+ struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog;
+ struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern;
+ struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog;
+ struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern;
+ struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog;
+ struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern;
+ struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog;
+ struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern;
+ void *BPF_PROG_TYPE_STRUCT_OPS_prog;
+ void *BPF_PROG_TYPE_STRUCT_OPS_kern;
+ void *BPF_PROG_TYPE_EXT_prog;
+ void *BPF_PROG_TYPE_EXT_kern;
+};
+
+enum {
+ __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0,
+ __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1,
+ __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2,
+ __ctx_convertBPF_PROG_TYPE_XDP = 3,
+ __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4,
+ __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5,
+ __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6,
+ __ctx_convertBPF_PROG_TYPE_LWT_IN = 7,
+ __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8,
+ __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9,
+ __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10,
+ __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11,
+ __ctx_convertBPF_PROG_TYPE_SK_SKB = 12,
+ __ctx_convertBPF_PROG_TYPE_SK_MSG = 13,
+ __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14,
+ __ctx_convertBPF_PROG_TYPE_KPROBE = 15,
+ __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16,
+ __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17,
+ __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18,
+ __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19,
+ __ctx_convertBPF_PROG_TYPE_TRACING = 20,
+ __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21,
+ __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22,
+ __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23,
+ __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24,
+ __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 25,
+ __ctx_convertBPF_PROG_TYPE_EXT = 26,
+ __ctx_convert_unused = 27,
+};
+
+struct bpf_devmap_val {
+ __u32 ifindex;
+ union {
+ int fd;
+ __u32 id;
+ } bpf_prog;
+};
+
+enum net_device_flags {
+ IFF_UP = 1,
+ IFF_BROADCAST = 2,
+ IFF_DEBUG = 4,
+ IFF_LOOPBACK = 8,
+ IFF_POINTOPOINT = 16,
+ IFF_NOTRAILERS = 32,
+ IFF_RUNNING = 64,
+ IFF_NOARP = 128,
+ IFF_PROMISC = 256,
+ IFF_ALLMULTI = 512,
+ IFF_MASTER = 1024,
+ IFF_SLAVE = 2048,
+ IFF_MULTICAST = 4096,
+ IFF_PORTSEL = 8192,
+ IFF_AUTOMEDIA = 16384,
+ IFF_DYNAMIC = 32768,
+ IFF_LOWER_UP = 65536,
+ IFF_DORMANT = 131072,
+ IFF_ECHO = 262144,
+};
+
+struct xdp_dev_bulk_queue {
+ struct xdp_frame *q[16];
+ struct list_head flush_node;
+ struct net_device *dev;
+ struct net_device *dev_rx;
+ unsigned int count;
+};
+
+enum netdev_cmd {
+ NETDEV_UP = 1,
+ NETDEV_DOWN = 2,
+ NETDEV_REBOOT = 3,
+ NETDEV_CHANGE = 4,
+ NETDEV_REGISTER = 5,
+ NETDEV_UNREGISTER = 6,
+ NETDEV_CHANGEMTU = 7,
+ NETDEV_CHANGEADDR = 8,
+ NETDEV_PRE_CHANGEADDR = 9,
+ NETDEV_GOING_DOWN = 10,
+ NETDEV_CHANGENAME = 11,
+ NETDEV_FEAT_CHANGE = 12,
+ NETDEV_BONDING_FAILOVER = 13,
+ NETDEV_PRE_UP = 14,
+ NETDEV_PRE_TYPE_CHANGE = 15,
+ NETDEV_POST_TYPE_CHANGE = 16,
+ NETDEV_POST_INIT = 17,
+ NETDEV_RELEASE = 18,
+ NETDEV_NOTIFY_PEERS = 19,
+ NETDEV_JOIN = 20,
+ NETDEV_CHANGEUPPER = 21,
+ NETDEV_RESEND_IGMP = 22,
+ NETDEV_PRECHANGEMTU = 23,
+ NETDEV_CHANGEINFODATA = 24,
+ NETDEV_BONDING_INFO = 25,
+ NETDEV_PRECHANGEUPPER = 26,
+ NETDEV_CHANGELOWERSTATE = 27,
+ NETDEV_UDP_TUNNEL_PUSH_INFO = 28,
+ NETDEV_UDP_TUNNEL_DROP_INFO = 29,
+ NETDEV_CHANGE_TX_QUEUE_LEN = 30,
+ NETDEV_CVLAN_FILTER_PUSH_INFO = 31,
+ NETDEV_CVLAN_FILTER_DROP_INFO = 32,
+ NETDEV_SVLAN_FILTER_PUSH_INFO = 33,
+ NETDEV_SVLAN_FILTER_DROP_INFO = 34,
+};
+
+struct netdev_notifier_info {
+ struct net_device *dev;
+ struct netlink_ext_ack *extack;
+};
+
+struct bpf_dtab;
+
+struct bpf_dtab_netdev {
+ struct net_device *dev;
+ struct hlist_node index_hlist;
+ struct bpf_dtab *dtab;
+ struct bpf_prog *xdp_prog;
+ struct callback_head rcu;
+ unsigned int idx;
+ struct bpf_devmap_val val;
+};
+
+struct bpf_dtab {
+ struct bpf_map map;
+ struct bpf_dtab_netdev **netdev_map;
+ struct list_head list;
+ struct hlist_head *dev_index_head;
+ spinlock_t index_lock;
+ unsigned int items;
+ u32 n_buckets;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef struct bio_vec skb_frag_t;
+
+struct skb_shared_hwtstamps {
+ ktime_t hwtstamp;
+};
+
+struct skb_shared_info {
+ __u8 __unused;
+ __u8 meta_len;
+ __u8 nr_frags;
+ __u8 tx_flags;
+ short unsigned int gso_size;
+ short unsigned int gso_segs;
+ struct sk_buff *frag_list;
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned int gso_type;
+ u32 tskey;
+ atomic_t dataref;
+ void *destructor_arg;
+ skb_frag_t frags[17];
+};
+
+struct ptr_ring {
+ int producer;
+ spinlock_t producer_lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ int consumer_head;
+ int consumer_tail;
+ spinlock_t consumer_lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ int size;
+ int batch;
+ void **queue;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct bpf_cpu_map_entry;
+
+struct xdp_bulk_queue {
+ void *q[8];
+ struct list_head flush_node;
+ struct bpf_cpu_map_entry *obj;
+ unsigned int count;
+};
+
+struct bpf_cpu_map;
+
+struct bpf_cpu_map_entry {
+ u32 cpu;
+ int map_id;
+ u32 qsize;
+ struct xdp_bulk_queue *bulkq;
+ struct bpf_cpu_map *cmap;
+ struct ptr_ring *queue;
+ struct task_struct *kthread;
+ struct work_struct kthread_stop_wq;
+ atomic_t refcnt;
+ struct callback_head rcu;
+};
+
+struct bpf_cpu_map {
+ struct bpf_map map;
+ struct bpf_cpu_map_entry **cpu_map;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head *next;
+};
+
+struct bpf_prog_offload_ops {
+ int (*insn_hook)(struct bpf_verifier_env *, int, int);
+ int (*finalize)(struct bpf_verifier_env *);
+ int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *);
+ int (*remove_insns)(struct bpf_verifier_env *, u32, u32);
+ int (*prepare)(struct bpf_prog *);
+ int (*translate)(struct bpf_prog *);
+ void (*destroy)(struct bpf_prog *);
+};
+
+struct bpf_offload_dev {
+ const struct bpf_prog_offload_ops *ops;
+ struct list_head netdevs;
+ void *priv;
+};
+
+struct bpf_offload_netdev {
+ struct rhash_head l;
+ struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
+ struct list_head progs;
+ struct list_head maps;
+ struct list_head offdev_netdevs;
+};
+
+struct ns_get_path_bpf_prog_args {
+ struct bpf_prog *prog;
+ struct bpf_prog_info *info;
+};
+
+struct ns_get_path_bpf_map_args {
+ struct bpf_offloaded_map *offmap;
+ struct bpf_map_info *info;
+};
+
+struct bpf_netns_link {
+ struct bpf_link link;
+ enum bpf_attach_type type;
+ enum netns_bpf_attach_type netns_type;
+ struct net *net;
+};
+
+enum bpf_stack_build_id_status {
+ BPF_STACK_BUILD_ID_EMPTY = 0,
+ BPF_STACK_BUILD_ID_VALID = 1,
+ BPF_STACK_BUILD_ID_IP = 2,
+};
+
+struct bpf_stack_build_id {
+ __s32 status;
+ unsigned char build_id[20];
+ union {
+ __u64 offset;
+ __u64 ip;
+ };
+};
+
+enum {
+ BPF_F_SKIP_FIELD_MASK = 255,
+ BPF_F_USER_STACK = 256,
+ BPF_F_FAST_STACK_CMP = 512,
+ BPF_F_REUSE_STACKID = 1024,
+ BPF_F_USER_BUILD_ID = 2048,
+};
+
+typedef __u32 Elf32_Addr;
+
+typedef __u16 Elf32_Half;
+
+typedef __u32 Elf32_Off;
+
+struct elf32_hdr {
+ unsigned char e_ident[16];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry;
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+};
+
+typedef struct elf32_hdr Elf32_Ehdr;
+
+struct elf32_phdr {
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+};
+
+typedef struct elf32_phdr Elf32_Phdr;
+
+struct elf64_phdr {
+ Elf64_Word p_type;
+ Elf64_Word p_flags;
+ Elf64_Off p_offset;
+ Elf64_Addr p_vaddr;
+ Elf64_Addr p_paddr;
+ Elf64_Xword p_filesz;
+ Elf64_Xword p_memsz;
+ Elf64_Xword p_align;
+};
+
+typedef struct elf64_phdr Elf64_Phdr;
+
+typedef struct elf32_note Elf32_Nhdr;
+
+struct stack_map_bucket {
+ struct pcpu_freelist_node fnode;
+ u32 hash;
+ u32 nr;
+ u64 data[0];
+};
+
+struct bpf_stack_map {
+ struct bpf_map map;
+ void *elems;
+ struct pcpu_freelist freelist;
+ u32 n_buckets;
+ struct stack_map_bucket *buckets[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct stack_map_irq_work {
+ struct irq_work irq_work;
+ struct mm_struct *mm;
+};
+
+typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64);
+
+typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64);
+
+enum {
+ BPF_F_SYSCTL_BASE_NAME = 1,
+};
+
+struct bpf_prog_list {
+ struct list_head node;
+ struct bpf_prog *prog;
+ struct bpf_cgroup_link *link;
+ struct bpf_cgroup_storage *storage[2];
+};
+
+struct qdisc_skb_cb {
+ struct {
+ unsigned int pkt_len;
+ u16 slave_dev_queue_mapping;
+ u16 tc_classid;
+ };
+ unsigned char data[20];
+};
+
+struct bpf_skb_data_end {
+ struct qdisc_skb_cb qdisc_cb;
+ void *data_meta;
+ void *data_end;
+};
+
+enum {
+ TCPF_ESTABLISHED = 2,
+ TCPF_SYN_SENT = 4,
+ TCPF_SYN_RECV = 8,
+ TCPF_FIN_WAIT1 = 16,
+ TCPF_FIN_WAIT2 = 32,
+ TCPF_TIME_WAIT = 64,
+ TCPF_CLOSE = 128,
+ TCPF_CLOSE_WAIT = 256,
+ TCPF_LAST_ACK = 512,
+ TCPF_LISTEN = 1024,
+ TCPF_CLOSING = 2048,
+ TCPF_NEW_SYN_RECV = 4096,
+};
+
+typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64);
+
+typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t);
+
+typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t);
+
+typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t);
+
+enum sock_type {
+ SOCK_STREAM = 1,
+ SOCK_DGRAM = 2,
+ SOCK_RAW = 3,
+ SOCK_RDM = 4,
+ SOCK_SEQPACKET = 5,
+ SOCK_DCCP = 6,
+ SOCK_PACKET = 10,
+};
+
+enum {
+ IPPROTO_IP = 0,
+ IPPROTO_ICMP = 1,
+ IPPROTO_IGMP = 2,
+ IPPROTO_IPIP = 4,
+ IPPROTO_TCP = 6,
+ IPPROTO_EGP = 8,
+ IPPROTO_PUP = 12,
+ IPPROTO_UDP = 17,
+ IPPROTO_IDP = 22,
+ IPPROTO_TP = 29,
+ IPPROTO_DCCP = 33,
+ IPPROTO_IPV6 = 41,
+ IPPROTO_RSVP = 46,
+ IPPROTO_GRE = 47,
+ IPPROTO_ESP = 50,
+ IPPROTO_AH = 51,
+ IPPROTO_MTP = 92,
+ IPPROTO_BEETPH = 94,
+ IPPROTO_ENCAP = 98,
+ IPPROTO_PIM = 103,
+ IPPROTO_COMP = 108,
+ IPPROTO_SCTP = 132,
+ IPPROTO_UDPLITE = 136,
+ IPPROTO_MPLS = 137,
+ IPPROTO_ETHERNET = 143,
+ IPPROTO_RAW = 255,
+ IPPROTO_MPTCP = 262,
+ IPPROTO_MAX = 263,
+};
+
+enum sock_flags {
+ SOCK_DEAD = 0,
+ SOCK_DONE = 1,
+ SOCK_URGINLINE = 2,
+ SOCK_KEEPOPEN = 3,
+ SOCK_LINGER = 4,
+ SOCK_DESTROY = 5,
+ SOCK_BROADCAST = 6,
+ SOCK_TIMESTAMP = 7,
+ SOCK_ZAPPED = 8,
+ SOCK_USE_WRITE_QUEUE = 9,
+ SOCK_DBG = 10,
+ SOCK_RCVTSTAMP = 11,
+ SOCK_RCVTSTAMPNS = 12,
+ SOCK_LOCALROUTE = 13,
+ SOCK_QUEUE_SHRUNK = 14,
+ SOCK_MEMALLOC = 15,
+ SOCK_TIMESTAMPING_RX_SOFTWARE = 16,
+ SOCK_FASYNC = 17,
+ SOCK_RXQ_OVFL = 18,
+ SOCK_ZEROCOPY = 19,
+ SOCK_WIFI_STATUS = 20,
+ SOCK_NOFCS = 21,
+ SOCK_FILTER_LOCKED = 22,
+ SOCK_SELECT_ERR_QUEUE = 23,
+ SOCK_RCU_FREE = 24,
+ SOCK_TXTIME = 25,
+ SOCK_XDP = 26,
+ SOCK_TSTAMP_NEW = 27,
+};
+
+struct reuseport_array {
+ struct bpf_map map;
+ struct sock *ptrs[0];
+};
+
+enum bpf_struct_ops_state {
+ BPF_STRUCT_OPS_STATE_INIT = 0,
+ BPF_STRUCT_OPS_STATE_INUSE = 1,
+ BPF_STRUCT_OPS_STATE_TOBEFREE = 2,
+};
+
+struct bpf_struct_ops_value {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ char data[0];
+};
+
+struct bpf_struct_ops_map {
+ struct bpf_map map;
+ const struct bpf_struct_ops *st_ops;
+ struct mutex lock;
+ struct bpf_prog **progs;
+ void *image;
+ struct bpf_struct_ops_value *uvalue;
+ long: 64;
+ long: 64;
+ struct bpf_struct_ops_value kvalue;
+};
+
+struct bpf_struct_ops_tcp_congestion_ops {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct tcp_congestion_ops data;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum perf_event_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
+ PERF_FORMAT_ID = 4,
+ PERF_FORMAT_GROUP = 8,
+ PERF_FORMAT_MAX = 16,
+};
+
+enum perf_event_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1,
+};
+
+struct perf_ns_link_info {
+ __u64 dev;
+ __u64 ino;
+};
+
+enum {
+ NET_NS_INDEX = 0,
+ UTS_NS_INDEX = 1,
+ IPC_NS_INDEX = 2,
+ PID_NS_INDEX = 3,
+ USER_NS_INDEX = 4,
+ MNT_NS_INDEX = 5,
+ CGROUP_NS_INDEX = 6,
+ NR_NAMESPACES = 7,
+};
+
+enum perf_event_type {
+ PERF_RECORD_MMAP = 1,
+ PERF_RECORD_LOST = 2,
+ PERF_RECORD_COMM = 3,
+ PERF_RECORD_EXIT = 4,
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
+ PERF_RECORD_FORK = 7,
+ PERF_RECORD_READ = 8,
+ PERF_RECORD_SAMPLE = 9,
+ PERF_RECORD_MMAP2 = 10,
+ PERF_RECORD_AUX = 11,
+ PERF_RECORD_ITRACE_START = 12,
+ PERF_RECORD_LOST_SAMPLES = 13,
+ PERF_RECORD_SWITCH = 14,
+ PERF_RECORD_SWITCH_CPU_WIDE = 15,
+ PERF_RECORD_NAMESPACES = 16,
+ PERF_RECORD_KSYMBOL = 17,
+ PERF_RECORD_BPF_EVENT = 18,
+ PERF_RECORD_CGROUP = 19,
+ PERF_RECORD_MAX = 20,
+};
+
+struct swevent_hlist {
+ struct hlist_head heads[256];
+ struct callback_head callback_head;
+};
+
+struct pmu_event_list {
+ raw_spinlock_t lock;
+ struct list_head list;
+};
+
+struct perf_buffer {
+ refcount_t refcount;
+ struct callback_head callback_head;
+ int nr_pages;
+ int overwrite;
+ int paused;
+ atomic_t poll;
+ local_t head;
+ unsigned int nest;
+ local_t events;
+ local_t wakeup;
+ local_t lost;
+ long int watermark;
+ long int aux_watermark;
+ spinlock_t event_lock;
+ struct list_head event_list;
+ atomic_t mmap_count;
+ long unsigned int mmap_locked;
+ struct user_struct *mmap_user;
+ long int aux_head;
+ unsigned int aux_nest;
+ long int aux_wakeup;
+ long unsigned int aux_pgoff;
+ int aux_nr_pages;
+ int aux_overwrite;
+ atomic_t aux_mmap_count;
+ long unsigned int aux_mmap_locked;
+ void (*free_aux)(void *);
+ refcount_t aux_refcount;
+ int aux_in_sampling;
+ void **aux_pages;
+ void *aux_priv;
+ struct perf_event_mmap_page *user_page;
+ void *data_pages[0];
+};
+
+struct match_token {
+ int token;
+ const char *pattern;
+};
+
+enum {
+ MAX_OPT_ARGS = 3,
+};
+
+typedef struct {
+ char *from;
+ char *to;
+} substring_t;
+
+struct min_heap {
+ void *data;
+ int nr;
+ int size;
+};
+
+struct min_heap_callbacks {
+ int elem_size;
+ bool (*less)(const void *, const void *);
+ void (*swp)(void *, void *);
+};
+
+typedef int (*remote_function_f)(void *);
+
+struct remote_function_call {
+ struct task_struct *p;
+ remote_function_f func;
+ void *info;
+ int ret;
+};
+
+typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *);
+
+struct event_function_struct {
+ struct perf_event *event;
+ event_f func;
+ void *data;
+};
+
+enum event_type_t {
+ EVENT_FLEXIBLE = 1,
+ EVENT_PINNED = 2,
+ EVENT_TIME = 4,
+ EVENT_CPU = 8,
+ EVENT_ALL = 3,
+};
+
+struct stop_event_data {
+ struct perf_event *event;
+ unsigned int restart;
+};
+
+struct perf_read_data {
+ struct perf_event *event;
+ bool group;
+ int ret;
+};
+
+struct perf_read_event {
+ struct perf_event_header header;
+ u32 pid;
+ u32 tid;
+};
+
+typedef void perf_iterate_f(struct perf_event *, void *);
+
+struct remote_output {
+ struct perf_buffer *rb;
+ int err;
+};
+
+struct perf_task_event {
+ struct task_struct *task;
+ struct perf_event_context *task_ctx;
+ struct {
+ struct perf_event_header header;
+ u32 pid;
+ u32 ppid;
+ u32 tid;
+ u32 ptid;
+ u64 time;
+ } event_id;
+};
+
+struct perf_comm_event {
+ struct task_struct *task;
+ char *comm;
+ int comm_size;
+ struct {
+ struct perf_event_header header;
+ u32 pid;
+ u32 tid;
+ } event_id;
+};
+
+struct perf_namespaces_event {
+ struct task_struct *task;
+ struct {
+ struct perf_event_header header;
+ u32 pid;
+ u32 tid;
+ u64 nr_namespaces;
+ struct perf_ns_link_info link_info[7];
+ } event_id;
+};
+
+struct perf_cgroup_event {
+ char *path;
+ int path_size;
+ struct {
+ struct perf_event_header header;
+ u64 id;
+ char path[0];
+ } event_id;
+};
+
+#define BUILD_ID_SIZE_MAX 20
+
+struct perf_mmap_event {
+ struct vm_area_struct *vma;
+ const char *file_name;
+ int file_size;
+ int maj;
+ int min;
+ u64 ino;
+ u64 ino_generation;
+ u32 prot;
+ u32 flags;
+ u8 build_id[BUILD_ID_SIZE_MAX];
+ u32 build_id_size;
+
+ struct {
+ struct perf_event_header header;
+ u32 pid;
+ u32 tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ } event_id;
+};
+
+struct perf_switch_event {
+ struct task_struct *task;
+ struct task_struct *next_prev;
+ struct {
+ struct perf_event_header header;
+ u32 next_prev_pid;
+ u32 next_prev_tid;
+ } event_id;
+};
+
+struct perf_ksymbol_event {
+ const char *name;
+ int name_len;
+ struct {
+ struct perf_event_header header;
+ u64 addr;
+ u32 len;
+ u16 ksym_type;
+ u16 flags;
+ } event_id;
+};
+
+struct perf_bpf_event {
+ struct bpf_prog *prog;
+ struct {
+ struct perf_event_header header;
+ u16 type;
+ u16 flags;
+ u32 id;
+ u8 tag[8];
+ } event_id;
+};
+
+struct swevent_htable {
+ struct swevent_hlist *swevent_hlist;
+ struct mutex hlist_mutex;
+ int hlist_refcount;
+ int recursion[4];
+};
+
+enum perf_probe_config {
+ PERF_PROBE_CONFIG_IS_RETPROBE = 1,
+ PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
+ PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32,
+};
+
+enum {
+ IF_ACT_NONE = -1,
+ IF_ACT_FILTER = 0,
+ IF_ACT_START = 1,
+ IF_ACT_STOP = 2,
+ IF_SRC_FILE = 3,
+ IF_SRC_KERNEL = 4,
+ IF_SRC_FILEADDR = 5,
+ IF_SRC_KERNELADDR = 6,
+};
+
+enum {
+ IF_STATE_ACTION = 0,
+ IF_STATE_SOURCE = 1,
+ IF_STATE_END = 2,
+};
+
+typedef unsigned int pto_T_____24;
+
+struct perf_aux_event {
+ struct perf_event_header header;
+ u32 pid;
+ u32 tid;
+};
+
+struct perf_aux_event___2 {
+ struct perf_event_header header;
+ u64 offset;
+ u64 size;
+ u64 flags;
+};
+
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = -32,
+ PERF_CONTEXT_KERNEL = -128,
+ PERF_CONTEXT_USER = -512,
+ PERF_CONTEXT_GUEST = -2048,
+ PERF_CONTEXT_GUEST_KERNEL = -2176,
+ PERF_CONTEXT_GUEST_USER = -2560,
+ PERF_CONTEXT_MAX = -4095,
+};
+
+struct callchain_cpus_entries {
+ struct callback_head callback_head;
+ struct perf_callchain_entry *cpu_entries[0];
+};
+
+enum bp_type_idx {
+ TYPE_INST = 0,
+ TYPE_DATA = 0,
+ TYPE_MAX = 1,
+};
+
+struct bp_cpuinfo {
+ unsigned int cpu_pinned;
+ unsigned int *tsk_pinned;
+ unsigned int flexible;
+};
+
+struct bp_busy_slots {
+ unsigned int pinned;
+ unsigned int flexible;
+};
+
+typedef u8 uprobe_opcode_t;
+
+struct uprobe {
+ struct rb_node rb_node;
+ refcount_t ref;
+ struct rw_semaphore register_rwsem;
+ struct rw_semaphore consumer_rwsem;
+ struct list_head pending_list;
+ struct uprobe_consumer *consumers;
+ struct inode *inode;
+ loff_t offset;
+ loff_t ref_ctr_offset;
+ long unsigned int flags;
+ struct arch_uprobe arch;
+};
+
+struct xol_area {
+ wait_queue_head_t wq;
+ atomic_t slot_count;
+ long unsigned int *bitmap;
+ struct vm_special_mapping xol_mapping;
+ struct page *pages[2];
+ long unsigned int vaddr;
+};
+
+typedef long unsigned int vm_flags_t;
+
+struct compact_control;
+
+struct capture_control {
+ struct compact_control *cc;
+ struct page *page;
+};
+
+struct page_vma_mapped_walk {
+ struct page *page;
+ struct vm_area_struct *vma;
+ long unsigned int address;
+ pmd_t *pmd;
+ pte_t *pte;
+ spinlock_t *ptl;
+ unsigned int flags;
+};
+
+struct mmu_notifier_range {
+ long unsigned int start;
+ long unsigned int end;
+};
+
+struct compact_control {
+ struct list_head freepages;
+ struct list_head migratepages;
+ unsigned int nr_freepages;
+ unsigned int nr_migratepages;
+ long unsigned int free_pfn;
+ long unsigned int migrate_pfn;
+ long unsigned int fast_start_pfn;
+ struct zone *zone;
+ long unsigned int total_migrate_scanned;
+ long unsigned int total_free_scanned;
+ short unsigned int fast_search_fail;
+ short int search_order;
+ const gfp_t gfp_mask;
+ int order;
+ int migratetype;
+ const unsigned int alloc_flags;
+ const int highest_zoneidx;
+ enum migrate_mode mode;
+ bool ignore_skip_hint;
+ bool no_set_skip_hint;
+ bool ignore_block_suitable;
+ bool direct_compaction;
+ bool whole_zone;
+ bool contended;
+ bool rescan;
+ bool alloc_contig;
+};
+
+struct delayed_uprobe {
+ struct list_head list;
+ struct uprobe *uprobe;
+ struct mm_struct *mm;
+};
+
+struct map_info {
+ struct map_info *next;
+ struct mm_struct *mm;
+ long unsigned int vaddr;
+};
+
+struct trace_event_raw_context_tracking_user {
+ struct trace_entry ent;
+ int dummy;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_context_tracking_user {};
+
+typedef void (*btf_trace_user_enter)(void *, int);
+
+typedef void (*btf_trace_user_exit)(void *, int);
+
+typedef enum ctx_state pto_T_____25;
+
+enum rseq_cpu_id_state {
+ RSEQ_CPU_ID_UNINITIALIZED = -1,
+ RSEQ_CPU_ID_REGISTRATION_FAILED = -2,
+};
+
+enum rseq_flags {
+ RSEQ_FLAG_UNREGISTER = 1,
+};
+
+enum rseq_cs_flags {
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1,
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2,
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4,
+};
+
+struct rseq_cs {
+ __u32 version;
+ __u32 flags;
+ __u64 start_ip;
+ __u64 post_commit_offset;
+ __u64 abort_ip;
+};
+
+struct trace_event_raw_rseq_update {
+ struct trace_entry ent;
+ s32 cpu_id;
+ char __data[0];
+};
+
+struct trace_event_raw_rseq_ip_fixup {
+ struct trace_entry ent;
+ long unsigned int regs_ip;
+ long unsigned int start_ip;
+ long unsigned int post_commit_offset;
+ long unsigned int abort_ip;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_rseq_update {};
+
+struct trace_event_data_offsets_rseq_ip_fixup {};
+
+typedef void (*btf_trace_rseq_update)(void *, struct task_struct *);
+
+typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+
+typedef struct pglist_data pg_data_t;
+
+typedef void (*xa_update_node_t)(struct xa_node *);
+
+struct xa_state {
+ struct xarray *xa;
+ long unsigned int xa_index;
+ unsigned char xa_shift;
+ unsigned char xa_sibs;
+ unsigned char xa_offset;
+ unsigned char xa_pad;
+ struct xa_node *xa_node;
+ struct xa_node *xa_alloc;
+ xa_update_node_t xa_update;
+};
+
+enum positive_aop_returns {
+ AOP_WRITEPAGE_ACTIVATE = 524288,
+ AOP_TRUNCATED_PAGE = 524289,
+};
+
+enum {
+ SB_UNFROZEN = 0,
+ SB_FREEZE_WRITE = 1,
+ SB_FREEZE_PAGEFAULT = 2,
+ SB_FREEZE_FS = 3,
+ SB_FREEZE_COMPLETE = 4,
+};
+
+enum mapping_flags {
+ AS_EIO = 0,
+ AS_ENOSPC = 1,
+ AS_MM_ALL_LOCKS = 2,
+ AS_UNEVICTABLE = 3,
+ AS_EXITING = 4,
+ AS_NO_WRITEBACK_TAGS = 5,
+};
+
+enum iter_type {
+ /* iter types */
+ ITER_IOVEC,
+ ITER_KVEC,
+ ITER_BVEC,
+ ITER_PIPE,
+ ITER_XARRAY,
+ ITER_DISCARD,
+ ITER_UBUF,
+};
+
+struct pagevec {
+ unsigned char nr;
+ bool percpu_pvec_drained;
+ struct page *pages[15];
+};
+
+struct fid {
+ union {
+ struct {
+ u32 ino;
+ u32 gen;
+ u32 parent_ino;
+ u32 parent_gen;
+ } i32;
+ struct {
+ u32 block;
+ u16 partref;
+ u16 parent_partref;
+ u32 generation;
+ u32 parent_block;
+ u32 parent_generation;
+ } udf;
+ __u32 raw[0];
+ };
+};
+
+struct trace_event_raw_mm_filemap_op_page_cache {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ long unsigned int i_ino;
+ long unsigned int index;
+ dev_t s_dev;
+ char __data[0];
+};
+
+struct trace_event_raw_filemap_set_wb_err {
+ struct trace_entry ent;
+ long unsigned int i_ino;
+ dev_t s_dev;
+ errseq_t errseq;
+ char __data[0];
+};
+
+struct trace_event_raw_file_check_and_advance_wb_err {
+ struct trace_entry ent;
+ struct file *file;
+ long unsigned int i_ino;
+ dev_t s_dev;
+ errseq_t old;
+ errseq_t new;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_mm_filemap_op_page_cache {};
+
+struct trace_event_data_offsets_filemap_set_wb_err {};
+
+struct trace_event_data_offsets_file_check_and_advance_wb_err {};
+
+typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct page *);
+
+typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct page *);
+
+typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t);
+
+typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t);
+
+struct wait_page_key {
+ struct page *page;
+ int bit_nr;
+ int page_match;
+};
+
+struct wait_page_queue {
+ struct page *page;
+ int bit_nr;
+ wait_queue_entry_t wait;
+};
+
+enum behavior {
+ EXCLUSIVE = 0,
+ SHARED = 1,
+ DROP = 2,
+};
+
+struct kmem_cache_order_objects {
+ unsigned int x;
+};
+
+struct memcg_cache_array;
+
+struct memcg_cache_params {
+ struct kmem_cache *root_cache;
+ union {
+ struct {
+ struct memcg_cache_array *memcg_caches;
+ struct list_head __root_caches_node;
+ struct list_head children;
+ bool dying;
+ };
+ struct {
+ struct mem_cgroup *memcg;
+ struct list_head children_node;
+ struct list_head kmem_caches_node;
+ struct percpu_ref refcnt;
+ void (*work_fn)(struct kmem_cache *);
+ union {
+ struct callback_head callback_head;
+ struct work_struct work;
+ };
+ };
+ };
+};
+
+struct kmem_cache_cpu;
+
+struct kmem_cache_node;
+
+struct kmem_cache {
+ struct kmem_cache_cpu *cpu_slab;
+ slab_flags_t flags;
+ long unsigned int min_partial;
+ unsigned int size;
+ unsigned int object_size;
+ unsigned int offset;
+ struct kmem_cache_order_objects oo;
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags;
+ int refcount;
+ void (*ctor)(void *);
+ unsigned int inuse;
+ unsigned int align;
+ unsigned int red_left_pad;
+ const char *name;
+ struct list_head list;
+ struct kobject kobj;
+ struct work_struct kobj_remove_work;
+ struct memcg_cache_params memcg_params;
+ unsigned int max_attr_size;
+ struct kset *memcg_kset;
+ unsigned int useroffset;
+ unsigned int usersize;
+ struct kmem_cache_node *node[1];
+};
+
+struct memcg_cache_array {
+ struct callback_head rcu;
+ struct kmem_cache *entries[0];
+};
+
+struct kmem_cache_cpu {
+ void **freelist;
+ long unsigned int tid;
+ struct page *page;
+};
+
+struct kmem_cache_node {
+ spinlock_t list_lock;
+ long unsigned int nr_partial;
+ struct list_head partial;
+ atomic_long_t nr_slabs;
+ atomic_long_t total_objects;
+ struct list_head full;
+};
+
+enum slab_state {
+ DOWN = 0,
+ PARTIAL = 1,
+ PARTIAL_NODE = 2,
+ UP = 3,
+ FULL = 4,
+};
+
+struct kmalloc_info_struct {
+ const char *name[3];
+ unsigned int size;
+};
+
+enum oom_constraint {
+ CONSTRAINT_NONE = 0,
+ CONSTRAINT_CPUSET = 1,
+ CONSTRAINT_MEMORY_POLICY = 2,
+ CONSTRAINT_MEMCG = 3,
+};
+
+struct oom_control {
+ struct zonelist *zonelist;
+ nodemask_t *nodemask;
+ struct mem_cgroup *memcg;
+ const gfp_t gfp_mask;
+ const int order;
+ long unsigned int totalpages;
+ struct task_struct *chosen;
+ long unsigned int chosen_points;
+ enum oom_constraint constraint;
+};
+
+enum compact_priority {
+ COMPACT_PRIO_SYNC_FULL = 0,
+ MIN_COMPACT_PRIORITY = 0,
+ COMPACT_PRIO_SYNC_LIGHT = 1,
+ MIN_COMPACT_COSTLY_PRIORITY = 1,
+ DEF_COMPACT_PRIORITY = 1,
+ COMPACT_PRIO_ASYNC = 2,
+ INIT_COMPACT_PRIORITY = 2,
+};
+
+enum compact_result {
+ COMPACT_NOT_SUITABLE_ZONE = 0,
+ COMPACT_SKIPPED = 1,
+ COMPACT_DEFERRED = 2,
+ COMPACT_INACTIVE = 2,
+ COMPACT_NO_SUITABLE_PAGE = 3,
+ COMPACT_CONTINUE = 4,
+ COMPACT_COMPLETE = 5,
+ COMPACT_PARTIAL_SKIPPED = 6,
+ COMPACT_CONTENDED = 7,
+ COMPACT_SUCCESS = 8,
+};
+
+struct trace_event_raw_oom_score_adj_update {
+ struct trace_entry ent;
+ pid_t pid;
+ char comm[16];
+ short int oom_score_adj;
+ char __data[0];
+};
+
+struct trace_event_raw_reclaim_retry_zone {
+ struct trace_entry ent;
+ int node;
+ int zone_idx;
+ int order;
+ long unsigned int reclaimable;
+ long unsigned int available;
+ long unsigned int min_wmark;
+ int no_progress_loops;
+ bool wmark_check;
+ char __data[0];
+};
+
+struct trace_event_raw_mark_victim {
+ struct trace_entry ent;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_wake_reaper {
+ struct trace_entry ent;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_start_task_reaping {
+ struct trace_entry ent;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_finish_task_reaping {
+ struct trace_entry ent;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_skip_task_reaping {
+ struct trace_entry ent;
+ int pid;
+ char __data[0];
+};
+
+struct trace_event_raw_compact_retry {
+ struct trace_entry ent;
+ int order;
+ int priority;
+ int result;
+ int retries;
+ int max_retries;
+ bool ret;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_oom_score_adj_update {};
+
+struct trace_event_data_offsets_reclaim_retry_zone {};
+
+struct trace_event_data_offsets_mark_victim {};
+
+struct trace_event_data_offsets_wake_reaper {};
+
+struct trace_event_data_offsets_start_task_reaping {};
+
+struct trace_event_data_offsets_finish_task_reaping {};
+
+struct trace_event_data_offsets_skip_task_reaping {};
+
+struct trace_event_data_offsets_compact_retry {};
+
+typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *);
+
+typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool);
+
+typedef void (*btf_trace_mark_victim)(void *, int);
+
+typedef void (*btf_trace_wake_reaper)(void *, int);
+
+typedef void (*btf_trace_start_task_reaping)(void *, int);
+
+typedef void (*btf_trace_finish_task_reaping)(void *, int);
+
+typedef void (*btf_trace_skip_task_reaping)(void *, int);
+
+typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool);
+
+enum wb_congested_state {
+ WB_async_congested = 0,
+ WB_sync_congested = 1,
+};
+
+enum {
+ XA_CHECK_SCHED = 4096,
+};
+
+enum wb_state {
+ WB_registered = 0,
+ WB_writeback_running = 1,
+ WB_has_dirty_io = 2,
+ WB_start_all = 3,
+};
+
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
+struct wb_lock_cookie {
+ bool locked;
+ long unsigned int flags;
+};
+
+typedef int (*writepage_t)(struct page *, struct writeback_control *, void *);
+
+struct dirty_throttle_control {
+ struct wb_domain *dom;
+ struct dirty_throttle_control *gdtc;
+ struct bdi_writeback *wb;
+ struct fprop_local_percpu *wb_completions;
+ long unsigned int avail;
+ long unsigned int dirty;
+ long unsigned int thresh;
+ long unsigned int bg_thresh;
+ long unsigned int wb_dirty;
+ long unsigned int wb_thresh;
+ long unsigned int wb_bg_thresh;
+ long unsigned int pos_ratio;
+};
+
+struct trace_event_raw_mm_lru_insertion {
+ struct trace_entry ent;
+ struct page *page;
+ long unsigned int pfn;
+ int lru;
+ long unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_lru_activate {
+ struct trace_entry ent;
+ struct page *page;
+ long unsigned int pfn;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_mm_lru_insertion {};
+
+struct trace_event_data_offsets_mm_lru_activate {};
+
+typedef void (*btf_trace_mm_lru_insertion)(void *, struct page *, int);
+
+typedef void (*btf_trace_mm_lru_activate)(void *, struct page *);
+
+struct lru_rotate {
+ local_lock_t lock;
+ struct pagevec pvec;
+};
+
+struct lru_pvecs {
+ local_lock_t lock;
+ struct pagevec lru_add;
+ struct pagevec lru_deactivate_file;
+ struct pagevec lru_deactivate;
+ struct pagevec lru_lazyfree;
+ struct pagevec activate_page;
+};
+
+typedef struct {
+ long unsigned int val;
+} swp_entry_t;
+
+enum lruvec_flags {
+ LRUVEC_CONGESTED = 0,
+};
+
+enum pgdat_flags {
+ PGDAT_DIRTY = 0,
+ PGDAT_WRITEBACK = 1,
+ PGDAT_RECLAIM_LOCKED = 2,
+};
+
+struct reclaim_stat {
+ unsigned int nr_dirty;
+ unsigned int nr_unqueued_dirty;
+ unsigned int nr_congested;
+ unsigned int nr_writeback;
+ unsigned int nr_immediate;
+ unsigned int nr_pageout;
+ unsigned int nr_activate[2];
+ unsigned int nr_ref_keep;
+ unsigned int nr_unmap_fail;
+ unsigned int nr_lazyfree_fail;
+};
+
+enum mem_cgroup_protection {
+ MEMCG_PROT_NONE = 0,
+ MEMCG_PROT_LOW = 1,
+ MEMCG_PROT_MIN = 2,
+};
+
+enum ttu_flags {
+ TTU_MIGRATION = 1,
+ TTU_MUNLOCK = 2,
+ TTU_SPLIT_HUGE_PMD = 4,
+ TTU_IGNORE_MLOCK = 8,
+ TTU_IGNORE_ACCESS = 16,
+ TTU_IGNORE_HWPOISON = 32,
+ TTU_BATCH_FLUSH = 64,
+ TTU_RMAP_LOCKED = 128,
+ TTU_SPLIT_FREEZE = 256,
+};
+
+enum migrate_reason {
+ MR_COMPACTION = 0,
+ MR_MEMORY_FAILURE = 1,
+ MR_MEMORY_HOTPLUG = 2,
+ MR_SYSCALL = 3,
+ MR_MEMPOLICY_MBIND = 4,
+ MR_NUMA_MISPLACED = 5,
+ MR_CONTIG_RANGE = 6,
+ MR_TYPES = 7,
+};
+
+struct trace_event_raw_mm_vmscan_kswapd_sleep {
+ struct trace_entry ent;
+ int nid;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_kswapd_wake {
+ struct trace_entry ent;
+ int nid;
+ int zid;
+ int order;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_wakeup_kswapd {
+ struct trace_entry ent;
+ int nid;
+ int zid;
+ int order;
+ gfp_t gfp_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template {
+ struct trace_entry ent;
+ int order;
+ gfp_t gfp_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_direct_reclaim_end_template {
+ struct trace_entry ent;
+ long unsigned int nr_reclaimed;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_shrink_slab_start {
+ struct trace_entry ent;
+ struct shrinker *shr;
+ void *shrink;
+ int nid;
+ long int nr_objects_to_shrink;
+ gfp_t gfp_flags;
+ long unsigned int cache_items;
+ long long unsigned int delta;
+ long unsigned int total_scan;
+ int priority;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_shrink_slab_end {
+ struct trace_entry ent;
+ struct shrinker *shr;
+ int nid;
+ void *shrink;
+ long int unused_scan;
+ long int new_scan;
+ int retval;
+ long int total_scan;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_lru_isolate {
+ struct trace_entry ent;
+ int highest_zoneidx;
+ int order;
+ long unsigned int nr_requested;
+ long unsigned int nr_scanned;
+ long unsigned int nr_skipped;
+ long unsigned int nr_taken;
+ isolate_mode_t isolate_mode;
+ int lru;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_writepage {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ int reclaim_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_lru_shrink_inactive {
+ struct trace_entry ent;
+ int nid;
+ long unsigned int nr_scanned;
+ long unsigned int nr_reclaimed;
+ long unsigned int nr_dirty;
+ long unsigned int nr_writeback;
+ long unsigned int nr_congested;
+ long unsigned int nr_immediate;
+ unsigned int nr_activate0;
+ unsigned int nr_activate1;
+ long unsigned int nr_ref_keep;
+ long unsigned int nr_unmap_fail;
+ int priority;
+ int reclaim_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_lru_shrink_active {
+ struct trace_entry ent;
+ int nid;
+ long unsigned int nr_taken;
+ long unsigned int nr_active;
+ long unsigned int nr_deactivated;
+ long unsigned int nr_referenced;
+ int priority;
+ int reclaim_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_inactive_list_is_low {
+ struct trace_entry ent;
+ int nid;
+ int reclaim_idx;
+ long unsigned int total_inactive;
+ long unsigned int inactive;
+ long unsigned int total_active;
+ long unsigned int active;
+ long unsigned int ratio;
+ int reclaim_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_vmscan_node_reclaim_begin {
+ struct trace_entry ent;
+ int nid;
+ int order;
+ gfp_t gfp_flags;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {};
+
+struct trace_event_data_offsets_mm_vmscan_kswapd_wake {};
+
+struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {};
+
+struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {};
+
+struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {};
+
+struct trace_event_data_offsets_mm_shrink_slab_start {};
+
+struct trace_event_data_offsets_mm_shrink_slab_end {};
+
+struct trace_event_data_offsets_mm_vmscan_lru_isolate {};
+
+struct trace_event_data_offsets_mm_vmscan_writepage {};
+
+struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {};
+
+struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {};
+
+struct trace_event_data_offsets_mm_vmscan_inactive_list_is_low {};
+
+struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {};
+
+typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int);
+
+typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int);
+
+typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t);
+
+typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t);
+
+typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t);
+
+typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t);
+
+typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int);
+
+typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int);
+
+typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int);
+
+typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int);
+
+typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int);
+
+typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, isolate_mode_t, int);
+
+typedef void (*btf_trace_mm_vmscan_writepage)(void *, struct page *);
+
+typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int);
+
+typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int);
+
+typedef void (*btf_trace_mm_vmscan_inactive_list_is_low)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int);
+
+typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t);
+
+typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int);
+
+struct scan_control {
+ long unsigned int nr_to_reclaim;
+ nodemask_t *nodemask;
+ struct mem_cgroup *target_mem_cgroup;
+ long unsigned int anon_cost;
+ long unsigned int file_cost;
+ unsigned int may_deactivate: 2;
+ unsigned int force_deactivate: 1;
+ unsigned int skipped_deactivate: 1;
+ unsigned int may_writepage: 1;
+ unsigned int may_unmap: 1;
+ unsigned int may_swap: 1;
+ unsigned int memcg_low_reclaim: 1;
+ unsigned int memcg_low_skipped: 1;
+ unsigned int hibernation_mode: 1;
+ unsigned int compaction_ready: 1;
+ unsigned int cache_trim_mode: 1;
+ unsigned int file_is_tiny: 1;
+ s8 order;
+ s8 priority;
+ s8 reclaim_idx;
+ gfp_t gfp_mask;
+ long unsigned int nr_scanned;
+ long unsigned int nr_reclaimed;
+ struct {
+ unsigned int dirty;
+ unsigned int unqueued_dirty;
+ unsigned int congested;
+ unsigned int writeback;
+ unsigned int immediate;
+ unsigned int file_taken;
+ unsigned int taken;
+ } nr;
+ struct reclaim_state reclaim_state;
+};
+
+typedef enum {
+ PAGE_KEEP = 0,
+ PAGE_ACTIVATE = 1,
+ PAGE_SUCCESS = 2,
+ PAGE_CLEAN = 3,
+} pageout_t;
+
+enum page_references {
+ PAGEREF_RECLAIM = 0,
+ PAGEREF_RECLAIM_CLEAN = 1,
+ PAGEREF_KEEP = 2,
+ PAGEREF_ACTIVATE = 3,
+};
+
+enum scan_balance {
+ SCAN_EQUAL = 0,
+ SCAN_FRACT = 1,
+ SCAN_ANON = 2,
+ SCAN_FILE = 3,
+};
+
+struct kstatfs {
+ long int f_type;
+ long int f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ long int f_namelen;
+ long int f_frsize;
+ long int f_flags;
+ long int f_spare[4];
+};
+
+struct constant_table {
+ const char *name;
+ int value;
+};
+
+struct shared_policy {};
+
+struct simple_xattrs {
+ struct list_head head;
+ spinlock_t lock;
+};
+
+struct simple_xattr {
+ struct list_head list;
+ char *name;
+ size_t size;
+ char value[0];
+};
+
+enum fid_type {
+ FILEID_ROOT = 0,
+ FILEID_INO32_GEN = 1,
+ FILEID_INO32_GEN_PARENT = 2,
+ FILEID_BTRFS_WITHOUT_PARENT = 77,
+ FILEID_BTRFS_WITH_PARENT = 78,
+ FILEID_BTRFS_WITH_PARENT_ROOT = 79,
+ FILEID_UDF_WITHOUT_PARENT = 81,
+ FILEID_UDF_WITH_PARENT = 82,
+ FILEID_NILFS_WITHOUT_PARENT = 97,
+ FILEID_NILFS_WITH_PARENT = 98,
+ FILEID_FAT_WITHOUT_PARENT = 113,
+ FILEID_FAT_WITH_PARENT = 114,
+ FILEID_LUSTRE = 151,
+ FILEID_KERNFS = 254,
+ FILEID_INVALID = 255,
+};
+
+struct shmem_inode_info {
+ spinlock_t lock;
+ unsigned int seals;
+ long unsigned int flags;
+ long unsigned int alloced;
+ long unsigned int swapped;
+ struct list_head shrinklist;
+ struct list_head swaplist;
+ struct shared_policy policy;
+ struct simple_xattrs xattrs;
+ atomic_t stop_eviction;
+ struct inode vfs_inode;
+};
+
+struct shmem_sb_info {
+ long unsigned int max_blocks;
+ struct percpu_counter used_blocks;
+ long unsigned int max_inodes;
+ long unsigned int free_inodes;
+ spinlock_t stat_lock;
+ umode_t mode;
+ unsigned char huge;
+ kuid_t uid;
+ kgid_t gid;
+ struct mempolicy *mpol;
+ spinlock_t shrinklist_lock;
+ struct list_head shrinklist;
+ long unsigned int shrinklist_len;
+};
+
+enum sgp_type {
+ SGP_READ = 0,
+ SGP_CACHE = 1,
+ SGP_NOHUGE = 2,
+ SGP_HUGE = 3,
+ SGP_WRITE = 4,
+ SGP_FALLOC = 5,
+};
+
+struct shmem_falloc {
+ wait_queue_head_t *waitq;
+ long unsigned int start;
+ long unsigned int next;
+ long unsigned int nr_falloced;
+ long unsigned int nr_unswapped;
+};
+
+struct shmem_options {
+ long long unsigned int blocks;
+ long long unsigned int inodes;
+ struct mempolicy *mpol;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+ int huge;
+ int seen;
+};
+
+enum shmem_param {
+ Opt_gid = 0,
+ Opt_huge = 1,
+ Opt_mode = 2,
+ Opt_mpol = 3,
+ Opt_nr_blocks = 4,
+ Opt_nr_inodes = 5,
+ Opt_size = 6,
+ Opt_uid = 7,
+};
+
+enum pageblock_bits {
+ PB_migrate = 0,
+ PB_migrate_end = 2,
+ PB_migrate_skip = 3,
+ NR_PAGEBLOCK_BITS = 4,
+};
+
+enum writeback_stat_item {
+ NR_DIRTY_THRESHOLD = 0,
+ NR_DIRTY_BG_THRESHOLD = 1,
+ NR_VM_WRITEBACK_STAT_ITEMS = 2,
+};
+
+struct contig_page_info {
+ long unsigned int free_pages;
+ long unsigned int free_blocks_total;
+ long unsigned int free_blocks_suitable;
+};
+
+typedef s8 pto_T_____26;
+
+struct radix_tree_iter {
+ long unsigned int index;
+ long unsigned int next_index;
+ long unsigned int tags;
+ struct xa_node *node;
+};
+
+enum {
+ RADIX_TREE_ITER_TAG_MASK = 15,
+ RADIX_TREE_ITER_TAGGED = 16,
+ RADIX_TREE_ITER_CONTIG = 32,
+};
+
+enum mminit_level {
+ MMINIT_WARNING = 0,
+ MMINIT_VERIFY = 1,
+ MMINIT_TRACE = 2,
+};
+
+struct pcpu_group_info {
+ int nr_units;
+ long unsigned int base_offset;
+ unsigned int *cpu_map;
+};
+
+struct pcpu_alloc_info {
+ size_t static_size;
+ size_t reserved_size;
+ size_t dyn_size;
+ size_t unit_size;
+ size_t atom_size;
+ size_t alloc_size;
+ size_t __ai_size;
+ int nr_groups;
+ struct pcpu_group_info groups[0];
+};
+
+typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int, size_t, size_t);
+
+typedef void (*pcpu_fc_free_fn_t)(void *, size_t);
+
+typedef void (*pcpu_fc_populate_pte_fn_t)(long unsigned int);
+
+typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int);
+
+struct trace_event_raw_percpu_alloc_percpu {
+ struct trace_entry ent;
+ bool reserved;
+ bool is_atomic;
+ size_t size;
+ size_t align;
+ void *base_addr;
+ int off;
+ void *ptr;
+ char __data[0];
+};
+
+struct trace_event_raw_percpu_free_percpu {
+ struct trace_entry ent;
+ void *base_addr;
+ int off;
+ void *ptr;
+ char __data[0];
+};
+
+struct trace_event_raw_percpu_alloc_percpu_fail {
+ struct trace_entry ent;
+ bool reserved;
+ bool is_atomic;
+ size_t size;
+ size_t align;
+ char __data[0];
+};
+
+struct trace_event_raw_percpu_create_chunk {
+ struct trace_entry ent;
+ void *base_addr;
+ char __data[0];
+};
+
+struct trace_event_raw_percpu_destroy_chunk {
+ struct trace_entry ent;
+ void *base_addr;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_percpu_alloc_percpu {};
+
+struct trace_event_data_offsets_percpu_free_percpu {};
+
+struct trace_event_data_offsets_percpu_alloc_percpu_fail {};
+
+struct trace_event_data_offsets_percpu_create_chunk {};
+
+struct trace_event_data_offsets_percpu_destroy_chunk {};
+
+typedef void (*btf_trace_percpu_alloc_percpu)(void *, bool, bool, size_t, size_t, void *, int, void *);
+
+typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *);
+
+typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t);
+
+typedef void (*btf_trace_percpu_create_chunk)(void *, void *);
+
+typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *);
+
+struct pcpu_block_md {
+ int scan_hint;
+ int scan_hint_start;
+ int contig_hint;
+ int contig_hint_start;
+ int left_free;
+ int right_free;
+ int first_free;
+ int nr_bits;
+};
+
+struct pcpu_chunk {
+ struct list_head list;
+ int free_bytes;
+ struct pcpu_block_md chunk_md;
+ void *base_addr;
+ long unsigned int *alloc_map;
+ long unsigned int *bound_map;
+ struct pcpu_block_md *md_blocks;
+ void *data;
+ bool immutable;
+ int start_offset;
+ int end_offset;
+ int nr_pages;
+ int nr_populated;
+ int nr_empty_pop_pages;
+ long unsigned int populated[0];
+};
+
+struct trace_event_raw_kmem_alloc {
+ struct trace_entry ent;
+ long unsigned int call_site;
+ const void *ptr;
+ size_t bytes_req;
+ size_t bytes_alloc;
+ gfp_t gfp_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_kmem_alloc_node {
+ struct trace_entry ent;
+ long unsigned int call_site;
+ const void *ptr;
+ size_t bytes_req;
+ size_t bytes_alloc;
+ gfp_t gfp_flags;
+ int node;
+ char __data[0];
+};
+
+struct trace_event_raw_kmem_free {
+ struct trace_entry ent;
+ long unsigned int call_site;
+ const void *ptr;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_page_free {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ unsigned int order;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_page_free_batched {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_page_alloc {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ unsigned int order;
+ gfp_t gfp_flags;
+ int migratetype;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_page {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ unsigned int order;
+ int migratetype;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_page_pcpu_drain {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ unsigned int order;
+ int migratetype;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_page_alloc_extfrag {
+ struct trace_entry ent;
+ long unsigned int pfn;
+ int alloc_order;
+ int fallback_order;
+ int alloc_migratetype;
+ int fallback_migratetype;
+ int change_ownership;
+ char __data[0];
+};
+
+struct trace_event_raw_rss_stat {
+ struct trace_entry ent;
+ unsigned int mm_id;
+ unsigned int curr;
+ int member;
+ long int size;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_kmem_alloc {};
+
+struct trace_event_data_offsets_kmem_alloc_node {};
+
+struct trace_event_data_offsets_kmem_free {};
+
+struct trace_event_data_offsets_mm_page_free {};
+
+struct trace_event_data_offsets_mm_page_free_batched {};
+
+struct trace_event_data_offsets_mm_page_alloc {};
+
+struct trace_event_data_offsets_mm_page {};
+
+struct trace_event_data_offsets_mm_page_pcpu_drain {};
+
+struct trace_event_data_offsets_mm_page_alloc_extfrag {};
+
+struct trace_event_data_offsets_rss_stat {};
+
+typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t);
+
+typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t);
+
+typedef void (*btf_trace_kmalloc_node)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int);
+
+typedef void (*btf_trace_kmem_cache_alloc_node)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int);
+
+typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *);
+
+typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *);
+
+typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int);
+
+typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *);
+
+typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int);
+
+typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int);
+
+typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int);
+
+typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int);
+
+typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int, long int);
+
+struct slabinfo {
+ long unsigned int active_objs;
+ long unsigned int num_objs;
+ long unsigned int active_slabs;
+ long unsigned int num_slabs;
+ long unsigned int shared_avail;
+ unsigned int limit;
+ unsigned int batchcount;
+ unsigned int shared;
+ unsigned int objects_per_slab;
+ unsigned int cache_order;
+};
+
+struct alloc_context {
+ struct zonelist *zonelist;
+ nodemask_t *nodemask;
+ struct zoneref *preferred_zoneref;
+ int migratetype;
+ enum zone_type highest_zoneidx;
+ bool spread_dirty_pages;
+};
+
+struct trace_event_raw_mm_compaction_isolate_template {
+ struct trace_entry ent;
+ long unsigned int start_pfn;
+ long unsigned int end_pfn;
+ long unsigned int nr_scanned;
+ long unsigned int nr_taken;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_migratepages {
+ struct trace_entry ent;
+ long unsigned int nr_migrated;
+ long unsigned int nr_failed;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_begin {
+ struct trace_entry ent;
+ long unsigned int zone_start;
+ long unsigned int migrate_pfn;
+ long unsigned int free_pfn;
+ long unsigned int zone_end;
+ bool sync;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_end {
+ struct trace_entry ent;
+ long unsigned int zone_start;
+ long unsigned int migrate_pfn;
+ long unsigned int free_pfn;
+ long unsigned int zone_end;
+ bool sync;
+ int status;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_try_to_compact_pages {
+ struct trace_entry ent;
+ int order;
+ gfp_t gfp_mask;
+ int prio;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_suitable_template {
+ struct trace_entry ent;
+ int nid;
+ enum zone_type idx;
+ int order;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_defer_template {
+ struct trace_entry ent;
+ int nid;
+ enum zone_type idx;
+ int order;
+ unsigned int considered;
+ unsigned int defer_shift;
+ int order_failed;
+ char __data[0];
+};
+
+struct trace_event_raw_mm_compaction_kcompactd_sleep {
+ struct trace_entry ent;
+ int nid;
+ char __data[0];
+};
+
+struct trace_event_raw_kcompactd_wake_template {
+ struct trace_entry ent;
+ int nid;
+ int order;
+ enum zone_type highest_zoneidx;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_mm_compaction_isolate_template {};
+
+struct trace_event_data_offsets_mm_compaction_migratepages {};
+
+struct trace_event_data_offsets_mm_compaction_begin {};
+
+struct trace_event_data_offsets_mm_compaction_end {};
+
+struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {};
+
+struct trace_event_data_offsets_mm_compaction_suitable_template {};
+
+struct trace_event_data_offsets_mm_compaction_defer_template {};
+
+struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {};
+
+struct trace_event_data_offsets_kcompactd_wake_template {};
+
+typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_mm_compaction_migratepages)(void *, long unsigned int, int, struct list_head *);
+
+typedef void (*btf_trace_mm_compaction_begin)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, bool);
+
+typedef void (*btf_trace_mm_compaction_end)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, bool, int);
+
+typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int);
+
+typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int);
+
+typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int);
+
+typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int);
+
+typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int);
+
+typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int);
+
+typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int);
+
+typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type);
+
+typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type);
+
+typedef enum {
+ ISOLATE_ABORT = 0,
+ ISOLATE_NONE = 1,
+ ISOLATE_SUCCESS = 2,
+} isolate_migrate_t;
+
+struct anon_vma_chain {
+ struct vm_area_struct *vma;
+ struct anon_vma *anon_vma;
+ struct list_head same_vma;
+ struct rb_node rb;
+ long unsigned int rb_subtree_last;
+};
+
+enum lru_status {
+ LRU_REMOVED = 0,
+ LRU_REMOVED_RETRY = 1,
+ LRU_ROTATE = 2,
+ LRU_SKIP = 3,
+ LRU_RETRY = 4,
+};
+
+typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *);
+
+typedef struct {
+ long unsigned int pd;
+} hugepd_t;
+
+struct follow_page_context {
+ struct dev_pagemap *pgmap;
+ unsigned int page_mask;
+};
+
+struct zap_details {
+ struct address_space *check_mapping;
+ long unsigned int first_index;
+ long unsigned int last_index;
+};
+
+typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *);
+
+enum {
+ SWP_USED = 1,
+ SWP_WRITEOK = 2,
+ SWP_DISCARDABLE = 4,
+ SWP_DISCARDING = 8,
+ SWP_SOLIDSTATE = 16,
+ SWP_CONTINUED = 32,
+ SWP_BLKDEV = 64,
+ SWP_ACTIVATED = 128,
+ SWP_FS = 256,
+ SWP_AREA_DISCARD = 512,
+ SWP_PAGE_DISCARD = 1024,
+ SWP_STABLE_WRITES = 2048,
+ SWP_SYNCHRONOUS_IO = 4096,
+ SWP_VALID = 8192,
+ SWP_SCANNING = 16384,
+};
+
+struct copy_subpage_arg {
+ struct page *dst;
+ struct page *src;
+ struct vm_area_struct *vma;
+};
+
+struct mm_walk;
+
+struct mm_walk_ops {
+ int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *);
+ int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *);
+ int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *);
+ int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *);
+ int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *);
+ int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *);
+ int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *);
+ int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *);
+ void (*post_vma)(struct mm_walk *);
+};
+
+enum page_walk_action {
+ ACTION_SUBTREE = 0,
+ ACTION_CONTINUE = 1,
+ ACTION_AGAIN = 2,
+};
+
+struct mm_walk {
+ const struct mm_walk_ops *ops;
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ struct vm_area_struct *vma;
+ enum page_walk_action action;
+ bool no_vma;
+ void *private;
+};
+
+enum {
+ HUGETLB_SHMFS_INODE = 1,
+ HUGETLB_ANONHUGE_INODE = 2,
+};
+
+struct trace_event_raw_vm_unmapped_area {
+ struct trace_entry ent;
+ long unsigned int addr;
+ long unsigned int total_vm;
+ long unsigned int flags;
+ long unsigned int length;
+ long unsigned int low_limit;
+ long unsigned int high_limit;
+ long unsigned int align_mask;
+ long unsigned int align_offset;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_vm_unmapped_area {};
+
+typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *);
+
+struct rmap_walk_control {
+ void *arg;
+ bool (*rmap_one)(struct page *, struct vm_area_struct *, long unsigned int, void *);
+ int (*done)(struct page *);
+ struct anon_vma * (*anon_lock)(struct page *);
+ bool (*invalid_vma)(struct vm_area_struct *, void *);
+};
+
+struct page_referenced_arg {
+ int mapcount;
+ int referenced;
+ long unsigned int vm_flags;
+ struct mem_cgroup *memcg;
+};
+
+struct vmap_area {
+ long unsigned int va_start;
+ long unsigned int va_end;
+ struct rb_node rb_node;
+ struct list_head list;
+ union {
+ long unsigned int subtree_max_size;
+ struct vm_struct *vm;
+ struct llist_node purge_list;
+ };
+};
+
+typedef unsigned int pgtbl_mod_mask;
+
+struct vfree_deferred {
+ struct llist_head list;
+ struct work_struct wq;
+};
+
+enum fit_type {
+ NOTHING_FIT = 0,
+ FL_FIT_TYPE = 1,
+ LE_FIT_TYPE = 2,
+ RE_FIT_TYPE = 3,
+ NE_FIT_TYPE = 4,
+};
+
+struct vmap_block_queue {
+ spinlock_t lock;
+ struct list_head free;
+};
+
+struct vmap_block {
+ spinlock_t lock;
+ struct vmap_area *va;
+ long unsigned int free;
+ long unsigned int dirty;
+ long unsigned int dirty_min;
+ long unsigned int dirty_max;
+ struct list_head free_list;
+ struct callback_head callback_head;
+ struct list_head purge;
+};
+
+typedef struct vmap_area *pto_T_____27;
+
+struct page_frag_cache {
+ void *va;
+ __u16 offset;
+ __u16 size;
+ unsigned int pagecnt_bias;
+ bool pfmemalloc;
+};
+
+enum zone_flags {
+ ZONE_BOOSTED_WATERMARK = 0,
+};
+
+enum memmap_context {
+ MEMMAP_EARLY = 0,
+ MEMMAP_HOTPLUG = 1,
+};
+
+struct pcpu_drain {
+ struct zone *zone;
+ struct work_struct work;
+};
+
+struct madvise_walk_private {
+ struct mmu_gather *tlb;
+ bool pageout;
+};
+
+enum {
+ BIO_NO_PAGE_REF = 0,
+ BIO_CLONED = 1,
+ BIO_BOUNCED = 2,
+ BIO_USER_MAPPED = 3,
+ BIO_NULL_MAPPED = 4,
+ BIO_WORKINGSET = 5,
+ BIO_QUIET = 6,
+ BIO_CHAIN = 7,
+ BIO_REFFED = 8,
+ BIO_THROTTLED = 9,
+ BIO_TRACE_COMPLETION = 10,
+ BIO_CGROUP_ACCT = 11,
+ BIO_TRACKED = 12,
+ BIO_FLAG_LAST = 13,
+};
+
+struct vma_swap_readahead {
+ short unsigned int win;
+ short unsigned int offset;
+ short unsigned int nr_pte;
+ pte_t *ptes;
+};
+
+union swap_header {
+ struct {
+ char reserved[4086];
+ char magic[10];
+ } magic;
+ struct {
+ char bootbits[1024];
+ __u32 version;
+ __u32 last_page;
+ __u32 nr_badpages;
+ unsigned char sws_uuid[16];
+ unsigned char sws_volume[16];
+ __u32 padding[117];
+ __u32 badpages[1];
+ } info;
+};
+
+struct swap_extent {
+ struct rb_node rb_node;
+ long unsigned int start_page;
+ long unsigned int nr_pages;
+ sector_t start_block;
+};
+
+struct swap_slots_cache {
+ bool lock_initialized;
+ struct mutex alloc_lock;
+ swp_entry_t *slots;
+ int nr;
+ int cur;
+ spinlock_t free_lock;
+ swp_entry_t *slots_ret;
+ int n_ret;
+};
+
+struct dma_pool {
+ struct list_head page_list;
+ spinlock_t lock;
+ size_t size;
+ struct device *dev;
+ size_t allocation;
+ size_t boundary;
+ char name[32];
+ struct list_head pools;
+};
+
+struct dma_page {
+ struct list_head page_list;
+ void *vaddr;
+ dma_addr_t dma;
+ unsigned int in_use;
+ unsigned int offset;
+};
+
+enum string_size_units {
+ STRING_UNITS_10 = 0,
+ STRING_UNITS_2 = 1,
+};
+
+struct resv_map {
+ struct kref refs;
+ spinlock_t lock;
+ struct list_head regions;
+ long int adds_in_progress;
+ struct list_head region_cache;
+ long int region_cache_count;
+};
+
+struct file_region {
+ struct list_head link;
+ long int from;
+ long int to;
+};
+
+struct huge_bootmem_page {
+ struct list_head list;
+ struct hstate *hstate;
+};
+
+struct cma;
+
+enum vma_resv_mode {
+ VMA_NEEDS_RESV = 0,
+ VMA_COMMIT_RESV = 1,
+ VMA_END_RESV = 2,
+ VMA_ADD_RESV = 3,
+};
+
+struct hugetlb_cgroup;
+
+enum stat_item {
+ ALLOC_FASTPATH = 0,
+ ALLOC_SLOWPATH = 1,
+ FREE_FASTPATH = 2,
+ FREE_SLOWPATH = 3,
+ FREE_FROZEN = 4,
+ FREE_ADD_PARTIAL = 5,
+ FREE_REMOVE_PARTIAL = 6,
+ ALLOC_FROM_PARTIAL = 7,
+ ALLOC_SLAB = 8,
+ ALLOC_REFILL = 9,
+ ALLOC_NODE_MISMATCH = 10,
+ FREE_SLAB = 11,
+ CPUSLAB_FLUSH = 12,
+ DEACTIVATE_FULL = 13,
+ DEACTIVATE_EMPTY = 14,
+ DEACTIVATE_TO_HEAD = 15,
+ DEACTIVATE_TO_TAIL = 16,
+ DEACTIVATE_REMOTE_FREES = 17,
+ DEACTIVATE_BYPASS = 18,
+ ORDER_FALLBACK = 19,
+ CMPXCHG_DOUBLE_CPU_FAIL = 20,
+ CMPXCHG_DOUBLE_FAIL = 21,
+ CPU_PARTIAL_ALLOC = 22,
+ CPU_PARTIAL_FREE = 23,
+ CPU_PARTIAL_NODE = 24,
+ CPU_PARTIAL_DRAIN = 25,
+ NR_SLUB_STAT_ITEMS = 26,
+};
+
+struct memory_notify {
+ long unsigned int start_pfn;
+ long unsigned int nr_pages;
+ int status_change_nid_normal;
+ int status_change_nid_high;
+ int status_change_nid;
+};
+
+struct track {
+ long unsigned int addr;
+ long unsigned int addrs[16];
+ int cpu;
+ int pid;
+ long unsigned int when;
+};
+
+enum track_item {
+ TRACK_ALLOC = 0,
+ TRACK_FREE = 1,
+};
+
+struct detached_freelist {
+ struct page *page;
+ void *tail;
+ void *freelist;
+ int cnt;
+ struct kmem_cache *s;
+};
+
+struct location {
+ long unsigned int count;
+ long unsigned int addr;
+ long long int sum_time;
+ long int min_time;
+ long int max_time;
+ long int min_pid;
+ long int max_pid;
+ long unsigned int cpus[4];
+ nodemask_t nodes;
+};
+
+struct loc_track {
+ long unsigned int max;
+ long unsigned int count;
+ struct location *loc;
+};
+
+enum slab_stat_type {
+ SL_ALL = 0,
+ SL_PARTIAL = 1,
+ SL_CPU = 2,
+ SL_OBJECTS = 3,
+ SL_TOTAL = 4,
+};
+
+struct slab_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kmem_cache *, char *);
+ ssize_t (*store)(struct kmem_cache *, const char *, size_t);
+};
+
+struct saved_alias {
+ struct kmem_cache *s;
+ const char *name;
+ struct saved_alias *next;
+};
+
+enum slab_modes {
+ M_NONE = 0,
+ M_PARTIAL = 1,
+ M_FULL = 2,
+ M_FREE = 3,
+};
+
+enum {
+ MMOP_OFFLINE = 0,
+ MMOP_ONLINE = 1,
+ MMOP_ONLINE_KERNEL = 2,
+ MMOP_ONLINE_MOVABLE = 3,
+};
+
+typedef void (*online_page_callback_t)(struct page *, unsigned int);
+
+struct memory_block {
+ long unsigned int start_section_nr;
+ long unsigned int state;
+ int online_type;
+ int phys_device;
+ struct device dev;
+ int nid;
+};
+
+struct buffer_head;
+
+typedef void bh_end_io_t(struct buffer_head *, int);
+
+struct buffer_head {
+ long unsigned int b_state;
+ struct buffer_head *b_this_page;
+ struct page *b_page;
+ sector_t b_blocknr;
+ size_t b_size;
+ char *b_data;
+ struct block_device *b_bdev;
+ bh_end_io_t *b_end_io;
+ void *b_private;
+ struct list_head b_assoc_buffers;
+ struct address_space *b_assoc_map;
+ atomic_t b_count;
+ spinlock_t b_uptodate_lock;
+};
+
+typedef struct page *new_page_t(struct page *, long unsigned int);
+
+typedef void free_page_t(struct page *, long unsigned int);
+
+enum bh_state_bits {
+ BH_Uptodate = 0,
+ BH_Dirty = 1,
+ BH_Lock = 2,
+ BH_Req = 3,
+ BH_Mapped = 4,
+ BH_New = 5,
+ BH_Async_Read = 6,
+ BH_Async_Write = 7,
+ BH_Delay = 8,
+ BH_Boundary = 9,
+ BH_Write_EIO = 10,
+ BH_Unwritten = 11,
+ BH_Quiet = 12,
+ BH_Meta = 13,
+ BH_Prio = 14,
+ BH_Defer_Completion = 15,
+ BH_PrivateStart = 16,
+};
+
+struct trace_event_raw_mm_migrate_pages {
+ struct trace_entry ent;
+ long unsigned int succeeded;
+ long unsigned int failed;
+ enum migrate_mode mode;
+ int reason;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_mm_migrate_pages {};
+
+typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, enum migrate_mode, int);
+
+struct mem_cgroup_reclaim_cookie {
+ pg_data_t *pgdat;
+ unsigned int generation;
+};
+
+struct mem_cgroup_tree_per_node {
+ struct rb_root rb_root;
+ struct rb_node *rb_rightmost;
+ spinlock_t lock;
+};
+
+struct mem_cgroup_tree {
+ struct mem_cgroup_tree_per_node *rb_tree_per_node[1];
+};
+
+struct mem_cgroup_eventfd_list {
+ struct list_head list;
+ struct eventfd_ctx *eventfd;
+};
+
+struct mem_cgroup_event {
+ struct mem_cgroup *memcg;
+ struct eventfd_ctx *eventfd;
+ struct list_head list;
+ int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *);
+ void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *);
+ poll_table pt;
+ wait_queue_head_t *wqh;
+ wait_queue_entry_t wait;
+ struct work_struct remove;
+};
+
+struct move_charge_struct {
+ spinlock_t lock;
+ struct mm_struct *mm;
+ struct mem_cgroup *from;
+ struct mem_cgroup *to;
+ long unsigned int flags;
+ long unsigned int precharge;
+ long unsigned int moved_charge;
+ long unsigned int moved_swap;
+ struct task_struct *moving_task;
+ wait_queue_head_t waitq;
+};
+
+enum res_type {
+ _MEM = 0,
+ _MEMSWAP = 1,
+ _OOM_TYPE = 2,
+ _KMEM = 3,
+ _TCP = 4,
+};
+
+struct oom_wait_info {
+ struct mem_cgroup *memcg;
+ wait_queue_entry_t wait;
+};
+
+enum oom_status {
+ OOM_SUCCESS = 0,
+ OOM_FAILED = 1,
+ OOM_ASYNC = 2,
+ OOM_SKIPPED = 3,
+};
+
+struct memcg_stock_pcp {
+ struct mem_cgroup *cached;
+ unsigned int nr_pages;
+ struct work_struct work;
+ long unsigned int flags;
+};
+
+struct memcg_kmem_cache_create_work {
+ struct mem_cgroup *memcg;
+ struct kmem_cache *cachep;
+ struct work_struct work;
+};
+
+enum {
+ RES_USAGE = 0,
+ RES_LIMIT = 1,
+ RES_MAX_USAGE = 2,
+ RES_FAILCNT = 3,
+ RES_SOFT_LIMIT = 4,
+};
+
+union mc_target {
+ struct page *page;
+ swp_entry_t ent;
+};
+
+enum mc_target_type {
+ MC_TARGET_NONE = 0,
+ MC_TARGET_PAGE = 1,
+ MC_TARGET_SWAP = 2,
+ MC_TARGET_DEVICE = 3,
+};
+
+struct uncharge_gather {
+ struct mem_cgroup *memcg;
+ long unsigned int nr_pages;
+ long unsigned int pgpgout;
+ long unsigned int nr_kmem;
+ struct page *dummy_page;
+};
+
+typedef long int pao_T_____5;
+
+typedef long int pto_T_____28;
+
+enum vmpressure_levels {
+ VMPRESSURE_LOW = 0,
+ VMPRESSURE_MEDIUM = 1,
+ VMPRESSURE_CRITICAL = 2,
+ VMPRESSURE_NUM_LEVELS = 3,
+};
+
+enum vmpressure_modes {
+ VMPRESSURE_NO_PASSTHROUGH = 0,
+ VMPRESSURE_HIERARCHY = 1,
+ VMPRESSURE_LOCAL = 2,
+ VMPRESSURE_NUM_MODES = 3,
+};
+
+struct vmpressure_event {
+ struct eventfd_ctx *efd;
+ enum vmpressure_levels level;
+ enum vmpressure_modes mode;
+ struct list_head node;
+};
+
+struct swap_cgroup_ctrl {
+ struct page **map;
+ long unsigned int length;
+ spinlock_t lock;
+};
+
+struct swap_cgroup {
+ short unsigned int id;
+};
+
+struct trace_event_raw_test_pages_isolated {
+ struct trace_entry ent;
+ long unsigned int start_pfn;
+ long unsigned int end_pfn;
+ long unsigned int fin_pfn;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_test_pages_isolated {};
+
+typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int);
+
+struct hugetlbfs_inode_info {
+ struct shared_policy policy;
+ struct inode vfs_inode;
+ unsigned int seals;
+};
+
+struct open_how {
+ __u64 flags;
+ __u64 mode;
+ __u64 resolve;
+};
+
+struct fs_context_operations___2;
+
+struct open_flags {
+ int open_flag;
+ umode_t mode;
+ int acc_mode;
+ int intent;
+ int lookup_flags;
+};
+
+typedef __kernel_long_t __kernel_off_t;
+
+typedef __kernel_off_t off_t;
+
+struct file_dedupe_range_info {
+ __s64 dest_fd;
+ __u64 dest_offset;
+ __u64 bytes_deduped;
+ __s32 status;
+ __u32 reserved;
+};
+
+struct file_dedupe_range {
+ __u64 src_offset;
+ __u64 src_length;
+ __u16 dest_count;
+ __u16 reserved1;
+ __u32 reserved2;
+ struct file_dedupe_range_info info[0];
+};
+
+typedef int __kernel_rwf_t;
+
+typedef __kernel_rwf_t rwf_t;
+
+enum vfs_get_super_keying {
+ vfs_get_single_super = 0,
+ vfs_get_single_reconf_super = 1,
+ vfs_get_keyed_super = 2,
+ vfs_get_independent_super = 3,
+};
+
+struct kobj_map;
+
+struct char_device_struct {
+ struct char_device_struct *next;
+ unsigned int major;
+ unsigned int baseminor;
+ int minorct;
+ char name[64];
+ struct cdev *cdev;
+};
+
+struct stat {
+ __kernel_ulong_t st_dev;
+ __kernel_ulong_t st_ino;
+ __kernel_ulong_t st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ __kernel_ulong_t st_rdev;
+ __kernel_long_t st_size;
+ __kernel_long_t st_blksize;
+ __kernel_long_t st_blocks;
+ __kernel_ulong_t st_atime;
+ __kernel_ulong_t st_atime_nsec;
+ __kernel_ulong_t st_mtime;
+ __kernel_ulong_t st_mtime_nsec;
+ __kernel_ulong_t st_ctime;
+ __kernel_ulong_t st_ctime_nsec;
+ __kernel_long_t __unused[3];
+};
+
+struct __old_kernel_stat {
+ short unsigned int st_dev;
+ short unsigned int st_ino;
+ short unsigned int st_mode;
+ short unsigned int st_nlink;
+ short unsigned int st_uid;
+ short unsigned int st_gid;
+ short unsigned int st_rdev;
+ unsigned int st_size;
+ unsigned int st_atime;
+ unsigned int st_mtime;
+ unsigned int st_ctime;
+};
+
+struct statx_timestamp {
+ __s64 tv_sec;
+ __u32 tv_nsec;
+ __s32 __reserved;
+};
+
+struct statx {
+ __u32 stx_mask;
+ __u32 stx_blksize;
+ __u64 stx_attributes;
+ __u32 stx_nlink;
+ __u32 stx_uid;
+ __u32 stx_gid;
+ __u16 stx_mode;
+ __u16 __spare0[1];
+ __u64 stx_ino;
+ __u64 stx_size;
+ __u64 stx_blocks;
+ __u64 stx_attributes_mask;
+ struct statx_timestamp stx_atime;
+ struct statx_timestamp stx_btime;
+ struct statx_timestamp stx_ctime;
+ struct statx_timestamp stx_mtime;
+ __u32 stx_rdev_major;
+ __u32 stx_rdev_minor;
+ __u32 stx_dev_major;
+ __u32 stx_dev_minor;
+ __u64 stx_mnt_id;
+ __u64 __spare2;
+ __u64 __spare3[12];
+};
+
+struct mount;
+
+struct mnt_namespace {
+ atomic_t count;
+ struct ns_common ns;
+ struct mount *root;
+ struct list_head list;
+ spinlock_t ns_lock;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ u64 seq;
+ wait_queue_head_t poll;
+ u64 event;
+ unsigned int mounts;
+ unsigned int pending_mounts;
+};
+
+struct mnt_pcp;
+
+struct mountpoint;
+
+struct mount {
+ struct hlist_node mnt_hash;
+ struct mount *mnt_parent;
+ struct dentry *mnt_mountpoint;
+ struct vfsmount mnt;
+ union {
+ struct callback_head mnt_rcu;
+ struct llist_node mnt_llist;
+ };
+ struct mnt_pcp *mnt_pcp;
+ struct list_head mnt_mounts;
+ struct list_head mnt_child;
+ struct list_head mnt_instance;
+ const char *mnt_devname;
+ struct list_head mnt_list;
+ struct list_head mnt_expire;
+ struct list_head mnt_share;
+ struct list_head mnt_slave_list;
+ struct list_head mnt_slave;
+ struct mount *mnt_master;
+ struct mnt_namespace *mnt_ns;
+ struct mountpoint *mnt_mp;
+ union {
+ struct hlist_node mnt_mp_list;
+ struct hlist_node mnt_umount;
+ };
+ struct list_head mnt_umounting;
+ struct fsnotify_mark_connector *mnt_fsnotify_marks;
+ __u32 mnt_fsnotify_mask;
+ int mnt_id;
+ int mnt_group_id;
+ int mnt_expiry_mark;
+ struct hlist_head mnt_pins;
+ struct hlist_head mnt_stuck_children;
+};
+
+struct mnt_pcp {
+ int mnt_count;
+ int mnt_writers;
+};
+
+struct mountpoint {
+ struct hlist_node m_hash;
+ struct dentry *m_dentry;
+ struct hlist_head m_list;
+ int m_count;
+};
+
+typedef short unsigned int ushort;
+
+struct user_arg_ptr {
+ union {
+ const char * const *native;
+ } ptr;
+};
+
+enum inode_i_mutex_lock_class {
+ I_MUTEX_NORMAL = 0,
+ I_MUTEX_PARENT = 1,
+ I_MUTEX_CHILD = 2,
+ I_MUTEX_XATTR = 3,
+ I_MUTEX_NONDIR2 = 4,
+ I_MUTEX_PARENT2 = 5,
+};
+
+struct pseudo_fs_context {
+ const struct super_operations *ops;
+ const struct xattr_handler **xattr;
+ const struct dentry_operations *dops;
+ long unsigned int magic;
+};
+
+struct name_snapshot {
+ struct qstr name;
+ unsigned char inline_name[32];
+};
+
+struct saved {
+ struct path link;
+ struct delayed_call done;
+ const char *name;
+ unsigned int seq;
+};
+
+struct nameidata {
+ struct path path;
+ struct qstr last;
+ struct path root;
+ struct inode *inode;
+ unsigned int flags;
+ unsigned int seq;
+ unsigned int m_seq;
+ unsigned int r_seq;
+ int last_type;
+ unsigned int depth;
+ int total_link_count;
+ struct saved *stack;
+ struct saved internal[2];
+ struct filename *name;
+ struct nameidata *saved;
+ unsigned int root_seq;
+ int dfd;
+ kuid_t dir_uid;
+ umode_t dir_mode;
+};
+
+enum {
+ LAST_NORM = 0,
+ LAST_ROOT = 1,
+ LAST_DOT = 2,
+ LAST_DOTDOT = 3,
+};
+
+enum {
+ WALK_TRAILING = 1,
+ WALK_MORE = 2,
+ WALK_NOFOLLOW = 4,
+};
+
+struct word_at_a_time {
+ const long unsigned int one_bits;
+ const long unsigned int high_bits;
+};
+
+struct f_owner_ex {
+ int type;
+ __kernel_pid_t pid;
+};
+
+struct flock {
+ short int l_type;
+ short int l_whence;
+ __kernel_off_t l_start;
+ __kernel_off_t l_len;
+ __kernel_pid_t l_pid;
+};
+
+struct file_clone_range {
+ __s64 src_fd;
+ __u64 src_offset;
+ __u64 src_length;
+ __u64 dest_offset;
+};
+
+typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int);
+
+struct fiemap_extent;
+
+struct fiemap_extent_info {
+ unsigned int fi_flags;
+ unsigned int fi_extents_mapped;
+ unsigned int fi_extents_max;
+ struct fiemap_extent *fi_extents_start;
+};
+
+struct space_resv {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len;
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4];
+};
+
+struct fiemap_extent {
+ __u64 fe_logical;
+ __u64 fe_physical;
+ __u64 fe_length;
+ __u64 fe_reserved64[2];
+ __u32 fe_flags;
+ __u32 fe_reserved[3];
+};
+
+struct fiemap {
+ __u64 fm_start;
+ __u64 fm_length;
+ __u32 fm_flags;
+ __u32 fm_mapped_extents;
+ __u32 fm_extent_count;
+ __u32 fm_reserved;
+ struct fiemap_extent fm_extents[0];
+};
+
+struct old_linux_dirent {
+ long unsigned int d_ino;
+ long unsigned int d_offset;
+ short unsigned int d_namlen;
+ char d_name[1];
+};
+
+struct readdir_callback {
+ struct dir_context ctx;
+ struct old_linux_dirent *dirent;
+ int result;
+};
+
+struct linux_dirent {
+ long unsigned int d_ino;
+ long unsigned int d_off;
+ short unsigned int d_reclen;
+ char d_name[1];
+};
+
+struct getdents_callback {
+ struct dir_context ctx;
+ struct linux_dirent *current_dir;
+ int prev_reclen;
+ int count;
+ int error;
+};
+
+struct getdents_callback64 {
+ struct dir_context ctx;
+ struct linux_dirent64 *current_dir;
+ int prev_reclen;
+ int count;
+ int error;
+};
+
+typedef struct {
+ long unsigned int fds_bits[16];
+} __kernel_fd_set;
+
+typedef __kernel_fd_set fd_set;
+
+struct old_timeval32 {
+ old_time32_t tv_sec;
+ s32 tv_usec;
+};
+
+struct poll_table_entry {
+ struct file *filp;
+ __poll_t key;
+ wait_queue_entry_t wait;
+ wait_queue_head_t *wait_address;
+};
+
+struct poll_table_page;
+
+struct poll_wqueues {
+ poll_table pt;
+ struct poll_table_page *table;
+ struct task_struct *polling_task;
+ int triggered;
+ int error;
+ int inline_index;
+ struct poll_table_entry inline_entries[9];
+};
+
+struct poll_table_page {
+ struct poll_table_page *next;
+ struct poll_table_entry *entry;
+ struct poll_table_entry entries[0];
+};
+
+enum poll_time_type {
+ PT_TIMEVAL = 0,
+ PT_OLD_TIMEVAL = 1,
+ PT_TIMESPEC = 2,
+ PT_OLD_TIMESPEC = 3,
+};
+
+typedef struct {
+ long unsigned int *in;
+ long unsigned int *out;
+ long unsigned int *ex;
+ long unsigned int *res_in;
+ long unsigned int *res_out;
+ long unsigned int *res_ex;
+} fd_set_bits;
+
+struct sigset_argpack {
+ sigset_t *p;
+ size_t size;
+};
+
+struct poll_list {
+ struct poll_list *next;
+ int len;
+ struct pollfd entries[0];
+};
+
+enum dentry_d_lock_class {
+ DENTRY_D_LOCK_NORMAL = 0,
+ DENTRY_D_LOCK_NESTED = 1,
+};
+
+struct external_name {
+ union {
+ atomic_t count;
+ struct callback_head head;
+ } u;
+ unsigned char name[0];
+};
+
+enum d_walk_ret {
+ D_WALK_CONTINUE = 0,
+ D_WALK_QUIT = 1,
+ D_WALK_NORETRY = 2,
+ D_WALK_SKIP = 3,
+};
+
+struct check_mount {
+ struct vfsmount *mnt;
+ unsigned int mounted;
+};
+
+struct select_data {
+ struct dentry *start;
+ union {
+ long int found;
+ struct dentry *victim;
+ };
+ struct list_head dispose;
+};
+
+struct fsxattr {
+ __u32 fsx_xflags;
+ __u32 fsx_extsize;
+ __u32 fsx_nextents;
+ __u32 fsx_projid;
+ __u32 fsx_cowextsize;
+ unsigned char fsx_pad[8];
+};
+
+enum file_time_flags {
+ S_ATIME = 1,
+ S_MTIME = 2,
+ S_CTIME = 4,
+ S_VERSION = 8,
+};
+
+struct proc_mounts {
+ struct mnt_namespace *ns;
+ struct path root;
+ int (*show)(struct seq_file *, struct vfsmount *);
+ struct mount cursor;
+};
+
+enum umount_tree_flags {
+ UMOUNT_SYNC = 1,
+ UMOUNT_PROPAGATE = 2,
+ UMOUNT_CONNECTED = 4,
+};
+
+struct simple_transaction_argresp {
+ ssize_t size;
+ char data[0];
+};
+
+struct simple_attr {
+ int (*get)(void *, u64 *);
+ int (*set)(void *, u64);
+ char get_buf[24];
+ char set_buf[24];
+ void *data;
+ const char *fmt;
+ struct mutex mutex;
+};
+
+struct wb_writeback_work {
+ long int nr_pages;
+ struct super_block *sb;
+ long unsigned int *older_than_this;
+ enum writeback_sync_modes sync_mode;
+ unsigned int tagged_writepages: 1;
+ unsigned int for_kupdate: 1;
+ unsigned int range_cyclic: 1;
+ unsigned int for_background: 1;
+ unsigned int for_sync: 1;
+ unsigned int auto_free: 1;
+ enum wb_reason reason;
+ struct list_head list;
+ struct wb_completion *done;
+};
+
+struct trace_event_raw_writeback_page_template {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ long unsigned int index;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_dirty_inode_template {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ long unsigned int state;
+ long unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_inode_foreign_history {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ ino_t cgroup_ino;
+ unsigned int history;
+ char __data[0];
+};
+
+struct trace_event_raw_inode_switch_wbs {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ ino_t old_cgroup_ino;
+ ino_t new_cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_track_foreign_dirty {
+ struct trace_entry ent;
+ char name[32];
+ u64 bdi_id;
+ ino_t ino;
+ unsigned int memcg_id;
+ ino_t cgroup_ino;
+ ino_t page_cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_flush_foreign {
+ struct trace_entry ent;
+ char name[32];
+ ino_t cgroup_ino;
+ unsigned int frn_bdi_id;
+ unsigned int frn_memcg_id;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_write_inode_template {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ int sync_mode;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_work_class {
+ struct trace_entry ent;
+ char name[32];
+ long int nr_pages;
+ dev_t sb_dev;
+ int sync_mode;
+ int for_kupdate;
+ int range_cyclic;
+ int for_background;
+ int reason;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_pages_written {
+ struct trace_entry ent;
+ long int pages;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_class {
+ struct trace_entry ent;
+ char name[32];
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_bdi_register {
+ struct trace_entry ent;
+ char name[32];
+ char __data[0];
+};
+
+struct trace_event_raw_wbc_class {
+ struct trace_entry ent;
+ char name[32];
+ long int nr_to_write;
+ long int pages_skipped;
+ int sync_mode;
+ int for_kupdate;
+ int for_background;
+ int for_reclaim;
+ int range_cyclic;
+ long int range_start;
+ long int range_end;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_queue_io {
+ struct trace_entry ent;
+ char name[32];
+ long unsigned int older;
+ long int age;
+ int moved;
+ int reason;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_global_dirty_state {
+ struct trace_entry ent;
+ long unsigned int nr_dirty;
+ long unsigned int nr_writeback;
+ long unsigned int background_thresh;
+ long unsigned int dirty_thresh;
+ long unsigned int dirty_limit;
+ long unsigned int nr_dirtied;
+ long unsigned int nr_written;
+ char __data[0];
+};
+
+struct trace_event_raw_bdi_dirty_ratelimit {
+ struct trace_entry ent;
+ char bdi[32];
+ long unsigned int write_bw;
+ long unsigned int avg_write_bw;
+ long unsigned int dirty_rate;
+ long unsigned int dirty_ratelimit;
+ long unsigned int task_ratelimit;
+ long unsigned int balanced_dirty_ratelimit;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_balance_dirty_pages {
+ struct trace_entry ent;
+ char bdi[32];
+ long unsigned int limit;
+ long unsigned int setpoint;
+ long unsigned int dirty;
+ long unsigned int bdi_setpoint;
+ long unsigned int bdi_dirty;
+ long unsigned int dirty_ratelimit;
+ long unsigned int task_ratelimit;
+ unsigned int dirtied;
+ unsigned int dirtied_pause;
+ long unsigned int paused;
+ long int pause;
+ long unsigned int period;
+ long int think;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_sb_inodes_requeue {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ long unsigned int state;
+ long unsigned int dirtied_when;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_congest_waited_template {
+ struct trace_entry ent;
+ unsigned int usec_timeout;
+ unsigned int usec_delayed;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_single_inode_template {
+ struct trace_entry ent;
+ char name[32];
+ ino_t ino;
+ long unsigned int state;
+ long unsigned int dirtied_when;
+ long unsigned int writeback_index;
+ long int nr_to_write;
+ long unsigned int wrote;
+ ino_t cgroup_ino;
+ char __data[0];
+};
+
+struct trace_event_raw_writeback_inode_template {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ long unsigned int state;
+ __u16 mode;
+ long unsigned int dirtied_when;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_writeback_page_template {};
+
+struct trace_event_data_offsets_writeback_dirty_inode_template {};
+
+struct trace_event_data_offsets_inode_foreign_history {};
+
+struct trace_event_data_offsets_inode_switch_wbs {};
+
+struct trace_event_data_offsets_track_foreign_dirty {};
+
+struct trace_event_data_offsets_flush_foreign {};
+
+struct trace_event_data_offsets_writeback_write_inode_template {};
+
+struct trace_event_data_offsets_writeback_work_class {};
+
+struct trace_event_data_offsets_writeback_pages_written {};
+
+struct trace_event_data_offsets_writeback_class {};
+
+struct trace_event_data_offsets_writeback_bdi_register {};
+
+struct trace_event_data_offsets_wbc_class {};
+
+struct trace_event_data_offsets_writeback_queue_io {};
+
+struct trace_event_data_offsets_global_dirty_state {};
+
+struct trace_event_data_offsets_bdi_dirty_ratelimit {};
+
+struct trace_event_data_offsets_balance_dirty_pages {};
+
+struct trace_event_data_offsets_writeback_sb_inodes_requeue {};
+
+struct trace_event_data_offsets_writeback_congest_waited_template {};
+
+struct trace_event_data_offsets_writeback_single_inode_template {};
+
+struct trace_event_data_offsets_writeback_inode_template {};
+
+typedef void (*btf_trace_writeback_dirty_page)(void *, struct page *, struct address_space *);
+
+typedef void (*btf_trace_wait_on_page_writeback)(void *, struct page *, struct address_space *);
+
+typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int);
+
+typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int);
+
+typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int);
+
+typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int);
+
+typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *);
+
+typedef void (*btf_trace_track_foreign_dirty)(void *, struct page *, struct bdi_writeback *);
+
+typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *);
+
+typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *);
+
+typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *);
+
+typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *);
+
+typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *);
+
+typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *);
+
+typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *);
+
+typedef void (*btf_trace_writeback_pages_written)(void *, long int);
+
+typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *);
+
+typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *);
+
+typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *);
+
+typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, int);
+
+typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int);
+
+typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *);
+
+typedef void (*btf_trace_writeback_congestion_wait)(void *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_writeback_wait_iff_congested)(void *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int);
+
+typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int);
+
+typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *);
+
+typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *);
+
+typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *);
+
+typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *);
+
+typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *);
+
+struct inode_switch_wbs_context {
+ struct inode *inode;
+ struct bdi_writeback *new_wb;
+ struct callback_head callback_head;
+ struct work_struct work;
+};
+
+struct splice_desc {
+ size_t total_len;
+ unsigned int len;
+ unsigned int flags;
+ union {
+ void *userptr;
+ struct file *file;
+ void *data;
+ } u;
+ loff_t pos;
+ loff_t *opos;
+ size_t num_spliced;
+ bool need_wakeup;
+};
+
+typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *);
+
+typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *);
+
+struct utimbuf {
+ __kernel_old_time_t actime;
+ __kernel_old_time_t modtime;
+};
+
+typedef int __kernel_daddr_t;
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+struct statfs {
+ __kernel_long_t f_type;
+ __kernel_long_t f_bsize;
+ __kernel_long_t f_blocks;
+ __kernel_long_t f_bfree;
+ __kernel_long_t f_bavail;
+ __kernel_long_t f_files;
+ __kernel_long_t f_ffree;
+ __kernel_fsid_t f_fsid;
+ __kernel_long_t f_namelen;
+ __kernel_long_t f_frsize;
+ __kernel_long_t f_flags;
+ __kernel_long_t f_spare[4];
+};
+
+struct statfs64 {
+ __kernel_long_t f_type;
+ __kernel_long_t f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __kernel_long_t f_namelen;
+ __kernel_long_t f_frsize;
+ __kernel_long_t f_flags;
+ __kernel_long_t f_spare[4];
+};
+
+struct fs_pin {
+ wait_queue_head_t wait;
+ int done;
+ struct hlist_node s_list;
+ struct hlist_node m_list;
+ void (*kill)(struct fs_pin *);
+};
+
+typedef struct ns_common *ns_get_path_helper_t(void *);
+
+struct ns_get_path_task_args {
+ const struct proc_ns_operations *ns_ops;
+ struct task_struct *task;
+};
+
+enum legacy_fs_param {
+ LEGACY_FS_UNSET_PARAMS = 0,
+ LEGACY_FS_MONOLITHIC_PARAMS = 1,
+ LEGACY_FS_INDIVIDUAL_PARAMS = 2,
+};
+
+struct legacy_fs_context {
+ char *legacy_data;
+ size_t data_size;
+ enum legacy_fs_param param_type;
+};
+
+enum fsconfig_command {
+ FSCONFIG_SET_FLAG = 0,
+ FSCONFIG_SET_STRING = 1,
+ FSCONFIG_SET_BINARY = 2,
+ FSCONFIG_SET_PATH = 3,
+ FSCONFIG_SET_PATH_EMPTY = 4,
+ FSCONFIG_SET_FD = 5,
+ FSCONFIG_CMD_CREATE = 6,
+ FSCONFIG_CMD_RECONFIGURE = 7,
+};
+
+struct dax_device;
+
+struct iomap_page_ops;
+
+struct iomap {
+ u64 addr;
+ loff_t offset;
+ u64 length;
+ u16 type;
+ u16 flags;
+ struct block_device *bdev;
+ struct dax_device *dax_dev;
+ void *inline_data;
+ void *private;
+ const struct iomap_page_ops *page_ops;
+};
+
+struct iomap_page_ops {
+ int (*page_prepare)(struct inode *, loff_t, unsigned int, struct iomap *);
+ void (*page_done)(struct inode *, loff_t, unsigned int, struct page *, struct iomap *);
+};
+
+enum blktrace_act {
+ __BLK_TA_QUEUE = 1,
+ __BLK_TA_BACKMERGE = 2,
+ __BLK_TA_FRONTMERGE = 3,
+ __BLK_TA_GETRQ = 4,
+ __BLK_TA_SLEEPRQ = 5,
+ __BLK_TA_REQUEUE = 6,
+ __BLK_TA_ISSUE = 7,
+ __BLK_TA_COMPLETE = 8,
+ __BLK_TA_PLUG = 9,
+ __BLK_TA_UNPLUG_IO = 10,
+ __BLK_TA_UNPLUG_TIMER = 11,
+ __BLK_TA_INSERT = 12,
+ __BLK_TA_SPLIT = 13,
+ __BLK_TA_BOUNCE = 14,
+ __BLK_TA_REMAP = 15,
+ __BLK_TA_ABORT = 16,
+ __BLK_TA_DRV_DATA = 17,
+ __BLK_TA_CGROUP = 256,
+};
+
+struct decrypt_bh_ctx {
+ struct work_struct work;
+ struct buffer_head *bh;
+};
+
+struct bh_lru {
+ struct buffer_head *bhs[16];
+};
+
+struct bh_accounting {
+ int nr;
+ int ratelimit;
+};
+
+typedef struct buffer_head *pto_T_____29;
+
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1,
+ DISK_EVENT_EJECT_REQUEST = 2,
+};
+
+struct blk_integrity_profile;
+
+struct blk_integrity {
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+};
+
+enum {
+ BIOSET_NEED_BVECS = 1,
+ BIOSET_NEED_RESCUER = 2,
+};
+
+struct bdev_inode {
+ struct block_device bdev;
+ struct inode vfs_inode;
+};
+
+struct blkdev_dio {
+ union {
+ struct kiocb *iocb;
+ struct task_struct *waiter;
+ };
+ size_t size;
+ atomic_t ref;
+ bool multi_bio: 1;
+ bool should_dirty: 1;
+ bool is_sync: 1;
+ struct bio bio;
+};
+
+struct bd_holder_disk {
+ struct list_head list;
+ struct gendisk *disk;
+ int refcnt;
+};
+
+typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *);
+
+typedef void dio_submit_t(struct bio *, struct inode *, loff_t);
+
+enum {
+ DIO_LOCKING = 1,
+ DIO_SKIP_HOLES = 2,
+};
+
+struct dio_submit {
+ struct bio *bio;
+ unsigned int blkbits;
+ unsigned int blkfactor;
+ unsigned int start_zero_done;
+ int pages_in_io;
+ sector_t block_in_file;
+ unsigned int blocks_available;
+ int reap_counter;
+ sector_t final_block_in_request;
+ int boundary;
+ get_block_t *get_block;
+ dio_submit_t *submit_io;
+ loff_t logical_offset_in_bio;
+ sector_t final_block_in_bio;
+ sector_t next_block_for_io;
+ struct page *cur_page;
+ unsigned int cur_page_offset;
+ unsigned int cur_page_len;
+ sector_t cur_page_block;
+ loff_t cur_page_fs_offset;
+ struct iov_iter *iter;
+ unsigned int head;
+ unsigned int tail;
+ size_t from;
+ size_t to;
+};
+
+struct dio {
+ int flags;
+ int op;
+ int op_flags;
+ blk_qc_t bio_cookie;
+ struct gendisk *bio_disk;
+ struct inode *inode;
+ loff_t i_size;
+ dio_iodone_t *end_io;
+ void *private;
+ spinlock_t bio_lock;
+ int page_errors;
+ int is_async;
+ bool defer_completion;
+ bool should_dirty;
+ int io_error;
+ long unsigned int refcount;
+ struct bio *bio_list;
+ struct task_struct *waiter;
+ struct kiocb *iocb;
+ ssize_t result;
+ union {
+ struct page *pages[64];
+ struct work_struct complete_work;
+ };
+ long: 64;
+ long: 64;
+};
+
+struct bvec_iter_all {
+ struct bio_vec bv;
+ int idx;
+ unsigned int done;
+};
+
+struct mpage_readpage_args {
+ struct bio *bio;
+ struct page *page;
+ unsigned int nr_pages;
+ bool is_readahead;
+ sector_t last_block_in_bio;
+ struct buffer_head map_bh;
+ long unsigned int first_logical_block;
+ get_block_t *get_block;
+};
+
+struct mpage_data {
+ struct bio *bio;
+ sector_t last_block_in_bio;
+ get_block_t *get_block;
+ unsigned int use_writepage;
+};
+
+typedef u32 nlink_t;
+
+typedef int (*proc_write_t)(struct file *, char *, size_t);
+
+struct proc_dir_entry {
+ atomic_t in_use;
+ refcount_t refcnt;
+ struct list_head pde_openers;
+ spinlock_t pde_unload_lock;
+ struct completion *pde_unload_completion;
+ const struct inode_operations *proc_iops;
+ union {
+ const struct proc_ops *proc_ops;
+ const struct file_operations *proc_dir_ops;
+ };
+ const struct dentry_operations *proc_dops;
+ union {
+ const struct seq_operations *seq_ops;
+ int (*single_show)(struct seq_file *, void *);
+ };
+ proc_write_t write;
+ void *data;
+ unsigned int state_size;
+ unsigned int low_ino;
+ nlink_t nlink;
+ kuid_t uid;
+ kgid_t gid;
+ loff_t size;
+ struct proc_dir_entry *parent;
+ struct rb_root subdir;
+ struct rb_node subdir_node;
+ char *name;
+ umode_t mode;
+ u8 flags;
+ u8 namelen;
+ char inline_name[0];
+};
+
+union proc_op {
+ int (*proc_get_link)(struct dentry *, struct path *);
+ int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *);
+ const char *lsm;
+};
+
+struct proc_inode {
+ struct pid *pid;
+ unsigned int fd;
+ union proc_op op;
+ struct proc_dir_entry *pde;
+ struct ctl_table_header *sysctl;
+ struct ctl_table *sysctl_entry;
+ struct hlist_node sibling_inodes;
+ const struct proc_ns_operations *ns_ops;
+ struct inode vfs_inode;
+};
+
+struct proc_fs_opts {
+ int flag;
+ const char *str;
+};
+
+struct file_handle {
+ __u32 handle_bytes;
+ int handle_type;
+ unsigned char f_handle[0];
+};
+
+struct inotify_inode_mark {
+ struct fsnotify_mark fsn_mark;
+ int wd;
+};
+
+struct dnotify_struct {
+ struct dnotify_struct *dn_next;
+ __u32 dn_mask;
+ int dn_fd;
+ struct file *dn_filp;
+ fl_owner_t dn_owner;
+};
+
+struct dnotify_mark {
+ struct fsnotify_mark fsn_mark;
+ struct dnotify_struct *dn;
+};
+
+struct inotify_event_info {
+ struct fsnotify_event fse;
+ u32 mask;
+ int wd;
+ u32 sync_cookie;
+ int name_len;
+ char name[0];
+};
+
+struct inotify_event {
+ __s32 wd;
+ __u32 mask;
+ __u32 cookie;
+ __u32 len;
+ char name[0];
+};
+
+enum {
+ FAN_EVENT_INIT = 0,
+ FAN_EVENT_REPORTED = 1,
+ FAN_EVENT_ANSWERED = 2,
+ FAN_EVENT_CANCELED = 3,
+};
+
+struct fanotify_fh {
+ unsigned char buf[12];
+ u8 type;
+ u8 len;
+ short: 16;
+};
+
+enum fanotify_event_type {
+ FANOTIFY_EVENT_TYPE_FID = 0,
+ FANOTIFY_EVENT_TYPE_FID_NAME = 1,
+ FANOTIFY_EVENT_TYPE_PATH = 2,
+ FANOTIFY_EVENT_TYPE_PATH_PERM = 3,
+};
+
+struct fanotify_event {
+ struct fsnotify_event fse;
+ u32 mask;
+ enum fanotify_event_type type;
+ struct pid *pid;
+};
+
+struct fanotify_fid_event {
+ struct fanotify_event fae;
+ __kernel_fsid_t fsid;
+ struct fanotify_fh object_fh;
+};
+
+struct fanotify_name_event {
+ struct fanotify_event fae;
+ __kernel_fsid_t fsid;
+ struct fanotify_fh dir_fh;
+ u8 name_len;
+ char name[0];
+};
+
+struct fanotify_path_event {
+ struct fanotify_event fae;
+ struct path path;
+};
+
+struct fanotify_perm_event {
+ struct fanotify_event fae;
+ struct path path;
+ short unsigned int response;
+ short unsigned int state;
+ int fd;
+};
+
+struct fanotify_event_metadata {
+ __u32 event_len;
+ __u8 vers;
+ __u8 reserved;
+ __u16 metadata_len;
+ __u64 mask;
+ __s32 fd;
+ __s32 pid;
+};
+
+struct fanotify_event_info_header {
+ __u8 info_type;
+ __u8 pad;
+ __u16 len;
+};
+
+struct fanotify_event_info_fid {
+ struct fanotify_event_info_header hdr;
+ __kernel_fsid_t fsid;
+ unsigned char handle[0];
+};
+
+struct fanotify_response {
+ __s32 fd;
+ __u32 response;
+};
+
+struct epoll_event {
+ __poll_t events;
+ __u64 data;
+} __attribute__((packed));
+
+struct wake_irq;
+
+struct wakeup_source {
+ const char *name;
+ int id;
+ struct list_head entry;
+ spinlock_t lock;
+ struct wake_irq *wakeirq;
+ struct timer_list timer;
+ long unsigned int timer_expires;
+ ktime_t total_time;
+ ktime_t max_time;
+ ktime_t last_time;
+ ktime_t start_prevent_time;
+ ktime_t prevent_sleep_time;
+ long unsigned int event_count;
+ long unsigned int active_count;
+ long unsigned int relax_count;
+ long unsigned int expire_count;
+ long unsigned int wakeup_count;
+ struct device *dev;
+ bool active: 1;
+ bool autosleep_enabled: 1;
+};
+
+struct epoll_filefd {
+ struct file *file;
+ int fd;
+} __attribute__((packed));
+
+struct nested_call_node {
+ struct list_head llink;
+ void *cookie;
+ void *ctx;
+};
+
+struct nested_calls {
+ struct list_head tasks_call_list;
+ spinlock_t lock;
+};
+
+struct eventpoll;
+
+struct epitem {
+ union {
+ struct rb_node rbn;
+ struct callback_head rcu;
+ };
+ struct list_head rdllink;
+ struct epitem *next;
+ struct epoll_filefd ffd;
+ int nwait;
+ struct list_head pwqlist;
+ struct eventpoll *ep;
+ struct list_head fllink;
+ struct wakeup_source *ws;
+ struct epoll_event event;
+};
+
+struct eventpoll {
+ struct mutex mtx;
+ wait_queue_head_t wq;
+ wait_queue_head_t poll_wait;
+ struct list_head rdllist;
+ rwlock_t lock;
+ struct rb_root_cached rbr;
+ struct epitem *ovflist;
+ struct wakeup_source *ws;
+ struct user_struct *user;
+ struct file *file;
+ struct list_head visited_list_link;
+ int visited;
+ unsigned int napi_id;
+ u8 nests;
+};
+
+struct eppoll_entry {
+ struct list_head llink;
+ struct epitem *base;
+ wait_queue_entry_t wait;
+ wait_queue_head_t *whead;
+};
+
+struct ep_pqueue {
+ poll_table pt;
+ struct epitem *epi;
+};
+
+struct ep_send_events_data {
+ int maxevents;
+ struct epoll_event *events;
+ int res;
+};
+
+struct signalfd_siginfo {
+ __u32 ssi_signo;
+ __s32 ssi_errno;
+ __s32 ssi_code;
+ __u32 ssi_pid;
+ __u32 ssi_uid;
+ __s32 ssi_fd;
+ __u32 ssi_tid;
+ __u32 ssi_band;
+ __u32 ssi_overrun;
+ __u32 ssi_trapno;
+ __s32 ssi_status;
+ __s32 ssi_int;
+ __u64 ssi_ptr;
+ __u64 ssi_utime;
+ __u64 ssi_stime;
+ __u64 ssi_addr;
+ __u16 ssi_addr_lsb;
+ __u16 __pad2;
+ __s32 ssi_syscall;
+ __u64 ssi_call_addr;
+ __u32 ssi_arch;
+ __u8 __pad[28];
+};
+
+struct signalfd_ctx {
+ sigset_t sigmask;
+};
+
+struct timerfd_ctx {
+ union {
+ struct hrtimer tmr;
+ struct alarm alarm;
+ } t;
+ ktime_t tintv;
+ ktime_t moffs;
+ wait_queue_head_t wqh;
+ u64 ticks;
+ int clockid;
+ short unsigned int expired;
+ short unsigned int settime_flags;
+ struct callback_head rcu;
+ struct list_head clist;
+ spinlock_t cancel_lock;
+ bool might_cancel;
+};
+
+struct eventfd_ctx___2 {
+ struct kref kref;
+ wait_queue_head_t wqh;
+ __u64 count;
+ unsigned int flags;
+ int id;
+};
+
+struct kioctx;
+
+struct kioctx_table {
+ struct callback_head rcu;
+ unsigned int nr;
+ struct kioctx *table[0];
+};
+
+typedef __kernel_ulong_t aio_context_t;
+
+enum {
+ IOCB_CMD_PREAD = 0,
+ IOCB_CMD_PWRITE = 1,
+ IOCB_CMD_FSYNC = 2,
+ IOCB_CMD_FDSYNC = 3,
+ IOCB_CMD_POLL = 5,
+ IOCB_CMD_NOOP = 6,
+ IOCB_CMD_PREADV = 7,
+ IOCB_CMD_PWRITEV = 8,
+};
+
+struct io_event {
+ __u64 data;
+ __u64 obj;
+ __s64 res;
+ __s64 res2;
+};
+
+struct iocb {
+ __u64 aio_data;
+ __u32 aio_key;
+ __kernel_rwf_t aio_rw_flags;
+ __u16 aio_lio_opcode;
+ __s16 aio_reqprio;
+ __u32 aio_fildes;
+ __u64 aio_buf;
+ __u64 aio_nbytes;
+ __s64 aio_offset;
+ __u64 aio_reserved2;
+ __u32 aio_flags;
+ __u32 aio_resfd;
+};
+
+typedef int kiocb_cancel_fn(struct kiocb *);
+
+struct aio_ring {
+ unsigned int id;
+ unsigned int nr;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int magic;
+ unsigned int compat_features;
+ unsigned int incompat_features;
+ unsigned int header_length;
+ struct io_event io_events[0];
+};
+
+struct kioctx_cpu;
+
+struct ctx_rq_wait;
+
+struct kioctx {
+ struct percpu_ref users;
+ atomic_t dead;
+ struct percpu_ref reqs;
+ long unsigned int user_id;
+ struct kioctx_cpu *cpu;
+ unsigned int req_batch;
+ unsigned int max_reqs;
+ unsigned int nr_events;
+ long unsigned int mmap_base;
+ long unsigned int mmap_size;
+ struct page **ring_pages;
+ long int nr_pages;
+ struct rcu_work free_rwork;
+ struct ctx_rq_wait *rq_wait;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct {
+ atomic_t reqs_available;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ struct {
+ spinlock_t ctx_lock;
+ struct list_head active_reqs;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ struct {
+ struct mutex ring_lock;
+ wait_queue_head_t wait;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ struct {
+ unsigned int tail;
+ unsigned int completed_events;
+ spinlock_t completion_lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ struct page *internal_pages[8];
+ struct file *aio_ring_file;
+ unsigned int id;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct kioctx_cpu {
+ unsigned int reqs_available;
+};
+
+struct ctx_rq_wait {
+ struct completion comp;
+ atomic_t count;
+};
+
+struct fsync_iocb {
+ struct file *file;
+ struct work_struct work;
+ bool datasync;
+ struct cred *creds;
+};
+
+struct poll_iocb {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
+ bool done;
+ bool cancelled;
+ struct wait_queue_entry wait;
+ struct work_struct work;
+};
+
+struct aio_kiocb {
+ union {
+ struct file *ki_filp;
+ struct kiocb rw;
+ struct fsync_iocb fsync;
+ struct poll_iocb poll;
+ };
+ struct kioctx *ki_ctx;
+ kiocb_cancel_fn *ki_cancel;
+ struct io_event ki_res;
+ struct list_head ki_list;
+ refcount_t ki_refcnt;
+ struct eventfd_ctx *ki_eventfd;
+};
+
+struct aio_poll_table {
+ struct poll_table_struct pt;
+ struct aio_kiocb *iocb;
+ int error;
+};
+
+struct __aio_sigset {
+ const sigset_t *sigmask;
+ size_t sigsetsize;
+};
+
+enum {
+ PERCPU_REF_INIT_ATOMIC = 1,
+ PERCPU_REF_INIT_DEAD = 2,
+ PERCPU_REF_ALLOW_REINIT = 4,
+};
+
+struct user_msghdr {
+ void *msg_name;
+ int msg_namelen;
+ struct iovec *msg_iov;
+ __kernel_size_t msg_iovlen;
+ void *msg_control;
+ __kernel_size_t msg_controllen;
+ unsigned int msg_flags;
+};
+
+struct scm_fp_list {
+ short int count;
+ short int max;
+ struct user_struct *user;
+ struct file *fp[253];
+};
+
+struct unix_skb_parms {
+ struct pid *pid;
+ kuid_t uid;
+ kgid_t gid;
+ struct scm_fp_list *fp;
+ u32 consumed;
+};
+
+struct trace_event_raw_io_uring_create {
+ struct trace_entry ent;
+ int fd;
+ void *ctx;
+ u32 sq_entries;
+ u32 cq_entries;
+ u32 flags;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_register {
+ struct trace_entry ent;
+ void *ctx;
+ unsigned int opcode;
+ unsigned int nr_files;
+ unsigned int nr_bufs;
+ bool eventfd;
+ long int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_file_get {
+ struct trace_entry ent;
+ void *ctx;
+ int fd;
+ char __data[0];
+};
+
+struct io_wq_work;
+
+struct trace_event_raw_io_uring_queue_async_work {
+ struct trace_entry ent;
+ void *ctx;
+ int rw;
+ void *req;
+ struct io_wq_work *work;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work {
+ struct io_wq_work_node list;
+ struct files_struct *files;
+ struct mm_struct *mm;
+ const struct cred *creds;
+ struct fs_struct *fs;
+ unsigned int flags;
+ pid_t task_pid;
+};
+
+struct trace_event_raw_io_uring_defer {
+ struct trace_entry ent;
+ void *ctx;
+ void *req;
+ long long unsigned int data;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_link {
+ struct trace_entry ent;
+ void *ctx;
+ void *req;
+ void *target_req;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_cqring_wait {
+ struct trace_entry ent;
+ void *ctx;
+ int min_events;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_fail_link {
+ struct trace_entry ent;
+ void *req;
+ void *link;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_complete {
+ struct trace_entry ent;
+ void *ctx;
+ u64 user_data;
+ long int res;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_submit_sqe {
+ struct trace_entry ent;
+ void *ctx;
+ u8 opcode;
+ u64 user_data;
+ bool force_nonblock;
+ bool sq_thread;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_poll_arm {
+ struct trace_entry ent;
+ void *ctx;
+ u8 opcode;
+ u64 user_data;
+ int mask;
+ int events;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_poll_wake {
+ struct trace_entry ent;
+ void *ctx;
+ u8 opcode;
+ u64 user_data;
+ int mask;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_task_add {
+ struct trace_entry ent;
+ void *ctx;
+ u8 opcode;
+ u64 user_data;
+ int mask;
+ char __data[0];
+};
+
+struct trace_event_raw_io_uring_task_run {
+ struct trace_entry ent;
+ void *ctx;
+ u8 opcode;
+ u64 user_data;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_io_uring_create {};
+
+struct trace_event_data_offsets_io_uring_register {};
+
+struct trace_event_data_offsets_io_uring_file_get {};
+
+struct trace_event_data_offsets_io_uring_queue_async_work {};
+
+struct trace_event_data_offsets_io_uring_defer {};
+
+struct trace_event_data_offsets_io_uring_link {};
+
+struct trace_event_data_offsets_io_uring_cqring_wait {};
+
+struct trace_event_data_offsets_io_uring_fail_link {};
+
+struct trace_event_data_offsets_io_uring_complete {};
+
+struct trace_event_data_offsets_io_uring_submit_sqe {};
+
+struct trace_event_data_offsets_io_uring_poll_arm {};
+
+struct trace_event_data_offsets_io_uring_poll_wake {};
+
+struct trace_event_data_offsets_io_uring_task_add {};
+
+struct trace_event_data_offsets_io_uring_task_run {};
+
+typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32);
+
+typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, bool, long int);
+
+typedef void (*btf_trace_io_uring_file_get)(void *, void *, int);
+
+typedef void (*btf_trace_io_uring_queue_async_work)(void *, void *, int, void *, struct io_wq_work *, unsigned int);
+
+typedef void (*btf_trace_io_uring_defer)(void *, void *, void *, long long unsigned int);
+
+typedef void (*btf_trace_io_uring_link)(void *, void *, void *, void *);
+
+typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int);
+
+typedef void (*btf_trace_io_uring_fail_link)(void *, void *, void *);
+
+typedef void (*btf_trace_io_uring_complete)(void *, void *, u64, long int);
+
+typedef void (*btf_trace_io_uring_submit_sqe)(void *, void *, u8, u64, bool, bool);
+
+typedef void (*btf_trace_io_uring_poll_arm)(void *, void *, u8, u64, int, int);
+
+typedef void (*btf_trace_io_uring_poll_wake)(void *, void *, u8, u64, int);
+
+typedef void (*btf_trace_io_uring_task_add)(void *, void *, u8, u64, int);
+
+typedef void (*btf_trace_io_uring_task_run)(void *, void *, u8, u64);
+
+struct io_uring_sqe {
+ __u8 opcode;
+ __u8 flags;
+ __u16 ioprio;
+ __s32 fd;
+ union {
+ __u64 off;
+ __u64 addr2;
+ };
+ union {
+ __u64 addr;
+ __u64 splice_off_in;
+ };
+ __u32 len;
+ union {
+ __kernel_rwf_t rw_flags;
+ __u32 fsync_flags;
+ __u16 poll_events;
+ __u32 sync_range_flags;
+ __u32 msg_flags;
+ __u32 timeout_flags;
+ __u32 accept_flags;
+ __u32 cancel_flags;
+ __u32 open_flags;
+ __u32 statx_flags;
+ __u32 fadvise_advice;
+ __u32 splice_flags;
+ };
+ __u64 user_data;
+ union {
+ struct {
+ union {
+ __u16 buf_index;
+ __u16 buf_group;
+ };
+ __u16 personality;
+ __s32 splice_fd_in;
+ };
+ __u64 __pad2[3];
+ };
+};
+
+enum {
+ IOSQE_FIXED_FILE_BIT = 0,
+ IOSQE_IO_DRAIN_BIT = 1,
+ IOSQE_IO_LINK_BIT = 2,
+ IOSQE_IO_HARDLINK_BIT = 3,
+ IOSQE_ASYNC_BIT = 4,
+ IOSQE_BUFFER_SELECT_BIT = 5,
+};
+
+enum {
+ IORING_OP_NOP = 0,
+ IORING_OP_READV = 1,
+ IORING_OP_WRITEV = 2,
+ IORING_OP_FSYNC = 3,
+ IORING_OP_READ_FIXED = 4,
+ IORING_OP_WRITE_FIXED = 5,
+ IORING_OP_POLL_ADD = 6,
+ IORING_OP_POLL_REMOVE = 7,
+ IORING_OP_SYNC_FILE_RANGE = 8,
+ IORING_OP_SENDMSG = 9,
+ IORING_OP_RECVMSG = 10,
+ IORING_OP_TIMEOUT = 11,
+ IORING_OP_TIMEOUT_REMOVE = 12,
+ IORING_OP_ACCEPT = 13,
+ IORING_OP_ASYNC_CANCEL = 14,
+ IORING_OP_LINK_TIMEOUT = 15,
+ IORING_OP_CONNECT = 16,
+ IORING_OP_FALLOCATE = 17,
+ IORING_OP_OPENAT = 18,
+ IORING_OP_CLOSE = 19,
+ IORING_OP_FILES_UPDATE = 20,
+ IORING_OP_STATX = 21,
+ IORING_OP_READ = 22,
+ IORING_OP_WRITE = 23,
+ IORING_OP_FADVISE = 24,
+ IORING_OP_MADVISE = 25,
+ IORING_OP_SEND = 26,
+ IORING_OP_RECV = 27,
+ IORING_OP_OPENAT2 = 28,
+ IORING_OP_EPOLL_CTL = 29,
+ IORING_OP_SPLICE = 30,
+ IORING_OP_PROVIDE_BUFFERS = 31,
+ IORING_OP_REMOVE_BUFFERS = 32,
+ IORING_OP_TEE = 33,
+ IORING_OP_LAST = 34,
+};
+
+struct io_uring_cqe {
+ __u64 user_data;
+ __s32 res;
+ __u32 flags;
+};
+
+enum {
+ IORING_CQE_BUFFER_SHIFT = 16,
+};
+
+struct io_sqring_offsets {
+ __u32 head;
+ __u32 tail;
+ __u32 ring_mask;
+ __u32 ring_entries;
+ __u32 flags;
+ __u32 dropped;
+ __u32 array;
+ __u32 resv1;
+ __u64 resv2;
+};
+
+struct io_cqring_offsets {
+ __u32 head;
+ __u32 tail;
+ __u32 ring_mask;
+ __u32 ring_entries;
+ __u32 overflow;
+ __u32 cqes;
+ __u32 flags;
+ __u32 resv1;
+ __u64 resv2;
+};
+
+struct io_uring_params {
+ __u32 sq_entries;
+ __u32 cq_entries;
+ __u32 flags;
+ __u32 sq_thread_cpu;
+ __u32 sq_thread_idle;
+ __u32 features;
+ __u32 wq_fd;
+ __u32 resv[3];
+ struct io_sqring_offsets sq_off;
+ struct io_cqring_offsets cq_off;
+};
+
+struct io_uring_files_update {
+ __u32 offset;
+ __u32 resv;
+ __u64 fds;
+};
+
+struct io_uring_probe_op {
+ __u8 op;
+ __u8 resv;
+ __u16 flags;
+ __u32 resv2;
+};
+
+struct io_uring_probe {
+ __u8 last_op;
+ __u8 ops_len;
+ __u16 resv;
+ __u32 resv2[3];
+ struct io_uring_probe_op ops[0];
+};
+
+enum {
+ IO_WQ_WORK_CANCEL = 1,
+ IO_WQ_WORK_HASHED = 4,
+ IO_WQ_WORK_UNBOUND = 32,
+ IO_WQ_WORK_NO_CANCEL = 256,
+ IO_WQ_WORK_CONCURRENT = 512,
+ IO_WQ_HASH_SHIFT = 24,
+};
+
+enum io_wq_cancel {
+ IO_WQ_CANCEL_OK = 0,
+ IO_WQ_CANCEL_RUNNING = 1,
+ IO_WQ_CANCEL_NOTFOUND = 2,
+};
+
+typedef void free_work_fn(struct io_wq_work *);
+
+typedef void io_wq_work_fn(struct io_wq_work **);
+
+struct io_wq_data {
+ struct user_struct *user;
+ io_wq_work_fn *do_work;
+ free_work_fn *free_work;
+};
+
+struct io_uring {
+ u32 head;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ u32 tail;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct io_rings {
+ struct io_uring sq;
+ struct io_uring cq;
+ u32 sq_ring_mask;
+ u32 cq_ring_mask;
+ u32 sq_ring_entries;
+ u32 cq_ring_entries;
+ u32 sq_dropped;
+ u32 sq_flags;
+ u32 cq_flags;
+ u32 cq_overflow;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct io_uring_cqe cqes[0];
+};
+
+struct io_mapped_ubuf {
+ u64 ubuf;
+ size_t len;
+ struct bio_vec *bvec;
+ unsigned int nr_bvecs;
+};
+
+struct fixed_file_table {
+ struct file **files;
+};
+
+struct fixed_file_data;
+
+struct fixed_file_ref_node {
+ struct percpu_ref refs;
+ struct list_head node;
+ struct list_head file_list;
+ struct fixed_file_data *file_data;
+ struct llist_node llist;
+};
+
+struct io_ring_ctx;
+
+struct fixed_file_data {
+ struct fixed_file_table *table;
+ struct io_ring_ctx *ctx;
+ struct percpu_ref *cur_refs;
+ struct percpu_ref refs;
+ struct completion done;
+ struct list_head ref_list;
+ spinlock_t lock;
+};
+
+struct io_wq;
+
+struct io_kiocb;
+
+struct io_ring_ctx {
+ struct {
+ struct percpu_ref refs;
+ long: 64;
+ };
+ struct {
+ unsigned int flags;
+ unsigned int compat: 1;
+ unsigned int account_mem: 1;
+ unsigned int cq_overflow_flushed: 1;
+ unsigned int drain_next: 1;
+ unsigned int eventfd_async: 1;
+ u32 *sq_array;
+ unsigned int cached_sq_head;
+ unsigned int sq_entries;
+ unsigned int sq_mask;
+ unsigned int sq_thread_idle;
+ unsigned int cached_sq_dropped;
+ atomic_t cached_cq_overflow;
+ long unsigned int sq_check_overflow;
+ struct list_head defer_list;
+ struct list_head timeout_list;
+ struct list_head cq_overflow_list;
+ wait_queue_head_t inflight_wait;
+ struct io_uring_sqe *sq_sqes;
+ long: 64;
+ };
+ struct io_rings *rings;
+ struct io_wq *io_wq;
+ struct task_struct *sqo_thread;
+ struct mm_struct *sqo_mm;
+ wait_queue_head_t sqo_wait;
+ struct fixed_file_data *file_data;
+ unsigned int nr_user_files;
+ int ring_fd;
+ struct file *ring_file;
+ unsigned int nr_user_bufs;
+ struct io_mapped_ubuf *user_bufs;
+ struct user_struct *user;
+ const struct cred *creds;
+ struct completion ref_comp;
+ struct completion sq_thread_comp;
+ struct io_kiocb *fallback_req;
+ struct socket *ring_sock;
+ struct idr io_buffer_idr;
+ struct idr personality_idr;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct {
+ unsigned int cached_cq_tail;
+ unsigned int cq_entries;
+ unsigned int cq_mask;
+ atomic_t cq_timeouts;
+ long unsigned int cq_check_overflow;
+ struct wait_queue_head cq_wait;
+ struct fasync_struct *cq_fasync;
+ struct eventfd_ctx *cq_ev_fd;
+ long: 64;
+ };
+ struct {
+ struct mutex uring_lock;
+ wait_queue_head_t wait;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ struct {
+ spinlock_t completion_lock;
+ struct list_head poll_list;
+ struct hlist_head *cancel_hash;
+ unsigned int cancel_hash_bits;
+ bool poll_multi_file;
+ spinlock_t inflight_lock;
+ struct list_head inflight_list;
+ long: 64;
+ long: 64;
+ };
+ struct delayed_work file_put_work;
+ struct llist_head file_put_llist;
+ struct work_struct exit_work;
+ long: 64;
+};
+
+struct io_buffer {
+ struct list_head list;
+ __u64 addr;
+ __s32 len;
+ __u16 bid;
+};
+
+struct io_rw {
+ struct kiocb kiocb;
+ u64 addr;
+ u64 len;
+};
+
+struct io_poll_iocb {
+ struct file *file;
+ union {
+ struct wait_queue_head *head;
+ u64 addr;
+ };
+ __poll_t events;
+ bool done;
+ bool canceled;
+ struct wait_queue_entry wait;
+};
+
+struct io_accept {
+ struct file *file;
+ struct sockaddr *addr;
+ int *addr_len;
+ int flags;
+ long unsigned int nofile;
+};
+
+struct io_sync {
+ struct file *file;
+ loff_t len;
+ loff_t off;
+ int flags;
+ int mode;
+};
+
+struct io_cancel {
+ struct file *file;
+ u64 addr;
+};
+
+struct io_timeout {
+ struct file *file;
+ u64 addr;
+ int flags;
+ u32 off;
+ u32 target_seq;
+};
+
+struct io_connect {
+ struct file *file;
+ struct sockaddr *addr;
+ int addr_len;
+};
+
+struct io_sr_msg {
+ struct file *file;
+ union {
+ struct user_msghdr *msg;
+ void *buf;
+ };
+ int msg_flags;
+ int bgid;
+ size_t len;
+ struct io_buffer *kbuf;
+};
+
+struct io_open {
+ struct file *file;
+ int dfd;
+ struct filename *filename;
+ struct open_how how;
+ long unsigned int nofile;
+};
+
+struct io_close {
+ struct file *file;
+ struct file *put_file;
+ int fd;
+};
+
+struct io_files_update {
+ struct file *file;
+ u64 arg;
+ u32 nr_args;
+ u32 offset;
+};
+
+struct io_fadvise {
+ struct file *file;
+ u64 offset;
+ u32 len;
+ u32 advice;
+};
+
+struct io_madvise {
+ struct file *file;
+ u64 addr;
+ u32 len;
+ u32 advice;
+};
+
+struct io_epoll {
+ struct file *file;
+ int epfd;
+ int op;
+ int fd;
+ struct epoll_event event;
+} __attribute__((packed));
+
+struct io_splice {
+ struct file *file_out;
+ struct file *file_in;
+ loff_t off_out;
+ loff_t off_in;
+ u64 len;
+ unsigned int flags;
+};
+
+struct io_provide_buf {
+ struct file *file;
+ __u64 addr;
+ __s32 len;
+ __u32 bgid;
+ __u16 nbufs;
+ __u16 bid;
+};
+
+struct io_statx {
+ struct file *file;
+ int dfd;
+ unsigned int mask;
+ unsigned int flags;
+ const char *filename;
+ struct statx *buffer;
+};
+
+struct io_async_ctx;
+
+struct async_poll;
+
+struct io_kiocb {
+ union {
+ struct file *file;
+ struct io_rw rw;
+ struct io_poll_iocb poll;
+ struct io_accept accept;
+ struct io_sync sync;
+ struct io_cancel cancel;
+ struct io_timeout timeout;
+ struct io_connect connect;
+ struct io_sr_msg sr_msg;
+ struct io_open open;
+ struct io_close close;
+ struct io_files_update files_update;
+ struct io_fadvise fadvise;
+ struct io_madvise madvise;
+ struct io_epoll epoll;
+ struct io_splice splice;
+ struct io_provide_buf pbuf;
+ struct io_statx statx;
+ };
+ struct io_async_ctx *io;
+ int cflags;
+ u8 opcode;
+ u8 iopoll_completed;
+ u16 buf_index;
+ struct io_ring_ctx *ctx;
+ struct list_head list;
+ unsigned int flags;
+ refcount_t refs;
+ struct task_struct *task;
+ long unsigned int fsize;
+ u64 user_data;
+ u32 result;
+ u32 sequence;
+ struct list_head link_list;
+ struct list_head inflight_entry;
+ struct percpu_ref *fixed_file_refs;
+ union {
+ struct {
+ struct callback_head task_work;
+ struct hlist_node hash_node;
+ struct async_poll *apoll;
+ };
+ struct io_wq_work work;
+ };
+};
+
+struct io_timeout_data {
+ struct io_kiocb *req;
+ struct hrtimer timer;
+ struct timespec64 ts;
+ enum hrtimer_mode mode;
+};
+
+struct io_async_connect {
+ struct __kernel_sockaddr_storage address;
+};
+
+struct io_async_msghdr {
+ struct iovec fast_iov[8];
+ struct iovec *iov;
+ struct sockaddr *uaddr;
+ struct msghdr msg;
+ struct __kernel_sockaddr_storage addr;
+};
+
+struct io_async_rw {
+ struct iovec fast_iov[8];
+ struct iovec *iov;
+ ssize_t nr_segs;
+ ssize_t size;
+};
+
+struct io_async_ctx {
+ union {
+ struct io_async_rw rw;
+ struct io_async_msghdr msg;
+ struct io_async_connect connect;
+ struct io_timeout_data timeout;
+ };
+};
+
+enum {
+ REQ_F_FIXED_FILE_BIT = 0,
+ REQ_F_IO_DRAIN_BIT = 1,
+ REQ_F_LINK_BIT = 2,
+ REQ_F_HARDLINK_BIT = 3,
+ REQ_F_FORCE_ASYNC_BIT = 4,
+ REQ_F_BUFFER_SELECT_BIT = 5,
+ REQ_F_LINK_HEAD_BIT = 6,
+ REQ_F_LINK_NEXT_BIT = 7,
+ REQ_F_FAIL_LINK_BIT = 8,
+ REQ_F_INFLIGHT_BIT = 9,
+ REQ_F_CUR_POS_BIT = 10,
+ REQ_F_NOWAIT_BIT = 11,
+ REQ_F_LINK_TIMEOUT_BIT = 12,
+ REQ_F_TIMEOUT_BIT = 13,
+ REQ_F_ISREG_BIT = 14,
+ REQ_F_MUST_PUNT_BIT = 15,
+ REQ_F_TIMEOUT_NOSEQ_BIT = 16,
+ REQ_F_COMP_LOCKED_BIT = 17,
+ REQ_F_NEED_CLEANUP_BIT = 18,
+ REQ_F_OVERFLOW_BIT = 19,
+ REQ_F_POLLED_BIT = 20,
+ REQ_F_BUFFER_SELECTED_BIT = 21,
+ REQ_F_NO_FILE_TABLE_BIT = 22,
+ REQ_F_QUEUE_TIMEOUT_BIT = 23,
+ REQ_F_WORK_INITIALIZED_BIT = 24,
+ __REQ_F_LAST_BIT = 25,
+};
+
+enum {
+ REQ_F_FIXED_FILE = 1,
+ REQ_F_IO_DRAIN = 2,
+ REQ_F_LINK = 4,
+ REQ_F_HARDLINK = 8,
+ REQ_F_FORCE_ASYNC = 16,
+ REQ_F_BUFFER_SELECT = 32,
+ REQ_F_LINK_HEAD = 64,
+ REQ_F_LINK_NEXT = 128,
+ REQ_F_FAIL_LINK = 256,
+ REQ_F_INFLIGHT = 512,
+ REQ_F_CUR_POS = 1024,
+ REQ_F_NOWAIT = 2048,
+ REQ_F_LINK_TIMEOUT = 4096,
+ REQ_F_TIMEOUT = 8192,
+ REQ_F_ISREG = 16384,
+ REQ_F_MUST_PUNT = 32768,
+ REQ_F_TIMEOUT_NOSEQ = 65536,
+ REQ_F_COMP_LOCKED = 131072,
+ REQ_F_NEED_CLEANUP = 262144,
+ REQ_F_OVERFLOW = 524288,
+ REQ_F_POLLED = 1048576,
+ REQ_F_BUFFER_SELECTED = 2097152,
+ REQ_F_NO_FILE_TABLE = 4194304,
+ REQ_F_QUEUE_TIMEOUT = 8388608,
+ REQ_F_WORK_INITIALIZED = 16777216,
+};
+
+struct async_poll {
+ struct io_poll_iocb poll;
+ struct io_wq_work work;
+};
+
+struct io_submit_state {
+ struct blk_plug plug;
+ void *reqs[8];
+ unsigned int free_reqs;
+ struct file *file;
+ unsigned int fd;
+ unsigned int has_refs;
+ unsigned int used_refs;
+ unsigned int ios_left;
+};
+
+struct io_op_def {
+ unsigned int async_ctx: 1;
+ unsigned int needs_mm: 1;
+ unsigned int needs_file: 1;
+ unsigned int needs_file_no_error: 1;
+ unsigned int hash_reg_file: 1;
+ unsigned int unbound_nonreg_file: 1;
+ unsigned int not_supported: 1;
+ unsigned int file_table: 1;
+ unsigned int needs_fs: 1;
+ unsigned int pollin: 1;
+ unsigned int pollout: 1;
+ unsigned int buffer_select: 1;
+};
+
+struct req_batch {
+ void *reqs[8];
+ int to_free;
+ int need_iter;
+};
+
+struct io_poll_table {
+ struct poll_table_struct pt;
+ struct io_kiocb *req;
+ int error;
+};
+
+struct io_wait_queue {
+ struct wait_queue_entry wq;
+ struct io_ring_ctx *ctx;
+ unsigned int to_wait;
+ unsigned int nr_timeouts;
+};
+
+struct io_file_put {
+ struct list_head list;
+ struct file *file;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+typedef bool work_cancel_fn(struct io_wq_work *, void *);
+
+enum {
+ IO_WORKER_F_UP = 1,
+ IO_WORKER_F_RUNNING = 2,
+ IO_WORKER_F_FREE = 4,
+ IO_WORKER_F_EXITING = 8,
+ IO_WORKER_F_FIXED = 16,
+ IO_WORKER_F_BOUND = 32,
+};
+
+enum {
+ IO_WQ_BIT_EXIT = 0,
+ IO_WQ_BIT_CANCEL = 1,
+ IO_WQ_BIT_ERROR = 2,
+};
+
+enum {
+ IO_WQE_FLAG_STALLED = 1,
+};
+
+struct io_wqe;
+
+struct io_worker {
+ refcount_t ref;
+ unsigned int flags;
+ struct hlist_nulls_node nulls_node;
+ struct list_head all_list;
+ struct task_struct *task;
+ struct io_wqe *wqe;
+ struct io_wq_work *cur_work;
+ spinlock_t lock;
+ struct callback_head rcu;
+ struct mm_struct *mm;
+ const struct cred *cur_creds;
+ const struct cred *saved_creds;
+ struct files_struct *restore_files;
+ struct fs_struct *restore_fs;
+};
+
+struct io_wqe_acct {
+ unsigned int nr_workers;
+ unsigned int max_workers;
+ atomic_t nr_running;
+};
+
+struct io_wq___2;
+
+struct io_wqe {
+ struct {
+ spinlock_t lock;
+ struct io_wq_work_list work_list;
+ long unsigned int hash_map;
+ unsigned int flags;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ };
+ int node;
+ struct io_wqe_acct acct[2];
+ struct hlist_nulls_head free_list;
+ struct list_head all_list;
+ struct io_wq___2 *wq;
+ struct io_wq_work *hash_tail[64];
+};
+
+enum {
+ IO_WQ_ACCT_BOUND = 0,
+ IO_WQ_ACCT_UNBOUND = 1,
+};
+
+struct io_wq___2 {
+ struct io_wqe **wqes;
+ long unsigned int state;
+ free_work_fn *free_work;
+ io_wq_work_fn *do_work;
+ struct task_struct *manager;
+ struct user_struct *user;
+ refcount_t refs;
+ struct completion done;
+ refcount_t use_refs;
+};
+
+struct io_cb_cancel_data {
+ work_cancel_fn *fn;
+ void *data;
+};
+
+typedef long unsigned int dax_entry_t;
+
+struct iomap_ops {
+ int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *);
+ int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *);
+};
+
+struct trace_event_raw_dax_pmd_fault_class {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int vm_start;
+ long unsigned int vm_end;
+ long unsigned int vm_flags;
+ long unsigned int address;
+ long unsigned int pgoff;
+ long unsigned int max_pgoff;
+ dev_t dev;
+ unsigned int flags;
+ int result;
+ char __data[0];
+};
+
+struct trace_event_raw_dax_pmd_load_hole_class {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int vm_flags;
+ long unsigned int address;
+ struct page *zero_page;
+ void *radix_entry;
+ dev_t dev;
+ char __data[0];
+};
+
+struct trace_event_raw_dax_pmd_insert_mapping_class {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int vm_flags;
+ long unsigned int address;
+ long int length;
+ u64 pfn_val;
+ void *radix_entry;
+ dev_t dev;
+ int write;
+ char __data[0];
+};
+
+struct trace_event_raw_dax_pte_fault_class {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int vm_flags;
+ long unsigned int address;
+ long unsigned int pgoff;
+ dev_t dev;
+ unsigned int flags;
+ int result;
+ char __data[0];
+};
+
+struct trace_event_raw_dax_insert_mapping {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int vm_flags;
+ long unsigned int address;
+ void *radix_entry;
+ dev_t dev;
+ int write;
+ char __data[0];
+};
+
+struct trace_event_raw_dax_writeback_range_class {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int start_index;
+ long unsigned int end_index;
+ dev_t dev;
+ char __data[0];
+};
+
+struct trace_event_raw_dax_writeback_one {
+ struct trace_entry ent;
+ long unsigned int ino;
+ long unsigned int pgoff;
+ long unsigned int pglen;
+ dev_t dev;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_dax_pmd_fault_class {};
+
+struct trace_event_data_offsets_dax_pmd_load_hole_class {};
+
+struct trace_event_data_offsets_dax_pmd_insert_mapping_class {};
+
+struct trace_event_data_offsets_dax_pte_fault_class {};
+
+struct trace_event_data_offsets_dax_insert_mapping {};
+
+struct trace_event_data_offsets_dax_writeback_range_class {};
+
+struct trace_event_data_offsets_dax_writeback_one {};
+
+typedef void (*btf_trace_dax_pmd_fault)(void *, struct inode *, struct vm_fault *, long unsigned int, int);
+
+typedef void (*btf_trace_dax_pmd_fault_done)(void *, struct inode *, struct vm_fault *, long unsigned int, int);
+
+typedef void (*btf_trace_dax_pmd_load_hole)(void *, struct inode *, struct vm_fault *, struct page *, void *);
+
+typedef void (*btf_trace_dax_pmd_load_hole_fallback)(void *, struct inode *, struct vm_fault *, struct page *, void *);
+
+typedef void (*btf_trace_dax_pmd_insert_mapping)(void *, struct inode *, struct vm_fault *, long int, pfn_t, void *);
+
+typedef void (*btf_trace_dax_pte_fault)(void *, struct inode *, struct vm_fault *, int);
+
+typedef void (*btf_trace_dax_pte_fault_done)(void *, struct inode *, struct vm_fault *, int);
+
+typedef void (*btf_trace_dax_load_hole)(void *, struct inode *, struct vm_fault *, int);
+
+typedef void (*btf_trace_dax_insert_pfn_mkwrite_no_entry)(void *, struct inode *, struct vm_fault *, int);
+
+typedef void (*btf_trace_dax_insert_pfn_mkwrite)(void *, struct inode *, struct vm_fault *, int);
+
+typedef void (*btf_trace_dax_insert_mapping)(void *, struct inode *, struct vm_fault *, void *);
+
+typedef void (*btf_trace_dax_writeback_range)(void *, struct inode *, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_dax_writeback_range_done)(void *, struct inode *, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_dax_writeback_one)(void *, struct inode *, long unsigned int, long unsigned int);
+
+struct exceptional_entry_key {
+ struct xarray *xa;
+ long unsigned int entry_start;
+};
+
+struct wait_exceptional_entry_queue {
+ wait_queue_entry_t wait;
+ struct exceptional_entry_key key;
+};
+
+struct flock64 {
+ short int l_type;
+ short int l_whence;
+ __kernel_loff_t l_start;
+ __kernel_loff_t l_len;
+ __kernel_pid_t l_pid;
+};
+
+struct trace_event_raw_locks_get_lock_context {
+ struct trace_entry ent;
+ long unsigned int i_ino;
+ dev_t s_dev;
+ unsigned char type;
+ struct file_lock_context *ctx;
+ char __data[0];
+};
+
+struct trace_event_raw_filelock_lock {
+ struct trace_entry ent;
+ struct file_lock *fl;
+ long unsigned int i_ino;
+ dev_t s_dev;
+ struct file_lock *fl_blocker;
+ fl_owner_t fl_owner;
+ unsigned int fl_pid;
+ unsigned int fl_flags;
+ unsigned char fl_type;
+ loff_t fl_start;
+ loff_t fl_end;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_filelock_lease {
+ struct trace_entry ent;
+ struct file_lock *fl;
+ long unsigned int i_ino;
+ dev_t s_dev;
+ struct file_lock *fl_blocker;
+ fl_owner_t fl_owner;
+ unsigned int fl_flags;
+ unsigned char fl_type;
+ long unsigned int fl_break_time;
+ long unsigned int fl_downgrade_time;
+ char __data[0];
+};
+
+struct trace_event_raw_generic_add_lease {
+ struct trace_entry ent;
+ long unsigned int i_ino;
+ int wcount;
+ int rcount;
+ int icount;
+ dev_t s_dev;
+ fl_owner_t fl_owner;
+ unsigned int fl_flags;
+ unsigned char fl_type;
+ char __data[0];
+};
+
+struct trace_event_raw_leases_conflict {
+ struct trace_entry ent;
+ void *lease;
+ void *breaker;
+ unsigned int l_fl_flags;
+ unsigned int b_fl_flags;
+ unsigned char l_fl_type;
+ unsigned char b_fl_type;
+ bool conflict;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_locks_get_lock_context {};
+
+struct trace_event_data_offsets_filelock_lock {};
+
+struct trace_event_data_offsets_filelock_lease {};
+
+struct trace_event_data_offsets_generic_add_lease {};
+
+struct trace_event_data_offsets_leases_conflict {};
+
+typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *);
+
+typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int);
+
+typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int);
+
+typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int);
+
+typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int);
+
+typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lock *);
+
+typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lock *);
+
+typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lock *);
+
+typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lock *);
+
+typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lock *);
+
+typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lock *);
+
+typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lock *, struct file_lock *);
+
+struct file_lock_list_struct {
+ spinlock_t lock;
+ struct hlist_head hlist;
+};
+
+struct locks_iterator {
+ int li_cpu;
+ loff_t li_pos;
+};
+
+enum {
+ VERBOSE_STATUS = 1,
+};
+
+enum {
+ Enabled = 0,
+ Magic = 1,
+};
+
+typedef struct {
+ struct list_head list;
+ long unsigned int flags;
+ int offset;
+ int size;
+ char *magic;
+ char *mask;
+ const char *interpreter;
+ char *name;
+ struct dentry *dentry;
+ struct file *interp_file;
+} Node;
+
+typedef unsigned int __kernel_uid_t;
+
+typedef unsigned int __kernel_gid_t;
+
+typedef long unsigned int elf_greg_t;
+
+typedef elf_greg_t elf_gregset_t[27];
+
+struct elf64_note {
+ Elf64_Word n_namesz;
+ Elf64_Word n_descsz;
+ Elf64_Word n_type;
+};
+
+struct elf_siginfo {
+ int si_signo;
+ int si_code;
+ int si_errno;
+};
+
+struct elf_prstatus {
+ struct elf_siginfo pr_info;
+ short int pr_cursig;
+ long unsigned int pr_sigpend;
+ long unsigned int pr_sighold;
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct __kernel_old_timeval pr_utime;
+ struct __kernel_old_timeval pr_stime;
+ struct __kernel_old_timeval pr_cutime;
+ struct __kernel_old_timeval pr_cstime;
+ elf_gregset_t pr_reg;
+ int pr_fpvalid;
+};
+
+struct elf_prpsinfo {
+ char pr_state;
+ char pr_sname;
+ char pr_zomb;
+ char pr_nice;
+ long unsigned int pr_flag;
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ char pr_fname[16];
+ char pr_psargs[80];
+};
+
+struct arch_elf_state {};
+
+struct memelfnote {
+ const char *name;
+ int type;
+ unsigned int datasz;
+ void *data;
+};
+
+struct elf_thread_core_info {
+ struct elf_thread_core_info *next;
+ struct task_struct *task;
+ struct elf_prstatus prstatus;
+ struct memelfnote notes[0];
+};
+
+struct elf_note_info {
+ struct elf_thread_core_info *thread;
+ struct memelfnote psinfo;
+ struct memelfnote signote;
+ struct memelfnote auxv;
+ struct memelfnote files;
+ siginfo_t csigdata;
+ size_t size;
+ int thread_notes;
+};
+
+struct mb_cache_entry {
+ struct list_head e_list;
+ struct hlist_bl_node e_hash_list;
+ atomic_t e_refcnt;
+ u32 e_key;
+ u32 e_referenced: 1;
+ u32 e_reusable: 1;
+ u64 e_value;
+};
+
+struct mb_cache {
+ struct hlist_bl_head *c_hash;
+ int c_bucket_bits;
+ long unsigned int c_max_entries;
+ spinlock_t c_list_lock;
+ struct list_head c_list;
+ long unsigned int c_entry_count;
+ struct shrinker c_shrink;
+ struct work_struct c_shrink_work;
+};
+
+struct posix_acl_xattr_entry {
+ __le16 e_tag;
+ __le16 e_perm;
+ __le32 e_id;
+};
+
+struct posix_acl_xattr_header {
+ __le32 a_version;
+};
+
+struct core_name {
+ char *corename;
+ int used;
+ int size;
+};
+
+struct trace_event_raw_iomap_readpage_class {
+ struct trace_entry ent;
+ dev_t dev;
+ u64 ino;
+ int nr_pages;
+ char __data[0];
+};
+
+struct trace_event_raw_iomap_range_class {
+ struct trace_entry ent;
+ dev_t dev;
+ u64 ino;
+ loff_t size;
+ long unsigned int offset;
+ unsigned int length;
+ char __data[0];
+};
+
+struct trace_event_raw_iomap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ u64 ino;
+ u64 addr;
+ loff_t offset;
+ u64 length;
+ u16 type;
+ u16 flags;
+ dev_t bdev;
+ char __data[0];
+};
+
+struct trace_event_raw_iomap_apply {
+ struct trace_entry ent;
+ dev_t dev;
+ u64 ino;
+ loff_t pos;
+ loff_t length;
+ unsigned int flags;
+ const void *ops;
+ void *actor;
+ long unsigned int caller;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_iomap_readpage_class {};
+
+struct trace_event_data_offsets_iomap_range_class {};
+
+struct trace_event_data_offsets_iomap_class {};
+
+struct trace_event_data_offsets_iomap_apply {};
+
+typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int);
+
+typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int);
+
+typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, long unsigned int, unsigned int);
+
+typedef void (*btf_trace_iomap_releasepage)(void *, struct inode *, long unsigned int, unsigned int);
+
+typedef void (*btf_trace_iomap_invalidatepage)(void *, struct inode *, long unsigned int, unsigned int);
+
+typedef void (*btf_trace_iomap_apply_dstmap)(void *, struct inode *, struct iomap *);
+
+typedef void (*btf_trace_iomap_apply_srcmap)(void *, struct inode *, struct iomap *);
+
+typedef void (*btf_trace_iomap_apply)(void *, struct inode *, loff_t, loff_t, unsigned int, const void *, void *, long unsigned int);
+
+typedef loff_t (*iomap_actor_t)(struct inode *, loff_t, loff_t, void *, struct iomap *, struct iomap *);
+
+struct iomap_ioend {
+ struct list_head io_list;
+ u16 io_type;
+ u16 io_flags;
+ struct inode *io_inode;
+ size_t io_size;
+ loff_t io_offset;
+ void *io_private;
+ struct bio *io_bio;
+ struct bio io_inline_bio;
+};
+
+struct iomap_writepage_ctx;
+
+struct iomap_writeback_ops {
+ int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t);
+ int (*prepare_ioend)(struct iomap_ioend *, int);
+ void (*discard_page)(struct page *);
+};
+
+struct iomap_writepage_ctx {
+ struct iomap iomap;
+ struct iomap_ioend *ioend;
+ const struct iomap_writeback_ops *ops;
+};
+
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ spinlock_t uptodate_lock;
+ long unsigned int uptodate[1];
+};
+
+struct iomap_readpage_ctx {
+ struct page *cur_page;
+ bool cur_page_in_bio;
+ struct bio *bio;
+ struct readahead_control *rac;
+};
+
+enum {
+ IOMAP_WRITE_F_UNSHARE = 1,
+};
+
+struct iomap_dio_ops {
+ int (*end_io)(struct kiocb *, ssize_t, int, unsigned int);
+ blk_qc_t (*submit_io)(struct inode *, struct iomap *, struct bio *, loff_t);
+};
+
+struct iomap_dio {
+ struct kiocb *iocb;
+ const struct iomap_dio_ops *dops;
+ loff_t i_size;
+ loff_t size;
+ atomic_t ref;
+ unsigned int flags;
+ int error;
+ bool wait_for_completion;
+ union {
+ struct {
+ struct iov_iter *iter;
+ struct task_struct *waiter;
+ struct request_queue *last_queue;
+ blk_qc_t cookie;
+ } submit;
+ struct {
+ struct work_struct work;
+ } aio;
+ };
+};
+
+struct fiemap_ctx {
+ struct fiemap_extent_info *fi;
+ struct iomap prev;
+};
+
+struct iomap_swapfile_info {
+ struct iomap iomap;
+ struct swap_info_struct *sis;
+ uint64_t lowest_ppage;
+ uint64_t highest_ppage;
+ long unsigned int nr_pages;
+ int nr_extents;
+};
+
+struct proc_maps_private {
+ struct inode *inode;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ struct vm_area_struct *tail_vma;
+};
+
+struct mem_size_stats {
+ long unsigned int resident;
+ long unsigned int shared_clean;
+ long unsigned int shared_dirty;
+ long unsigned int private_clean;
+ long unsigned int private_dirty;
+ long unsigned int referenced;
+ long unsigned int anonymous;
+ long unsigned int lazyfree;
+ long unsigned int anonymous_thp;
+ long unsigned int shmem_thp;
+ long unsigned int file_thp;
+ long unsigned int swap;
+ long unsigned int shared_hugetlb;
+ long unsigned int private_hugetlb;
+ u64 pss;
+ u64 pss_anon;
+ u64 pss_file;
+ u64 pss_shmem;
+ u64 pss_locked;
+ u64 swap_pss;
+ bool check_shmem_swap;
+};
+
+enum clear_refs_types {
+ CLEAR_REFS_ALL = 1,
+ CLEAR_REFS_ANON = 2,
+ CLEAR_REFS_MAPPED = 3,
+ CLEAR_REFS_SOFT_DIRTY = 4,
+ CLEAR_REFS_MM_HIWATER_RSS = 5,
+ CLEAR_REFS_LAST = 6,
+};
+
+struct clear_refs_private {
+ enum clear_refs_types type;
+};
+
+typedef struct {
+ u64 pme;
+} pagemap_entry_t;
+
+struct pagemapread {
+ int pos;
+ int len;
+ pagemap_entry_t *buffer;
+ bool show_pfn;
+};
+
+struct pde_opener {
+ struct list_head lh;
+ struct file *file;
+ bool closing;
+ struct completion *c;
+};
+
+enum {
+ BIAS = -2147483648,
+};
+
+struct proc_fs_context {
+ struct pid_namespace *pid_ns;
+ unsigned int mask;
+ enum proc_hidepid hidepid;
+ int gid;
+ enum proc_pidonly pidonly;
+};
+
+enum proc_param {
+ Opt_gid___2 = 0,
+ Opt_hidepid = 1,
+ Opt_subset = 2,
+};
+
+struct genradix_root;
+
+struct __genradix {
+ struct genradix_root *root;
+};
+
+struct syscall_info {
+ __u64 sp;
+ struct seccomp_data data;
+};
+
+typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *);
+
+struct pid_entry {
+ const char *name;
+ unsigned int len;
+ umode_t mode;
+ const struct inode_operations *iop;
+ const struct file_operations *fop;
+ union proc_op op;
+};
+
+struct limit_names {
+ const char *name;
+ const char *unit;
+};
+
+struct map_files_info {
+ long unsigned int start;
+ long unsigned int end;
+ fmode_t mode;
+};
+
+struct tgid_iter {
+ unsigned int tgid;
+ struct task_struct *task;
+};
+
+struct fd_data {
+ fmode_t mode;
+ unsigned int fd;
+};
+
+struct sysctl_alias {
+ const char *kernel_param;
+ const char *sysctl_param;
+};
+
+struct seq_net_private {
+ struct net *net;
+};
+
+struct kernfs_iattrs {
+ kuid_t ia_uid;
+ kgid_t ia_gid;
+ struct timespec64 ia_atime;
+ struct timespec64 ia_mtime;
+ struct timespec64 ia_ctime;
+ struct simple_xattrs xattrs;
+ atomic_t nr_user_xattrs;
+ atomic_t user_xattr_size;
+};
+
+struct kernfs_super_info {
+ struct super_block *sb;
+ struct kernfs_root *root;
+ const void *ns;
+ struct list_head node;
+};
+
+enum kernfs_node_flag {
+ KERNFS_ACTIVATED = 16,
+ KERNFS_NS = 32,
+ KERNFS_HAS_SEQ_SHOW = 64,
+ KERNFS_HAS_MMAP = 128,
+ KERNFS_LOCKDEP = 256,
+ KERNFS_SUICIDAL = 1024,
+ KERNFS_SUICIDED = 2048,
+ KERNFS_EMPTY_DIR = 4096,
+ KERNFS_HAS_RELEASE = 8192,
+};
+
+struct kernfs_open_node {
+ atomic_t refcnt;
+ atomic_t event;
+ wait_queue_head_t poll;
+ struct list_head files;
+};
+
+struct pts_mount_opts {
+ int setuid;
+ int setgid;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+ umode_t ptmxmode;
+ int reserve;
+ int max;
+};
+
+enum {
+ Opt_uid___2 = 0,
+ Opt_gid___3 = 1,
+ Opt_mode___2 = 2,
+ Opt_ptmxmode = 3,
+ Opt_newinstance = 4,
+ Opt_max = 5,
+ Opt_err = 6,
+};
+
+struct pts_fs_info {
+ struct ida allocated_ptys;
+ struct pts_mount_opts mount_opts;
+ struct super_block *sb;
+ struct dentry *ptmx_dentry;
+};
+
+struct dcookie_struct {
+ struct path path;
+ struct list_head hash_list;
+};
+
+struct dcookie_user {
+ struct list_head next;
+};
+
+typedef unsigned int tid_t;
+
+struct transaction_chp_stats_s {
+ long unsigned int cs_chp_time;
+ __u32 cs_forced_to_close;
+ __u32 cs_written;
+ __u32 cs_dropped;
+};
+
+struct journal_s;
+
+typedef struct journal_s journal_t;
+
+struct journal_head;
+
+struct transaction_s;
+
+typedef struct transaction_s transaction_t;
+
+struct transaction_s {
+ journal_t *t_journal;
+ tid_t t_tid;
+ enum {
+ T_RUNNING = 0,
+ T_LOCKED = 1,
+ T_SWITCH = 2,
+ T_FLUSH = 3,
+ T_COMMIT = 4,
+ T_COMMIT_DFLUSH = 5,
+ T_COMMIT_JFLUSH = 6,
+ T_COMMIT_CALLBACK = 7,
+ T_FINISHED = 8,
+ } t_state;
+ long unsigned int t_log_start;
+ int t_nr_buffers;
+ struct journal_head *t_reserved_list;
+ struct journal_head *t_buffers;
+ struct journal_head *t_forget;
+ struct journal_head *t_checkpoint_list;
+ struct journal_head *t_checkpoint_io_list;
+ struct journal_head *t_shadow_list;
+ struct list_head t_inode_list;
+ spinlock_t t_handle_lock;
+ long unsigned int t_max_wait;
+ long unsigned int t_start;
+ long unsigned int t_requested;
+ struct transaction_chp_stats_s t_chp_stats;
+ atomic_t t_updates;
+ atomic_t t_outstanding_credits;
+ atomic_t t_outstanding_revokes;
+ atomic_t t_handle_count;
+ transaction_t *t_cpnext;
+ transaction_t *t_cpprev;
+ long unsigned int t_expires;
+ ktime_t t_start_time;
+ unsigned int t_synchronous_commit: 1;
+ int t_need_data_flush;
+ struct list_head t_private_list;
+};
+
+struct jbd2_buffer_trigger_type;
+
+struct journal_head {
+ struct buffer_head *b_bh;
+ spinlock_t b_state_lock;
+ int b_jcount;
+ unsigned int b_jlist;
+ unsigned int b_modified;
+ char *b_frozen_data;
+ char *b_committed_data;
+ transaction_t *b_transaction;
+ transaction_t *b_next_transaction;
+ struct journal_head *b_tnext;
+ struct journal_head *b_tprev;
+ transaction_t *b_cp_transaction;
+ struct journal_head *b_cpnext;
+ struct journal_head *b_cpprev;
+ struct jbd2_buffer_trigger_type *b_triggers;
+ struct jbd2_buffer_trigger_type *b_frozen_triggers;
+};
+
+struct jbd2_buffer_trigger_type {
+ void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t);
+ void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *);
+};
+
+struct crypto_alg;
+
+struct crypto_tfm {
+ u32 crt_flags;
+ void (*exit)(struct crypto_tfm *);
+ struct crypto_alg *__crt_alg;
+ void *__crt_ctx[0];
+};
+
+struct cipher_alg {
+ unsigned int cia_min_keysize;
+ unsigned int cia_max_keysize;
+ int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int);
+ void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *);
+ void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *);
+};
+
+struct compress_alg {
+ int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *);
+ int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *);
+};
+
+struct crypto_type;
+
+struct crypto_alg {
+ struct list_head cra_list;
+ struct list_head cra_users;
+ u32 cra_flags;
+ unsigned int cra_blocksize;
+ unsigned int cra_ctxsize;
+ unsigned int cra_alignmask;
+ int cra_priority;
+ refcount_t cra_refcnt;
+ char cra_name[128];
+ char cra_driver_name[128];
+ const struct crypto_type *cra_type;
+ union {
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ } cra_u;
+ int (*cra_init)(struct crypto_tfm *);
+ void (*cra_exit)(struct crypto_tfm *);
+ void (*cra_destroy)(struct crypto_alg *);
+ struct module *cra_module;
+};
+
+struct crypto_instance;
+
+struct crypto_type {
+ unsigned int (*ctxsize)(struct crypto_alg *, u32, u32);
+ unsigned int (*extsize)(struct crypto_alg *);
+ int (*init)(struct crypto_tfm *, u32, u32);
+ int (*init_tfm)(struct crypto_tfm *);
+ void (*show)(struct seq_file *, struct crypto_alg *);
+ int (*report)(struct sk_buff *, struct crypto_alg *);
+ void (*free)(struct crypto_instance *);
+ unsigned int type;
+ unsigned int maskclear;
+ unsigned int maskset;
+ unsigned int tfmsize;
+};
+
+struct crypto_shash {
+ unsigned int descsize;
+ struct crypto_tfm base;
+};
+
+struct jbd2_journal_handle;
+
+typedef struct jbd2_journal_handle handle_t;
+
+struct jbd2_journal_handle {
+ union {
+ transaction_t *h_transaction;
+ journal_t *h_journal;
+ };
+ handle_t *h_rsv_handle;
+ int h_total_credits;
+ int h_revoke_credits;
+ int h_revoke_credits_requested;
+ int h_ref;
+ int h_err;
+ unsigned int h_sync: 1;
+ unsigned int h_jdata: 1;
+ unsigned int h_reserved: 1;
+ unsigned int h_aborted: 1;
+ unsigned int h_type: 8;
+ unsigned int h_line_no: 16;
+ long unsigned int h_start_jiffies;
+ unsigned int h_requested_credits;
+ unsigned int saved_alloc_context;
+};
+
+struct transaction_run_stats_s {
+ long unsigned int rs_wait;
+ long unsigned int rs_request_delay;
+ long unsigned int rs_running;
+ long unsigned int rs_locked;
+ long unsigned int rs_flushing;
+ long unsigned int rs_logging;
+ __u32 rs_handle_count;
+ __u32 rs_blocks;
+ __u32 rs_blocks_logged;
+};
+
+struct transaction_stats_s {
+ long unsigned int ts_tid;
+ long unsigned int ts_requested;
+ struct transaction_run_stats_s run;
+};
+
+struct journal_superblock_s;
+
+typedef struct journal_superblock_s journal_superblock_t;
+
+struct jbd2_revoke_table_s;
+
+struct journal_s {
+ long unsigned int j_flags;
+ int j_errno;
+ struct mutex j_abort_mutex;
+ struct buffer_head *j_sb_buffer;
+ journal_superblock_t *j_superblock;
+ int j_format_version;
+ rwlock_t j_state_lock;
+ int j_barrier_count;
+ struct mutex j_barrier;
+ transaction_t *j_running_transaction;
+ transaction_t *j_committing_transaction;
+ transaction_t *j_checkpoint_transactions;
+ wait_queue_head_t j_wait_transaction_locked;
+ wait_queue_head_t j_wait_done_commit;
+ wait_queue_head_t j_wait_commit;
+ wait_queue_head_t j_wait_updates;
+ wait_queue_head_t j_wait_reserved;
+ struct mutex j_checkpoint_mutex;
+ struct buffer_head *j_chkpt_bhs[64];
+ long unsigned int j_head;
+ long unsigned int j_tail;
+ long unsigned int j_free;
+ long unsigned int j_first;
+ long unsigned int j_last;
+ struct block_device *j_dev;
+ int j_blocksize;
+ long long unsigned int j_blk_offset;
+ char j_devname[56];
+ struct block_device *j_fs_dev;
+ unsigned int j_maxlen;
+ atomic_t j_reserved_credits;
+ spinlock_t j_list_lock;
+ struct inode *j_inode;
+ tid_t j_tail_sequence;
+ tid_t j_transaction_sequence;
+ tid_t j_commit_sequence;
+ tid_t j_commit_request;
+ __u8 j_uuid[16];
+ struct task_struct *j_task;
+ int j_max_transaction_buffers;
+ int j_revoke_records_per_block;
+ long unsigned int j_commit_interval;
+ struct timer_list j_commit_timer;
+ spinlock_t j_revoke_lock;
+ struct jbd2_revoke_table_s *j_revoke;
+ struct jbd2_revoke_table_s *j_revoke_table[2];
+ struct buffer_head **j_wbuf;
+ int j_wbufsize;
+ pid_t j_last_sync_writer;
+ u64 j_average_commit_time;
+ u32 j_min_batch_time;
+ u32 j_max_batch_time;
+ void (*j_commit_callback)(journal_t *, transaction_t *);
+ spinlock_t j_history_lock;
+ struct proc_dir_entry *j_proc_entry;
+ struct transaction_stats_s j_stats;
+ unsigned int j_failed_commit;
+ void *j_private;
+ struct crypto_shash *j_chksum_driver;
+ __u32 j_csum_seed;
+ struct lockdep_map j_trans_commit_map;
+};
+
+struct journal_header_s {
+ __be32 h_magic;
+ __be32 h_blocktype;
+ __be32 h_sequence;
+};
+
+typedef struct journal_header_s journal_header_t;
+
+struct journal_superblock_s {
+ journal_header_t s_header;
+ __be32 s_blocksize;
+ __be32 s_maxlen;
+ __be32 s_first;
+ __be32 s_sequence;
+ __be32 s_start;
+ __be32 s_errno;
+ __be32 s_feature_compat;
+ __be32 s_feature_incompat;
+ __be32 s_feature_ro_compat;
+ __u8 s_uuid[16];
+ __be32 s_nr_users;
+ __be32 s_dynsuper;
+ __be32 s_max_transaction;
+ __be32 s_max_trans_data;
+ __u8 s_checksum_type;
+ __u8 s_padding2[3];
+ __u32 s_padding[42];
+ __be32 s_checksum;
+ __u8 s_users[768];
+};
+
+enum jbd_state_bits {
+ BH_JBD = 16,
+ BH_JWrite = 17,
+ BH_Freed = 18,
+ BH_Revoked = 19,
+ BH_RevokeValid = 20,
+ BH_JBDDirty = 21,
+ BH_JournalHead = 22,
+ BH_Shadow = 23,
+ BH_Verified = 24,
+ BH_JBDPrivateStart = 25,
+};
+
+struct jbd2_inode {
+ transaction_t *i_transaction;
+ transaction_t *i_next_transaction;
+ struct list_head i_list;
+ struct inode *i_vfs_inode;
+ long unsigned int i_flags;
+ loff_t i_dirty_start;
+ loff_t i_dirty_end;
+};
+
+struct bgl_lock {
+ spinlock_t lock;
+};
+
+struct blockgroup_lock {
+ struct bgl_lock locks[128];
+};
+
+struct fscrypt_dummy_context {};
+
+struct fsverity_operations {
+ int (*begin_enable_verity)(struct file *);
+ int (*end_enable_verity)(struct file *, const void *, size_t, u64);
+ int (*get_verity_descriptor)(struct inode *, void *, size_t);
+ struct page * (*read_merkle_tree_page)(struct inode *, long unsigned int, long unsigned int);
+ int (*write_merkle_tree_block)(struct inode *, const void *, u64, int);
+};
+
+typedef int ext4_grpblk_t;
+
+typedef long long unsigned int ext4_fsblk_t;
+
+typedef __u32 ext4_lblk_t;
+
+typedef unsigned int ext4_group_t;
+
+struct ext4_allocation_request {
+ struct inode *inode;
+ unsigned int len;
+ ext4_lblk_t logical;
+ ext4_lblk_t lleft;
+ ext4_lblk_t lright;
+ ext4_fsblk_t goal;
+ ext4_fsblk_t pleft;
+ ext4_fsblk_t pright;
+ unsigned int flags;
+};
+
+struct ext4_system_blocks {
+ struct rb_root root;
+ struct callback_head rcu;
+};
+
+struct ext4_group_desc {
+ __le32 bg_block_bitmap_lo;
+ __le32 bg_inode_bitmap_lo;
+ __le32 bg_inode_table_lo;
+ __le16 bg_free_blocks_count_lo;
+ __le16 bg_free_inodes_count_lo;
+ __le16 bg_used_dirs_count_lo;
+ __le16 bg_flags;
+ __le32 bg_exclude_bitmap_lo;
+ __le16 bg_block_bitmap_csum_lo;
+ __le16 bg_inode_bitmap_csum_lo;
+ __le16 bg_itable_unused_lo;
+ __le16 bg_checksum;
+ __le32 bg_block_bitmap_hi;
+ __le32 bg_inode_bitmap_hi;
+ __le32 bg_inode_table_hi;
+ __le16 bg_free_blocks_count_hi;
+ __le16 bg_free_inodes_count_hi;
+ __le16 bg_used_dirs_count_hi;
+ __le16 bg_itable_unused_hi;
+ __le32 bg_exclude_bitmap_hi;
+ __le16 bg_block_bitmap_csum_hi;
+ __le16 bg_inode_bitmap_csum_hi;
+ __u32 bg_reserved;
+};
+
+struct flex_groups {
+ atomic64_t free_clusters;
+ atomic_t free_inodes;
+ atomic_t used_dirs;
+};
+
+struct extent_status {
+ struct rb_node rb_node;
+ ext4_lblk_t es_lblk;
+ ext4_lblk_t es_len;
+ ext4_fsblk_t es_pblk;
+};
+
+struct ext4_es_tree {
+ struct rb_root root;
+ struct extent_status *cache_es;
+};
+
+struct ext4_es_stats {
+ long unsigned int es_stats_shrunk;
+ struct percpu_counter es_stats_cache_hits;
+ struct percpu_counter es_stats_cache_misses;
+ u64 es_stats_scan_time;
+ u64 es_stats_max_scan_time;
+ struct percpu_counter es_stats_all_cnt;
+ struct percpu_counter es_stats_shk_cnt;
+};
+
+struct ext4_pending_tree {
+ struct rb_root root;
+};
+
+struct ext4_inode_info {
+ __le32 i_data[15];
+ __u32 i_dtime;
+ ext4_fsblk_t i_file_acl;
+ ext4_group_t i_block_group;
+ ext4_lblk_t i_dir_start_lookup;
+ long unsigned int i_flags;
+ struct rw_semaphore xattr_sem;
+ struct list_head i_orphan;
+ loff_t i_disksize;
+ struct rw_semaphore i_data_sem;
+ struct rw_semaphore i_mmap_sem;
+ struct inode vfs_inode;
+ struct jbd2_inode *jinode;
+ spinlock_t i_raw_lock;
+ struct timespec64 i_crtime;
+ struct list_head i_prealloc_list;
+ spinlock_t i_prealloc_lock;
+ struct ext4_es_tree i_es_tree;
+ rwlock_t i_es_lock;
+ struct list_head i_es_list;
+ unsigned int i_es_all_nr;
+ unsigned int i_es_shk_nr;
+ ext4_lblk_t i_es_shrink_lblk;
+ ext4_group_t i_last_alloc_group;
+ unsigned int i_reserved_data_blocks;
+ struct ext4_pending_tree i_pending_tree;
+ __u16 i_extra_isize;
+ u16 i_inline_off;
+ u16 i_inline_size;
+ spinlock_t i_completed_io_lock;
+ struct list_head i_rsv_conversion_list;
+ struct work_struct i_rsv_conversion_work;
+ atomic_t i_unwritten;
+ spinlock_t i_block_reservation_lock;
+ tid_t i_sync_tid;
+ tid_t i_datasync_tid;
+ __u32 i_csum_seed;
+ kprojid_t i_projid;
+};
+
+struct ext4_super_block {
+ __le32 s_inodes_count;
+ __le32 s_blocks_count_lo;
+ __le32 s_r_blocks_count_lo;
+ __le32 s_free_blocks_count_lo;
+ __le32 s_free_inodes_count;
+ __le32 s_first_data_block;
+ __le32 s_log_block_size;
+ __le32 s_log_cluster_size;
+ __le32 s_blocks_per_group;
+ __le32 s_clusters_per_group;
+ __le32 s_inodes_per_group;
+ __le32 s_mtime;
+ __le32 s_wtime;
+ __le16 s_mnt_count;
+ __le16 s_max_mnt_count;
+ __le16 s_magic;
+ __le16 s_state;
+ __le16 s_errors;
+ __le16 s_minor_rev_level;
+ __le32 s_lastcheck;
+ __le32 s_checkinterval;
+ __le32 s_creator_os;
+ __le32 s_rev_level;
+ __le16 s_def_resuid;
+ __le16 s_def_resgid;
+ __le32 s_first_ino;
+ __le16 s_inode_size;
+ __le16 s_block_group_nr;
+ __le32 s_feature_compat;
+ __le32 s_feature_incompat;
+ __le32 s_feature_ro_compat;
+ __u8 s_uuid[16];
+ char s_volume_name[16];
+ char s_last_mounted[64];
+ __le32 s_algorithm_usage_bitmap;
+ __u8 s_prealloc_blocks;
+ __u8 s_prealloc_dir_blocks;
+ __le16 s_reserved_gdt_blocks;
+ __u8 s_journal_uuid[16];
+ __le32 s_journal_inum;
+ __le32 s_journal_dev;
+ __le32 s_last_orphan;
+ __le32 s_hash_seed[4];
+ __u8 s_def_hash_version;
+ __u8 s_jnl_backup_type;
+ __le16 s_desc_size;
+ __le32 s_default_mount_opts;
+ __le32 s_first_meta_bg;
+ __le32 s_mkfs_time;
+ __le32 s_jnl_blocks[17];
+ __le32 s_blocks_count_hi;
+ __le32 s_r_blocks_count_hi;
+ __le32 s_free_blocks_count_hi;
+ __le16 s_min_extra_isize;
+ __le16 s_want_extra_isize;
+ __le32 s_flags;
+ __le16 s_raid_stride;
+ __le16 s_mmp_update_interval;
+ __le64 s_mmp_block;
+ __le32 s_raid_stripe_width;
+ __u8 s_log_groups_per_flex;
+ __u8 s_checksum_type;
+ __u8 s_encryption_level;
+ __u8 s_reserved_pad;
+ __le64 s_kbytes_written;
+ __le32 s_snapshot_inum;
+ __le32 s_snapshot_id;
+ __le64 s_snapshot_r_blocks_count;
+ __le32 s_snapshot_list;
+ __le32 s_error_count;
+ __le32 s_first_error_time;
+ __le32 s_first_error_ino;
+ __le64 s_first_error_block;
+ __u8 s_first_error_func[32];
+ __le32 s_first_error_line;
+ __le32 s_last_error_time;
+ __le32 s_last_error_ino;
+ __le32 s_last_error_line;
+ __le64 s_last_error_block;
+ __u8 s_last_error_func[32];
+ __u8 s_mount_opts[64];
+ __le32 s_usr_quota_inum;
+ __le32 s_grp_quota_inum;
+ __le32 s_overhead_clusters;
+ __le32 s_backup_bgs[2];
+ __u8 s_encrypt_algos[4];
+ __u8 s_encrypt_pw_salt[16];
+ __le32 s_lpf_ino;
+ __le32 s_prj_quota_inum;
+ __le32 s_checksum_seed;
+ __u8 s_wtime_hi;
+ __u8 s_mtime_hi;
+ __u8 s_mkfs_time_hi;
+ __u8 s_lastcheck_hi;
+ __u8 s_first_error_time_hi;
+ __u8 s_last_error_time_hi;
+ __u8 s_first_error_errcode;
+ __u8 s_last_error_errcode;
+ __le16 s_encoding;
+ __le16 s_encoding_flags;
+ __le32 s_reserved[95];
+ __le32 s_checksum;
+};
+
+struct mb_cache___2;
+
+struct ext4_group_info;
+
+struct ext4_locality_group;
+
+struct ext4_li_request;
+
+struct ext4_sb_info {
+ long unsigned int s_desc_size;
+ long unsigned int s_inodes_per_block;
+ long unsigned int s_blocks_per_group;
+ long unsigned int s_clusters_per_group;
+ long unsigned int s_inodes_per_group;
+ long unsigned int s_itb_per_group;
+ long unsigned int s_gdb_count;
+ long unsigned int s_desc_per_block;
+ ext4_group_t s_groups_count;
+ ext4_group_t s_blockfile_groups;
+ long unsigned int s_overhead;
+ unsigned int s_cluster_ratio;
+ unsigned int s_cluster_bits;
+ loff_t s_bitmap_maxbytes;
+ struct buffer_head *s_sbh;
+ struct ext4_super_block *s_es;
+ struct buffer_head **s_group_desc;
+ unsigned int s_mount_opt;
+ unsigned int s_mount_opt2;
+ unsigned int s_mount_flags;
+ unsigned int s_def_mount_opt;
+ ext4_fsblk_t s_sb_block;
+ atomic64_t s_resv_clusters;
+ kuid_t s_resuid;
+ kgid_t s_resgid;
+ short unsigned int s_mount_state;
+ short unsigned int s_pad;
+ int s_addr_per_block_bits;
+ int s_desc_per_block_bits;
+ int s_inode_size;
+ int s_first_ino;
+ unsigned int s_inode_readahead_blks;
+ unsigned int s_inode_goal;
+ u32 s_hash_seed[4];
+ int s_def_hash_version;
+ int s_hash_unsigned;
+ struct percpu_counter s_freeclusters_counter;
+ struct percpu_counter s_freeinodes_counter;
+ struct percpu_counter s_dirs_counter;
+ struct percpu_counter s_dirtyclusters_counter;
+ struct blockgroup_lock *s_blockgroup_lock;
+ struct proc_dir_entry *s_proc;
+ struct kobject s_kobj;
+ struct completion s_kobj_unregister;
+ struct super_block *s_sb;
+ struct journal_s *s_journal;
+ struct list_head s_orphan;
+ struct mutex s_orphan_lock;
+ long unsigned int s_ext4_flags;
+ long unsigned int s_commit_interval;
+ u32 s_max_batch_time;
+ u32 s_min_batch_time;
+ struct block_device *journal_bdev;
+ unsigned int s_want_extra_isize;
+ struct ext4_system_blocks *system_blks;
+ struct ext4_group_info ***s_group_info;
+ struct inode *s_buddy_cache;
+ spinlock_t s_md_lock;
+ short unsigned int *s_mb_offsets;
+ unsigned int *s_mb_maxs;
+ unsigned int s_group_info_size;
+ unsigned int s_mb_free_pending;
+ struct list_head s_freed_data_list;
+ long unsigned int s_stripe;
+ unsigned int s_mb_stream_request;
+ unsigned int s_mb_max_to_scan;
+ unsigned int s_mb_min_to_scan;
+ unsigned int s_mb_stats;
+ unsigned int s_mb_order2_reqs;
+ unsigned int s_mb_group_prealloc;
+ unsigned int s_max_dir_size_kb;
+ long unsigned int s_mb_last_group;
+ long unsigned int s_mb_last_start;
+ atomic_t s_bal_reqs;
+ atomic_t s_bal_success;
+ atomic_t s_bal_allocated;
+ atomic_t s_bal_ex_scanned;
+ atomic_t s_bal_goals;
+ atomic_t s_bal_breaks;
+ atomic_t s_bal_2orders;
+ spinlock_t s_bal_lock;
+ long unsigned int s_mb_buddies_generated;
+ long long unsigned int s_mb_generation_time;
+ atomic_t s_mb_lost_chunks;
+ atomic_t s_mb_preallocated;
+ atomic_t s_mb_discarded;
+ atomic_t s_lock_busy;
+ struct ext4_locality_group *s_locality_groups;
+ long unsigned int s_sectors_written_start;
+ u64 s_kbytes_written;
+ unsigned int s_extent_max_zeroout_kb;
+ unsigned int s_log_groups_per_flex;
+ struct flex_groups **s_flex_groups;
+ ext4_group_t s_flex_groups_allocated;
+ struct workqueue_struct *rsv_conversion_wq;
+ struct timer_list s_err_report;
+ struct ext4_li_request *s_li_request;
+ unsigned int s_li_wait_mult;
+ struct task_struct *s_mmp_tsk;
+ atomic_t s_last_trim_minblks;
+ struct crypto_shash *s_chksum_driver;
+ __u32 s_csum_seed;
+ struct shrinker s_es_shrinker;
+ struct list_head s_es_list;
+ long int s_es_nr_inode;
+ struct ext4_es_stats s_es_stats;
+ struct mb_cache___2 *s_ea_block_cache;
+ struct mb_cache___2 *s_ea_inode_cache;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ spinlock_t s_es_lock;
+ struct ratelimit_state s_err_ratelimit_state;
+ struct ratelimit_state s_warning_ratelimit_state;
+ struct ratelimit_state s_msg_ratelimit_state;
+ struct fscrypt_dummy_context s_dummy_enc_ctx;
+ struct percpu_rw_semaphore s_writepages_rwsem;
+ struct dax_device *s_daxdev;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct ext4_group_info {
+ long unsigned int bb_state;
+ struct rb_root bb_free_root;
+ ext4_grpblk_t bb_first_free;
+ ext4_grpblk_t bb_free;
+ ext4_grpblk_t bb_fragments;
+ ext4_grpblk_t bb_largest_free_order;
+ struct list_head bb_prealloc_list;
+ struct rw_semaphore alloc_sem;
+ ext4_grpblk_t bb_counters[0];
+};
+
+struct ext4_locality_group {
+ struct mutex lg_mutex;
+ struct list_head lg_prealloc_list[10];
+ spinlock_t lg_prealloc_lock;
+};
+
+struct ext4_li_request {
+ struct super_block *lr_super;
+ struct ext4_sb_info *lr_sbi;
+ ext4_group_t lr_next_group;
+ struct list_head lr_request;
+ long unsigned int lr_next_sched;
+ long unsigned int lr_timeout;
+};
+
+struct iomap_ops___2;
+
+struct shash_desc {
+ struct crypto_shash *tfm;
+ void *__ctx[0];
+};
+
+struct ext4_map_blocks {
+ ext4_fsblk_t m_pblk;
+ ext4_lblk_t m_lblk;
+ unsigned int m_len;
+ unsigned int m_flags;
+};
+
+struct ext4_system_zone {
+ struct rb_node node;
+ ext4_fsblk_t start_blk;
+ unsigned int count;
+};
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+enum {
+ EXT4_INODE_SECRM = 0,
+ EXT4_INODE_UNRM = 1,
+ EXT4_INODE_COMPR = 2,
+ EXT4_INODE_SYNC = 3,
+ EXT4_INODE_IMMUTABLE = 4,
+ EXT4_INODE_APPEND = 5,
+ EXT4_INODE_NODUMP = 6,
+ EXT4_INODE_NOATIME = 7,
+ EXT4_INODE_DIRTY = 8,
+ EXT4_INODE_COMPRBLK = 9,
+ EXT4_INODE_NOCOMPR = 10,
+ EXT4_INODE_ENCRYPT = 11,
+ EXT4_INODE_INDEX = 12,
+ EXT4_INODE_IMAGIC = 13,
+ EXT4_INODE_JOURNAL_DATA = 14,
+ EXT4_INODE_NOTAIL = 15,
+ EXT4_INODE_DIRSYNC = 16,
+ EXT4_INODE_TOPDIR = 17,
+ EXT4_INODE_HUGE_FILE = 18,
+ EXT4_INODE_EXTENTS = 19,
+ EXT4_INODE_VERITY = 20,
+ EXT4_INODE_EA_INODE = 21,
+ EXT4_INODE_DAX = 25,
+ EXT4_INODE_INLINE_DATA = 28,
+ EXT4_INODE_PROJINHERIT = 29,
+ EXT4_INODE_CASEFOLD = 30,
+ EXT4_INODE_RESERVED = 31,
+};
+
+struct ext4_dir_entry_2 {
+ __le32 inode;
+ __le16 rec_len;
+ __u8 name_len;
+ __u8 file_type;
+ char name[255];
+};
+
+struct fname;
+
+struct dir_private_info {
+ struct rb_root root;
+ struct rb_node *curr_node;
+ struct fname *extra_fname;
+ loff_t last_pos;
+ __u32 curr_hash;
+ __u32 curr_minor_hash;
+ __u32 next_hash;
+};
+
+struct fname {
+ __u32 hash;
+ __u32 minor_hash;
+ struct rb_node rb_hash;
+ struct fname *next;
+ __u32 inode;
+ __u8 name_len;
+ __u8 file_type;
+ char name[0];
+};
+
+enum SHIFT_DIRECTION {
+ SHIFT_LEFT = 0,
+ SHIFT_RIGHT = 1,
+};
+
+struct ext4_io_end_vec {
+ struct list_head list;
+ loff_t offset;
+ ssize_t size;
+};
+
+struct ext4_io_end {
+ struct list_head list;
+ handle_t *handle;
+ struct inode *inode;
+ struct bio *bio;
+ unsigned int flag;
+ atomic_t count;
+ struct list_head list_vec;
+};
+
+typedef struct ext4_io_end ext4_io_end_t;
+
+enum {
+ ES_WRITTEN_B = 0,
+ ES_UNWRITTEN_B = 1,
+ ES_DELAYED_B = 2,
+ ES_HOLE_B = 3,
+ ES_REFERENCED_B = 4,
+ ES_FLAGS = 5,
+};
+
+enum {
+ EXT4_STATE_JDATA = 0,
+ EXT4_STATE_NEW = 1,
+ EXT4_STATE_XATTR = 2,
+ EXT4_STATE_NO_EXPAND = 3,
+ EXT4_STATE_DA_ALLOC_CLOSE = 4,
+ EXT4_STATE_EXT_MIGRATE = 5,
+ EXT4_STATE_NEWENTRY = 6,
+ EXT4_STATE_MAY_INLINE_DATA = 7,
+ EXT4_STATE_EXT_PRECACHED = 8,
+ EXT4_STATE_LUSTRE_EA_INODE = 9,
+ EXT4_STATE_VERITY_IN_PROGRESS = 10,
+};
+
+struct ext4_iloc {
+ struct buffer_head *bh;
+ long unsigned int offset;
+ ext4_group_t block_group;
+};
+
+struct ext4_extent_tail {
+ __le32 et_checksum;
+};
+
+struct ext4_extent {
+ __le32 ee_block;
+ __le16 ee_len;
+ __le16 ee_start_hi;
+ __le32 ee_start_lo;
+};
+
+struct ext4_extent_idx {
+ __le32 ei_block;
+ __le32 ei_leaf_lo;
+ __le16 ei_leaf_hi;
+ __u16 ei_unused;
+};
+
+struct ext4_extent_header {
+ __le16 eh_magic;
+ __le16 eh_entries;
+ __le16 eh_max;
+ __le16 eh_depth;
+ __le32 eh_generation;
+};
+
+struct ext4_ext_path {
+ ext4_fsblk_t p_block;
+ __u16 p_depth;
+ __u16 p_maxdepth;
+ struct ext4_extent *p_ext;
+ struct ext4_extent_idx *p_idx;
+ struct ext4_extent_header *p_hdr;
+ struct buffer_head *p_bh;
+};
+
+struct partial_cluster {
+ ext4_fsblk_t pclu;
+ ext4_lblk_t lblk;
+ enum {
+ initial = 0,
+ tofree = 1,
+ nofree = 2,
+ } state;
+};
+
+struct pending_reservation {
+ struct rb_node rb_node;
+ ext4_lblk_t lclu;
+};
+
+struct rsvd_count {
+ int ndelonly;
+ bool first_do_lblk_found;
+ ext4_lblk_t first_do_lblk;
+ ext4_lblk_t last_do_lblk;
+ struct extent_status *left_es;
+ bool partial;
+ ext4_lblk_t lclu;
+};
+
+struct fsverity_info;
+
+struct fsmap {
+ __u32 fmr_device;
+ __u32 fmr_flags;
+ __u64 fmr_physical;
+ __u64 fmr_owner;
+ __u64 fmr_offset;
+ __u64 fmr_length;
+ __u64 fmr_reserved[3];
+};
+
+struct ext4_fsmap {
+ struct list_head fmr_list;
+ dev_t fmr_device;
+ uint32_t fmr_flags;
+ uint64_t fmr_physical;
+ uint64_t fmr_owner;
+ uint64_t fmr_length;
+};
+
+struct ext4_fsmap_head {
+ uint32_t fmh_iflags;
+ uint32_t fmh_oflags;
+ unsigned int fmh_count;
+ unsigned int fmh_entries;
+ struct ext4_fsmap fmh_keys[2];
+};
+
+typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *);
+
+struct ext4_getfsmap_info {
+ struct ext4_fsmap_head *gfi_head;
+ ext4_fsmap_format_t gfi_formatter;
+ void *gfi_format_arg;
+ ext4_fsblk_t gfi_next_fsblk;
+ u32 gfi_dev;
+ ext4_group_t gfi_agno;
+ struct ext4_fsmap gfi_low;
+ struct ext4_fsmap gfi_high;
+ struct ext4_fsmap gfi_lastfree;
+ struct list_head gfi_meta_list;
+ bool gfi_last;
+};
+
+struct ext4_getfsmap_dev {
+ int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *);
+ u32 gfd_dev;
+};
+
+struct dx_hash_info {
+ u32 hash;
+ u32 minor_hash;
+ int hash_version;
+ u32 *seed;
+};
+
+struct ext4_inode {
+ __le16 i_mode;
+ __le16 i_uid;
+ __le32 i_size_lo;
+ __le32 i_atime;
+ __le32 i_ctime;
+ __le32 i_mtime;
+ __le32 i_dtime;
+ __le16 i_gid;
+ __le16 i_links_count;
+ __le32 i_blocks_lo;
+ __le32 i_flags;
+ union {
+ struct {
+ __le32 l_i_version;
+ } linux1;
+ struct {
+ __u32 h_i_translator;
+ } hurd1;
+ struct {
+ __u32 m_i_reserved1;
+ } masix1;
+ } osd1;
+ __le32 i_block[15];
+ __le32 i_generation;
+ __le32 i_file_acl_lo;
+ __le32 i_size_high;
+ __le32 i_obso_faddr;
+ union {
+ struct {
+ __le16 l_i_blocks_high;
+ __le16 l_i_file_acl_high;
+ __le16 l_i_uid_high;
+ __le16 l_i_gid_high;
+ __le16 l_i_checksum_lo;
+ __le16 l_i_reserved;
+ } linux2;
+ struct {
+ __le16 h_i_reserved1;
+ __u16 h_i_mode_high;
+ __u16 h_i_uid_high;
+ __u16 h_i_gid_high;
+ __u32 h_i_author;
+ } hurd2;
+ struct {
+ __le16 h_i_reserved1;
+ __le16 m_i_file_acl_high;
+ __u32 m_i_reserved2[2];
+ } masix2;
+ } osd2;
+ __le16 i_extra_isize;
+ __le16 i_checksum_hi;
+ __le32 i_ctime_extra;
+ __le32 i_mtime_extra;
+ __le32 i_atime_extra;
+ __le32 i_crtime;
+ __le32 i_crtime_extra;
+ __le32 i_version_hi;
+ __le32 i_projid;
+};
+
+struct orlov_stats {
+ __u64 free_clusters;
+ __u32 free_inodes;
+ __u32 used_dirs;
+};
+
+typedef struct {
+ __le32 *p;
+ __le32 key;
+ struct buffer_head *bh;
+} Indirect;
+
+struct ext4_filename {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ struct dx_hash_info hinfo;
+};
+
+struct ext4_xattr_ibody_header {
+ __le32 h_magic;
+};
+
+struct ext4_xattr_entry {
+ __u8 e_name_len;
+ __u8 e_name_index;
+ __le16 e_value_offs;
+ __le32 e_value_inum;
+ __le32 e_value_size;
+ __le32 e_hash;
+ char e_name[0];
+};
+
+struct ext4_xattr_info {
+ const char *name;
+ const void *value;
+ size_t value_len;
+ int name_index;
+ int in_inode;
+};
+
+struct ext4_xattr_search {
+ struct ext4_xattr_entry *first;
+ void *base;
+ void *end;
+ struct ext4_xattr_entry *here;
+ int not_found;
+};
+
+struct ext4_xattr_ibody_find {
+ struct ext4_xattr_search s;
+ struct ext4_iloc iloc;
+};
+
+typedef short unsigned int __kernel_uid16_t;
+
+typedef short unsigned int __kernel_gid16_t;
+
+typedef __kernel_uid16_t uid16_t;
+
+typedef __kernel_gid16_t gid16_t;
+
+typedef s32 int32_t;
+
+struct ext4_io_submit {
+ struct writeback_control *io_wbc;
+ struct bio *io_bio;
+ ext4_io_end_t *io_end;
+ sector_t io_next_block;
+};
+
+typedef enum {
+ EXT4_IGET_NORMAL = 0,
+ EXT4_IGET_SPECIAL = 1,
+ EXT4_IGET_HANDLE = 2,
+} ext4_iget_flags;
+
+struct ext4_xattr_inode_array {
+ unsigned int count;
+ struct inode *inodes[0];
+};
+
+struct mpage_da_data {
+ struct inode *inode;
+ struct writeback_control *wbc;
+ long unsigned int first_page;
+ long unsigned int next_page;
+ long unsigned int last_page;
+ struct ext4_map_blocks map;
+ struct ext4_io_submit io_submit;
+ unsigned int do_map: 1;
+ unsigned int scanned_until_end: 1;
+};
+
+struct fstrim_range {
+ __u64 start;
+ __u64 len;
+ __u64 minlen;
+};
+
+struct ext4_new_group_input {
+ __u32 group;
+ __u64 block_bitmap;
+ __u64 inode_bitmap;
+ __u64 inode_table;
+ __u32 blocks_count;
+ __u16 reserved_blocks;
+ __u16 unused;
+};
+
+struct ext4_new_group_data {
+ __u32 group;
+ __u64 block_bitmap;
+ __u64 inode_bitmap;
+ __u64 inode_table;
+ __u32 blocks_count;
+ __u16 reserved_blocks;
+ __u16 mdata_blocks;
+ __u32 free_clusters_count;
+};
+
+struct move_extent {
+ __u32 reserved;
+ __u32 donor_fd;
+ __u64 orig_start;
+ __u64 donor_start;
+ __u64 len;
+ __u64 moved_len;
+};
+
+struct fsmap_head {
+ __u32 fmh_iflags;
+ __u32 fmh_oflags;
+ __u32 fmh_count;
+ __u32 fmh_entries;
+ __u64 fmh_reserved[6];
+ struct fsmap fmh_keys[2];
+ struct fsmap fmh_recs[0];
+};
+
+struct getfsmap_info {
+ struct super_block *gi_sb;
+ struct fsmap_head *gi_data;
+ unsigned int gi_idx;
+ __u32 gi_last_flags;
+};
+
+struct ext4_free_data {
+ struct list_head efd_list;
+ struct rb_node efd_node;
+ ext4_group_t efd_group;
+ ext4_grpblk_t efd_start_cluster;
+ ext4_grpblk_t efd_count;
+ tid_t efd_tid;
+};
+
+struct ext4_prealloc_space {
+ struct list_head pa_inode_list;
+ struct list_head pa_group_list;
+ union {
+ struct list_head pa_tmp_list;
+ struct callback_head pa_rcu;
+ } u;
+ spinlock_t pa_lock;
+ atomic_t pa_count;
+ unsigned int pa_deleted;
+ ext4_fsblk_t pa_pstart;
+ ext4_lblk_t pa_lstart;
+ ext4_grpblk_t pa_len;
+ ext4_grpblk_t pa_free;
+ short unsigned int pa_type;
+ spinlock_t *pa_obj_lock;
+ struct inode *pa_inode;
+};
+
+enum {
+ MB_INODE_PA = 0,
+ MB_GROUP_PA = 1,
+};
+
+struct ext4_free_extent {
+ ext4_lblk_t fe_logical;
+ ext4_grpblk_t fe_start;
+ ext4_group_t fe_group;
+ ext4_grpblk_t fe_len;
+};
+
+struct ext4_allocation_context {
+ struct inode *ac_inode;
+ struct super_block *ac_sb;
+ struct ext4_free_extent ac_o_ex;
+ struct ext4_free_extent ac_g_ex;
+ struct ext4_free_extent ac_b_ex;
+ struct ext4_free_extent ac_f_ex;
+ __u16 ac_groups_scanned;
+ __u16 ac_found;
+ __u16 ac_tail;
+ __u16 ac_buddy;
+ __u16 ac_flags;
+ __u8 ac_status;
+ __u8 ac_criteria;
+ __u8 ac_2order;
+ __u8 ac_op;
+ struct page *ac_bitmap_page;
+ struct page *ac_buddy_page;
+ struct ext4_prealloc_space *ac_pa;
+ struct ext4_locality_group *ac_lg;
+};
+
+struct ext4_buddy {
+ struct page *bd_buddy_page;
+ void *bd_buddy;
+ struct page *bd_bitmap_page;
+ void *bd_bitmap;
+ struct ext4_group_info *bd_info;
+ struct super_block *bd_sb;
+ __u16 bd_blkbits;
+ ext4_group_t bd_group;
+};
+
+typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *);
+
+struct sg {
+ struct ext4_group_info info;
+ ext4_grpblk_t counters[18];
+};
+
+struct migrate_struct {
+ ext4_lblk_t first_block;
+ ext4_lblk_t last_block;
+ ext4_lblk_t curr_block;
+ ext4_fsblk_t first_pblock;
+ ext4_fsblk_t last_pblock;
+};
+
+struct mmp_struct {
+ __le32 mmp_magic;
+ __le32 mmp_seq;
+ __le64 mmp_time;
+ char mmp_nodename[64];
+ char mmp_bdevname[32];
+ __le16 mmp_check_interval;
+ __le16 mmp_pad1;
+ __le32 mmp_pad2[226];
+ __le32 mmp_checksum;
+};
+
+struct mmpd_data {
+ struct buffer_head *bh;
+ struct super_block *sb;
+};
+
+enum {
+ I_DATA_SEM_NORMAL = 0,
+ I_DATA_SEM_OTHER = 1,
+ I_DATA_SEM_QUOTA = 2,
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+ bool is_ciphertext_name;
+};
+
+struct ext4_dir_entry {
+ __le32 inode;
+ __le16 rec_len;
+ __le16 name_len;
+ char name[255];
+};
+
+struct ext4_dir_entry_tail {
+ __le32 det_reserved_zero1;
+ __le16 det_rec_len;
+ __u8 det_reserved_zero2;
+ __u8 det_reserved_ft;
+ __le32 det_checksum;
+};
+
+typedef enum {
+ EITHER = 0,
+ INDEX = 1,
+ DIRENT = 2,
+ DIRENT_HTREE = 3,
+} dirblock_type_t;
+
+struct fake_dirent {
+ __le32 inode;
+ __le16 rec_len;
+ u8 name_len;
+ u8 file_type;
+};
+
+struct dx_countlimit {
+ __le16 limit;
+ __le16 count;
+};
+
+struct dx_entry {
+ __le32 hash;
+ __le32 block;
+};
+
+struct dx_root_info {
+ __le32 reserved_zero;
+ u8 hash_version;
+ u8 info_length;
+ u8 indirect_levels;
+ u8 unused_flags;
+};
+
+struct dx_root {
+ struct fake_dirent dot;
+ char dot_name[4];
+ struct fake_dirent dotdot;
+ char dotdot_name[4];
+ struct dx_root_info info;
+ struct dx_entry entries[0];
+};
+
+struct dx_node {
+ struct fake_dirent fake;
+ struct dx_entry entries[0];
+};
+
+struct dx_frame {
+ struct buffer_head *bh;
+ struct dx_entry *entries;
+ struct dx_entry *at;
+};
+
+struct dx_map_entry {
+ u32 hash;
+ u16 offs;
+ u16 size;
+};
+
+struct dx_tail {
+ u32 dt_reserved;
+ __le32 dt_checksum;
+};
+
+struct ext4_renament {
+ struct inode *dir;
+ struct dentry *dentry;
+ struct inode *inode;
+ bool is_dir;
+ int dir_nlink_delta;
+ struct buffer_head *bh;
+ struct ext4_dir_entry_2 *de;
+ int inlined;
+ struct buffer_head *dir_bh;
+ struct ext4_dir_entry_2 *parent_de;
+ int dir_inlined;
+};
+
+union fscrypt_context;
+
+enum bio_post_read_step {
+ STEP_INITIAL = 0,
+ STEP_DECRYPT = 1,
+ STEP_VERITY = 2,
+ STEP_MAX = 3,
+};
+
+struct bio_post_read_ctx {
+ struct bio *bio;
+ struct work_struct work;
+ unsigned int cur_step;
+ unsigned int enabled_steps;
+};
+
+enum {
+ BLOCK_BITMAP = 0,
+ INODE_BITMAP = 1,
+ INODE_TABLE = 2,
+ GROUP_TABLE_COUNT = 3,
+};
+
+struct ext4_rcu_ptr {
+ struct callback_head rcu;
+ void *ptr;
+};
+
+struct ext4_new_flex_group_data {
+ struct ext4_new_group_data *groups;
+ __u16 *bg_flags;
+ ext4_group_t count;
+};
+
+enum stat_group {
+ STAT_READ = 0,
+ STAT_WRITE = 1,
+ STAT_DISCARD = 2,
+ STAT_FLUSH = 3,
+ NR_STAT_GROUPS = 4,
+};
+
+struct ext4_lazy_init {
+ long unsigned int li_state;
+ struct list_head li_request_list;
+ struct mutex li_list_mtx;
+};
+
+struct ext4_journal_cb_entry {
+ struct list_head jce_list;
+ void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int);
+};
+
+struct trace_event_raw_ext4_other_inode_update_time {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ino_t orig_ino;
+ uid_t uid;
+ gid_t gid;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_free_inode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ uid_t uid;
+ gid_t gid;
+ __u64 blocks;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_request_inode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t dir;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_allocate_inode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ino_t dir;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_evict_inode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int nlink;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_drop_inode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int drop;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_nfs_commit_metadata {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_mark_inode_dirty {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ long unsigned int ip;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_begin_ordered_truncate {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t new_size;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__write_begin {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t pos;
+ unsigned int len;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__write_end {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t pos;
+ unsigned int len;
+ unsigned int copied;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_writepages {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ long int nr_to_write;
+ long int pages_skipped;
+ loff_t range_start;
+ loff_t range_end;
+ long unsigned int writeback_index;
+ int sync_mode;
+ char for_kupdate;
+ char range_cyclic;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_da_write_pages {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ long unsigned int first_page;
+ long int nr_to_write;
+ int sync_mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_da_write_pages_extent {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 lblk;
+ __u32 len;
+ __u32 flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_writepages_result {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int ret;
+ int pages_written;
+ long int pages_skipped;
+ long unsigned int writeback_index;
+ int sync_mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__page_op {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ long unsigned int index;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_invalidatepage_op {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ long unsigned int index;
+ unsigned int offset;
+ unsigned int length;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_discard_blocks {
+ struct trace_entry ent;
+ dev_t dev;
+ __u64 blk;
+ __u64 count;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__mb_new_pa {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 pa_pstart;
+ __u64 pa_lstart;
+ __u32 pa_len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_mb_release_inode_pa {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 block;
+ __u32 count;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_mb_release_group_pa {
+ struct trace_entry ent;
+ dev_t dev;
+ __u64 pa_pstart;
+ __u32 pa_len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_discard_preallocations {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_mb_discard_preallocations {
+ struct trace_entry ent;
+ dev_t dev;
+ int needed;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_request_blocks {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ unsigned int len;
+ __u32 logical;
+ __u32 lleft;
+ __u32 lright;
+ __u64 goal;
+ __u64 pleft;
+ __u64 pright;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_allocate_blocks {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 block;
+ unsigned int len;
+ __u32 logical;
+ __u32 lleft;
+ __u32 lright;
+ __u64 goal;
+ __u64 pleft;
+ __u64 pright;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_free_blocks {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 block;
+ long unsigned int count;
+ int flags;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_sync_file_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ino_t parent;
+ int datasync;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_sync_file_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_sync_fs {
+ struct trace_entry ent;
+ dev_t dev;
+ int wait;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_alloc_da_blocks {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ unsigned int data_blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_mballoc_alloc {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u32 orig_logical;
+ int orig_start;
+ __u32 orig_group;
+ int orig_len;
+ __u32 goal_logical;
+ int goal_start;
+ __u32 goal_group;
+ int goal_len;
+ __u32 result_logical;
+ int result_start;
+ __u32 result_group;
+ int result_len;
+ __u16 found;
+ __u16 groups;
+ __u16 buddy;
+ __u16 flags;
+ __u16 tail;
+ __u8 cr;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_mballoc_prealloc {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u32 orig_logical;
+ int orig_start;
+ __u32 orig_group;
+ int orig_len;
+ __u32 result_logical;
+ int result_start;
+ __u32 result_group;
+ int result_len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__mballoc {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int result_start;
+ __u32 result_group;
+ int result_len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_forget {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 block;
+ int is_metadata;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_da_update_reserve_space {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 i_blocks;
+ int used_blocks;
+ int reserved_data_blocks;
+ int quota_claim;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_da_reserve_space {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 i_blocks;
+ int reserved_data_blocks;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_da_release_space {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 i_blocks;
+ int freed_blocks;
+ int reserved_data_blocks;
+ __u16 mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__bitmap_load {
+ struct trace_entry ent;
+ dev_t dev;
+ __u32 group;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_direct_IO_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t pos;
+ long unsigned int len;
+ int rw;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_direct_IO_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t pos;
+ long unsigned int len;
+ int rw;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__fallocate_mode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t offset;
+ loff_t len;
+ int mode;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_fallocate_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t pos;
+ unsigned int blocks;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_unlink_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ino_t parent;
+ loff_t size;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_unlink_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__truncate {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ __u64 blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_convert_to_initialized_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t m_lblk;
+ unsigned int m_len;
+ ext4_lblk_t u_lblk;
+ unsigned int u_len;
+ ext4_fsblk_t u_pblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t m_lblk;
+ unsigned int m_len;
+ ext4_lblk_t u_lblk;
+ unsigned int u_len;
+ ext4_fsblk_t u_pblk;
+ ext4_lblk_t i_lblk;
+ unsigned int i_len;
+ ext4_fsblk_t i_pblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__map_blocks_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ unsigned int len;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__map_blocks_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ unsigned int flags;
+ ext4_fsblk_t pblk;
+ ext4_lblk_t lblk;
+ unsigned int len;
+ unsigned int mflags;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_load_extent {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_fsblk_t pblk;
+ ext4_lblk_t lblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_load_inode {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_journal_start {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int ip;
+ int blocks;
+ int rsv_blocks;
+ int revoke_creds;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_journal_start_reserved {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int ip;
+ int blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__trim {
+ struct trace_entry ent;
+ int dev_major;
+ int dev_minor;
+ __u32 group;
+ int start;
+ int len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_handle_unwritten_extents {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ int flags;
+ ext4_lblk_t lblk;
+ ext4_fsblk_t pblk;
+ unsigned int len;
+ unsigned int allocated;
+ ext4_fsblk_t newblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_get_implied_cluster_alloc_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ unsigned int flags;
+ ext4_lblk_t lblk;
+ ext4_fsblk_t pblk;
+ unsigned int len;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_put_in_cache {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ unsigned int len;
+ ext4_fsblk_t start;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_in_cache {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_find_delalloc_range {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t from;
+ ext4_lblk_t to;
+ int reverse;
+ int found;
+ ext4_lblk_t found_blk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_get_reserved_cluster_alloc {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ unsigned int len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_show_extent {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_fsblk_t pblk;
+ ext4_lblk_t lblk;
+ short unsigned int len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_remove_blocks {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t from;
+ ext4_lblk_t to;
+ ext4_fsblk_t ee_pblk;
+ ext4_lblk_t ee_lblk;
+ short unsigned int ee_len;
+ ext4_fsblk_t pc_pclu;
+ ext4_lblk_t pc_lblk;
+ int pc_state;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_rm_leaf {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t start;
+ ext4_lblk_t ee_lblk;
+ ext4_fsblk_t ee_pblk;
+ short int ee_len;
+ ext4_fsblk_t pc_pclu;
+ ext4_lblk_t pc_lblk;
+ int pc_state;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_rm_idx {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_fsblk_t pblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_remove_space {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t start;
+ ext4_lblk_t end;
+ int depth;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_ext_remove_space_done {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t start;
+ ext4_lblk_t end;
+ int depth;
+ ext4_fsblk_t pc_pclu;
+ ext4_lblk_t pc_lblk;
+ int pc_state;
+ short unsigned int eh_entries;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__es_extent {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ ext4_lblk_t len;
+ ext4_fsblk_t pblk;
+ char status;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_remove_extent {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t lblk;
+ loff_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_find_extent_range_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_find_extent_range_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ ext4_lblk_t len;
+ ext4_fsblk_t pblk;
+ char status;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_lookup_extent_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_lookup_extent_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ ext4_lblk_t len;
+ ext4_fsblk_t pblk;
+ char status;
+ int found;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4__es_shrink_enter {
+ struct trace_entry ent;
+ dev_t dev;
+ int nr_to_scan;
+ int cache_cnt;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_shrink_scan_exit {
+ struct trace_entry ent;
+ dev_t dev;
+ int nr_shrunk;
+ int cache_cnt;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_collapse_range {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t offset;
+ loff_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_insert_range {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ loff_t offset;
+ loff_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_shrink {
+ struct trace_entry ent;
+ dev_t dev;
+ int nr_shrunk;
+ long long unsigned int scan_time;
+ int nr_skipped;
+ int retried;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_es_insert_delayed_block {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ ext4_lblk_t lblk;
+ ext4_lblk_t len;
+ ext4_fsblk_t pblk;
+ char status;
+ bool allocated;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_fsmap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ dev_t keydev;
+ u32 agno;
+ u64 bno;
+ u64 len;
+ u64 owner;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_getfsmap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ dev_t keydev;
+ u64 block;
+ u64 len;
+ u64 owner;
+ u64 flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_shutdown {
+ struct trace_entry ent;
+ dev_t dev;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_ext4_error {
+ struct trace_entry ent;
+ dev_t dev;
+ const char *function;
+ unsigned int line;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_ext4_other_inode_update_time {};
+
+struct trace_event_data_offsets_ext4_free_inode {};
+
+struct trace_event_data_offsets_ext4_request_inode {};
+
+struct trace_event_data_offsets_ext4_allocate_inode {};
+
+struct trace_event_data_offsets_ext4_evict_inode {};
+
+struct trace_event_data_offsets_ext4_drop_inode {};
+
+struct trace_event_data_offsets_ext4_nfs_commit_metadata {};
+
+struct trace_event_data_offsets_ext4_mark_inode_dirty {};
+
+struct trace_event_data_offsets_ext4_begin_ordered_truncate {};
+
+struct trace_event_data_offsets_ext4__write_begin {};
+
+struct trace_event_data_offsets_ext4__write_end {};
+
+struct trace_event_data_offsets_ext4_writepages {};
+
+struct trace_event_data_offsets_ext4_da_write_pages {};
+
+struct trace_event_data_offsets_ext4_da_write_pages_extent {};
+
+struct trace_event_data_offsets_ext4_writepages_result {};
+
+struct trace_event_data_offsets_ext4__page_op {};
+
+struct trace_event_data_offsets_ext4_invalidatepage_op {};
+
+struct trace_event_data_offsets_ext4_discard_blocks {};
+
+struct trace_event_data_offsets_ext4__mb_new_pa {};
+
+struct trace_event_data_offsets_ext4_mb_release_inode_pa {};
+
+struct trace_event_data_offsets_ext4_mb_release_group_pa {};
+
+struct trace_event_data_offsets_ext4_discard_preallocations {};
+
+struct trace_event_data_offsets_ext4_mb_discard_preallocations {};
+
+struct trace_event_data_offsets_ext4_request_blocks {};
+
+struct trace_event_data_offsets_ext4_allocate_blocks {};
+
+struct trace_event_data_offsets_ext4_free_blocks {};
+
+struct trace_event_data_offsets_ext4_sync_file_enter {};
+
+struct trace_event_data_offsets_ext4_sync_file_exit {};
+
+struct trace_event_data_offsets_ext4_sync_fs {};
+
+struct trace_event_data_offsets_ext4_alloc_da_blocks {};
+
+struct trace_event_data_offsets_ext4_mballoc_alloc {};
+
+struct trace_event_data_offsets_ext4_mballoc_prealloc {};
+
+struct trace_event_data_offsets_ext4__mballoc {};
+
+struct trace_event_data_offsets_ext4_forget {};
+
+struct trace_event_data_offsets_ext4_da_update_reserve_space {};
+
+struct trace_event_data_offsets_ext4_da_reserve_space {};
+
+struct trace_event_data_offsets_ext4_da_release_space {};
+
+struct trace_event_data_offsets_ext4__bitmap_load {};
+
+struct trace_event_data_offsets_ext4_direct_IO_enter {};
+
+struct trace_event_data_offsets_ext4_direct_IO_exit {};
+
+struct trace_event_data_offsets_ext4__fallocate_mode {};
+
+struct trace_event_data_offsets_ext4_fallocate_exit {};
+
+struct trace_event_data_offsets_ext4_unlink_enter {};
+
+struct trace_event_data_offsets_ext4_unlink_exit {};
+
+struct trace_event_data_offsets_ext4__truncate {};
+
+struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {};
+
+struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {};
+
+struct trace_event_data_offsets_ext4__map_blocks_enter {};
+
+struct trace_event_data_offsets_ext4__map_blocks_exit {};
+
+struct trace_event_data_offsets_ext4_ext_load_extent {};
+
+struct trace_event_data_offsets_ext4_load_inode {};
+
+struct trace_event_data_offsets_ext4_journal_start {};
+
+struct trace_event_data_offsets_ext4_journal_start_reserved {};
+
+struct trace_event_data_offsets_ext4__trim {};
+
+struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {};
+
+struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {};
+
+struct trace_event_data_offsets_ext4_ext_put_in_cache {};
+
+struct trace_event_data_offsets_ext4_ext_in_cache {};
+
+struct trace_event_data_offsets_ext4_find_delalloc_range {};
+
+struct trace_event_data_offsets_ext4_get_reserved_cluster_alloc {};
+
+struct trace_event_data_offsets_ext4_ext_show_extent {};
+
+struct trace_event_data_offsets_ext4_remove_blocks {};
+
+struct trace_event_data_offsets_ext4_ext_rm_leaf {};
+
+struct trace_event_data_offsets_ext4_ext_rm_idx {};
+
+struct trace_event_data_offsets_ext4_ext_remove_space {};
+
+struct trace_event_data_offsets_ext4_ext_remove_space_done {};
+
+struct trace_event_data_offsets_ext4__es_extent {};
+
+struct trace_event_data_offsets_ext4_es_remove_extent {};
+
+struct trace_event_data_offsets_ext4_es_find_extent_range_enter {};
+
+struct trace_event_data_offsets_ext4_es_find_extent_range_exit {};
+
+struct trace_event_data_offsets_ext4_es_lookup_extent_enter {};
+
+struct trace_event_data_offsets_ext4_es_lookup_extent_exit {};
+
+struct trace_event_data_offsets_ext4__es_shrink_enter {};
+
+struct trace_event_data_offsets_ext4_es_shrink_scan_exit {};
+
+struct trace_event_data_offsets_ext4_collapse_range {};
+
+struct trace_event_data_offsets_ext4_insert_range {};
+
+struct trace_event_data_offsets_ext4_es_shrink {};
+
+struct trace_event_data_offsets_ext4_es_insert_delayed_block {};
+
+struct trace_event_data_offsets_ext4_fsmap_class {};
+
+struct trace_event_data_offsets_ext4_getfsmap_class {};
+
+struct trace_event_data_offsets_ext4_shutdown {};
+
+struct trace_event_data_offsets_ext4_error {};
+
+typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t);
+
+typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int);
+
+typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int);
+
+typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int);
+
+typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int);
+
+typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t);
+
+typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *);
+
+typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *);
+
+typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *);
+
+typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int);
+
+typedef void (*btf_trace_ext4_writepage)(void *, struct page *);
+
+typedef void (*btf_trace_ext4_readpage)(void *, struct page *);
+
+typedef void (*btf_trace_ext4_releasepage)(void *, struct page *);
+
+typedef void (*btf_trace_ext4_invalidatepage)(void *, struct page *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_journalled_invalidatepage)(void *, struct page *, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int);
+
+typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *);
+
+typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *);
+
+typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *);
+
+typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int);
+
+typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *);
+
+typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int);
+
+typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int);
+
+typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int);
+
+typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int);
+
+typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int);
+
+typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *);
+
+typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *);
+
+typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t);
+
+typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t);
+
+typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64);
+
+typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int);
+
+typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int);
+
+typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int);
+
+typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int);
+
+typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int);
+
+typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int);
+
+typedef void (*btf_trace_ext4_direct_IO_enter)(void *, struct inode *, loff_t, long unsigned int, int);
+
+typedef void (*btf_trace_ext4_direct_IO_exit)(void *, struct inode *, loff_t, long unsigned int, int, int);
+
+typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int);
+
+typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int);
+
+typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int);
+
+typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int);
+
+typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *);
+
+typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int);
+
+typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *);
+
+typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *);
+
+typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int);
+
+typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int);
+
+typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int);
+
+typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t);
+
+typedef void (*btf_trace_ext4_load_inode)(void *, struct inode *);
+
+typedef void (*btf_trace_ext4_journal_start)(void *, struct super_block *, int, int, int, long unsigned int);
+
+typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int);
+
+typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t);
+
+typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t);
+
+typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t);
+
+typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int);
+
+typedef void (*btf_trace_ext4_ext_put_in_cache)(void *, struct inode *, ext4_lblk_t, unsigned int, ext4_fsblk_t);
+
+typedef void (*btf_trace_ext4_ext_in_cache)(void *, struct inode *, ext4_lblk_t, int);
+
+typedef void (*btf_trace_ext4_find_delalloc_range)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, int, ext4_lblk_t);
+
+typedef void (*btf_trace_ext4_get_reserved_cluster_alloc)(void *, struct inode *, ext4_lblk_t, unsigned int);
+
+typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int);
+
+typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *);
+
+typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *);
+
+typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t);
+
+typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int);
+
+typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16);
+
+typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *);
+
+typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *);
+
+typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t);
+
+typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t);
+
+typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *);
+
+typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t);
+
+typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int);
+
+typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int);
+
+typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int);
+
+typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int);
+
+typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t);
+
+typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t);
+
+typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int);
+
+typedef void (*btf_trace_ext4_es_insert_delayed_block)(void *, struct inode *, struct extent_status *, bool);
+
+typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64);
+
+typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64);
+
+typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64);
+
+typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *);
+
+typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *);
+
+typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *);
+
+typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int);
+
+typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int);
+
+enum {
+ Opt_bsd_df = 0,
+ Opt_minix_df = 1,
+ Opt_grpid = 2,
+ Opt_nogrpid = 3,
+ Opt_resgid = 4,
+ Opt_resuid = 5,
+ Opt_sb = 6,
+ Opt_err_cont = 7,
+ Opt_err_panic = 8,
+ Opt_err_ro = 9,
+ Opt_nouid32 = 10,
+ Opt_debug = 11,
+ Opt_removed = 12,
+ Opt_user_xattr = 13,
+ Opt_nouser_xattr = 14,
+ Opt_acl = 15,
+ Opt_noacl = 16,
+ Opt_auto_da_alloc = 17,
+ Opt_noauto_da_alloc = 18,
+ Opt_noload = 19,
+ Opt_commit = 20,
+ Opt_min_batch_time = 21,
+ Opt_max_batch_time = 22,
+ Opt_journal_dev = 23,
+ Opt_journal_path = 24,
+ Opt_journal_checksum = 25,
+ Opt_journal_async_commit = 26,
+ Opt_abort = 27,
+ Opt_data_journal = 28,
+ Opt_data_ordered = 29,
+ Opt_data_writeback = 30,
+ Opt_data_err_abort = 31,
+ Opt_data_err_ignore = 32,
+ Opt_test_dummy_encryption = 33,
+ Opt_usrjquota = 34,
+ Opt_grpjquota = 35,
+ Opt_offusrjquota = 36,
+ Opt_offgrpjquota = 37,
+ Opt_jqfmt_vfsold = 38,
+ Opt_jqfmt_vfsv0 = 39,
+ Opt_jqfmt_vfsv1 = 40,
+ Opt_quota = 41,
+ Opt_noquota = 42,
+ Opt_barrier = 43,
+ Opt_nobarrier = 44,
+ Opt_err___2 = 45,
+ Opt_usrquota = 46,
+ Opt_grpquota = 47,
+ Opt_prjquota = 48,
+ Opt_i_version = 49,
+ Opt_dax = 50,
+ Opt_dax_always = 51,
+ Opt_dax_inode = 52,
+ Opt_dax_never = 53,
+ Opt_stripe = 54,
+ Opt_delalloc = 55,
+ Opt_nodelalloc = 56,
+ Opt_warn_on_error = 57,
+ Opt_nowarn_on_error = 58,
+ Opt_mblk_io_submit = 59,
+ Opt_lazytime = 60,
+ Opt_nolazytime = 61,
+ Opt_debug_want_extra_isize = 62,
+ Opt_nomblk_io_submit = 63,
+ Opt_block_validity = 64,
+ Opt_noblock_validity = 65,
+ Opt_inode_readahead_blks = 66,
+ Opt_journal_ioprio = 67,
+ Opt_dioread_nolock = 68,
+ Opt_dioread_lock = 69,
+ Opt_discard = 70,
+ Opt_nodiscard = 71,
+ Opt_init_itable = 72,
+ Opt_noinit_itable = 73,
+ Opt_max_dir_size_kb = 74,
+ Opt_nojournal_checksum = 75,
+ Opt_nombcache = 76,
+};
+
+struct mount_opts {
+ int token;
+ int mount_opt;
+ int flags;
+};
+
+struct ext4_mount_options {
+ long unsigned int s_mount_opt;
+ long unsigned int s_mount_opt2;
+ kuid_t s_resuid;
+ kgid_t s_resgid;
+ long unsigned int s_commit_interval;
+ u32 s_min_batch_time;
+ u32 s_max_batch_time;
+};
+
+enum {
+ attr_noop = 0,
+ attr_delayed_allocation_blocks = 1,
+ attr_session_write_kbytes = 2,
+ attr_lifetime_write_kbytes = 3,
+ attr_reserved_clusters = 4,
+ attr_inode_readahead = 5,
+ attr_trigger_test_error = 6,
+ attr_first_error_time = 7,
+ attr_last_error_time = 8,
+ attr_feature = 9,
+ attr_pointer_ui = 10,
+ attr_pointer_ul = 11,
+ attr_pointer_u64 = 12,
+ attr_pointer_u8 = 13,
+ attr_pointer_string = 14,
+ attr_pointer_atomic = 15,
+ attr_journal_task = 16,
+};
+
+enum {
+ ptr_explicit = 0,
+ ptr_ext4_sb_info_offset = 1,
+ ptr_ext4_super_block_offset = 2,
+};
+
+struct ext4_attr {
+ struct attribute attr;
+ short int attr_id;
+ short int attr_ptr;
+ short unsigned int attr_size;
+ union {
+ int offset;
+ void *explicit_ptr;
+ } u;
+};
+
+struct ext4_xattr_header {
+ __le32 h_magic;
+ __le32 h_refcount;
+ __le32 h_blocks;
+ __le32 h_hash;
+ __le32 h_checksum;
+ __u32 h_reserved[3];
+};
+
+struct ext4_xattr_block_find {
+ struct ext4_xattr_search s;
+ struct buffer_head *bh;
+};
+
+typedef struct {
+ __le16 e_tag;
+ __le16 e_perm;
+ __le32 e_id;
+} ext4_acl_entry;
+
+typedef struct {
+ __le32 a_version;
+} ext4_acl_header;
+
+struct xattr {
+ const char *name;
+ void *value;
+ size_t value_len;
+};
+
+struct commit_header {
+ __be32 h_magic;
+ __be32 h_blocktype;
+ __be32 h_sequence;
+ unsigned char h_chksum_type;
+ unsigned char h_chksum_size;
+ unsigned char h_padding[2];
+ __be32 h_chksum[8];
+ __be64 h_commit_sec;
+ __be32 h_commit_nsec;
+};
+
+struct journal_block_tag3_s {
+ __be32 t_blocknr;
+ __be32 t_flags;
+ __be32 t_blocknr_high;
+ __be32 t_checksum;
+};
+
+typedef struct journal_block_tag3_s journal_block_tag3_t;
+
+struct journal_block_tag_s {
+ __be32 t_blocknr;
+ __be16 t_checksum;
+ __be16 t_flags;
+ __be32 t_blocknr_high;
+};
+
+typedef struct journal_block_tag_s journal_block_tag_t;
+
+struct jbd2_journal_block_tail {
+ __be32 t_checksum;
+};
+
+struct jbd2_journal_revoke_header_s {
+ journal_header_t r_header;
+ __be32 r_count;
+};
+
+typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t;
+
+struct recovery_info {
+ tid_t start_transaction;
+ tid_t end_transaction;
+ int nr_replays;
+ int nr_revokes;
+ int nr_revoke_hits;
+};
+
+enum passtype {
+ PASS_SCAN = 0,
+ PASS_REVOKE = 1,
+ PASS_REPLAY = 2,
+};
+
+struct jbd2_revoke_table_s {
+ int hash_size;
+ int hash_shift;
+ struct list_head *hash_table;
+};
+
+struct jbd2_revoke_record_s {
+ struct list_head hash;
+ tid_t sequence;
+ long long unsigned int blocknr;
+};
+
+struct trace_event_raw_jbd2_checkpoint {
+ struct trace_entry ent;
+ dev_t dev;
+ int result;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_commit {
+ struct trace_entry ent;
+ dev_t dev;
+ char sync_commit;
+ int transaction;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_end_commit {
+ struct trace_entry ent;
+ dev_t dev;
+ char sync_commit;
+ int transaction;
+ int head;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_submit_inode_data {
+ struct trace_entry ent;
+ dev_t dev;
+ ino_t ino;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_handle_start_class {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int tid;
+ unsigned int type;
+ unsigned int line_no;
+ int requested_blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_handle_extend {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int tid;
+ unsigned int type;
+ unsigned int line_no;
+ int buffer_credits;
+ int requested_blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_handle_stats {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int tid;
+ unsigned int type;
+ unsigned int line_no;
+ int interval;
+ int sync;
+ int requested_blocks;
+ int dirtied_blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_run_stats {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int tid;
+ long unsigned int wait;
+ long unsigned int request_delay;
+ long unsigned int running;
+ long unsigned int locked;
+ long unsigned int flushing;
+ long unsigned int logging;
+ __u32 handle_count;
+ __u32 blocks;
+ __u32 blocks_logged;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_checkpoint_stats {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int tid;
+ long unsigned int chp_time;
+ __u32 forced_to_close;
+ __u32 written;
+ __u32 dropped;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_update_log_tail {
+ struct trace_entry ent;
+ dev_t dev;
+ tid_t tail_sequence;
+ tid_t first_tid;
+ long unsigned int block_nr;
+ long unsigned int freed;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_write_superblock {
+ struct trace_entry ent;
+ dev_t dev;
+ int write_op;
+ char __data[0];
+};
+
+struct trace_event_raw_jbd2_lock_buffer_stall {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int stall_ms;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_jbd2_checkpoint {};
+
+struct trace_event_data_offsets_jbd2_commit {};
+
+struct trace_event_data_offsets_jbd2_end_commit {};
+
+struct trace_event_data_offsets_jbd2_submit_inode_data {};
+
+struct trace_event_data_offsets_jbd2_handle_start_class {};
+
+struct trace_event_data_offsets_jbd2_handle_extend {};
+
+struct trace_event_data_offsets_jbd2_handle_stats {};
+
+struct trace_event_data_offsets_jbd2_run_stats {};
+
+struct trace_event_data_offsets_jbd2_checkpoint_stats {};
+
+struct trace_event_data_offsets_jbd2_update_log_tail {};
+
+struct trace_event_data_offsets_jbd2_write_superblock {};
+
+struct trace_event_data_offsets_jbd2_lock_buffer_stall {};
+
+typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int);
+
+typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *);
+
+typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *);
+
+typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *);
+
+typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *);
+
+typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *);
+
+typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *);
+
+typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *);
+
+typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int);
+
+typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int);
+
+typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int, int);
+
+typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int, int, int, int);
+
+typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, long unsigned int, struct transaction_run_stats_s *);
+
+typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, long unsigned int, struct transaction_chp_stats_s *);
+
+typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int);
+
+typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, int);
+
+typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int);
+
+struct jbd2_stats_proc_session {
+ journal_t *journal;
+ struct transaction_stats_s *stats;
+ int start;
+ int max;
+};
+
+struct ramfs_mount_opts {
+ umode_t mode;
+};
+
+struct ramfs_fs_info {
+ struct ramfs_mount_opts mount_opts;
+};
+
+enum ramfs_param {
+ Opt_mode___3 = 0,
+};
+
+enum hugetlbfs_size_type {
+ NO_SIZE = 0,
+ SIZE_STD = 1,
+ SIZE_PERCENT = 2,
+};
+
+struct hugetlbfs_fs_context {
+ struct hstate *hstate;
+ long long unsigned int max_size_opt;
+ long long unsigned int min_size_opt;
+ long int max_hpages;
+ long int nr_inodes;
+ long int min_hpages;
+ enum hugetlbfs_size_type max_val_type;
+ enum hugetlbfs_size_type min_val_type;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+};
+
+enum hugetlb_param {
+ Opt_gid___4 = 0,
+ Opt_min_size = 1,
+ Opt_mode___4 = 2,
+ Opt_nr_inodes___2 = 3,
+ Opt_pagesize = 4,
+ Opt_size___2 = 5,
+ Opt_uid___3 = 6,
+};
+
+struct getdents_callback___2 {
+ struct dir_context ctx;
+ char *name;
+ u64 ino;
+ int found;
+ int sequence;
+};
+
+typedef u16 wchar_t;
+
+typedef u32 unicode_t;
+
+struct nls_table {
+ const char *charset;
+ const char *alias;
+ int (*uni2char)(wchar_t, unsigned char *, int);
+ int (*char2uni)(const unsigned char *, int, wchar_t *);
+ const unsigned char *charset2lower;
+ const unsigned char *charset2upper;
+ struct module *owner;
+ struct nls_table *next;
+};
+
+enum utf16_endian {
+ UTF16_HOST_ENDIAN = 0,
+ UTF16_LITTLE_ENDIAN = 1,
+ UTF16_BIG_ENDIAN = 2,
+};
+
+struct utf8_table {
+ int cmask;
+ int cval;
+ int shift;
+ long int lmask;
+ long int lval;
+};
+
+typedef unsigned int autofs_wqt_t;
+
+struct autofs_sb_info;
+
+struct autofs_info {
+ struct dentry *dentry;
+ struct inode *inode;
+ int flags;
+ struct completion expire_complete;
+ struct list_head active;
+ struct list_head expiring;
+ struct autofs_sb_info *sbi;
+ long unsigned int last_used;
+ int count;
+ kuid_t uid;
+ kgid_t gid;
+ struct callback_head rcu;
+};
+
+struct autofs_wait_queue;
+
+struct autofs_sb_info {
+ u32 magic;
+ int pipefd;
+ struct file *pipe;
+ struct pid *oz_pgrp;
+ int version;
+ int sub_version;
+ int min_proto;
+ int max_proto;
+ unsigned int flags;
+ long unsigned int exp_timeout;
+ unsigned int type;
+ struct super_block *sb;
+ struct mutex wq_mutex;
+ struct mutex pipe_mutex;
+ spinlock_t fs_lock;
+ struct autofs_wait_queue *queues;
+ spinlock_t lookup_lock;
+ struct list_head active_list;
+ struct list_head expiring_list;
+ struct callback_head rcu;
+};
+
+struct autofs_wait_queue {
+ wait_queue_head_t queue;
+ struct autofs_wait_queue *next;
+ autofs_wqt_t wait_queue_token;
+ struct qstr name;
+ u32 dev;
+ u64 ino;
+ kuid_t uid;
+ kgid_t gid;
+ pid_t pid;
+ pid_t tgid;
+ int status;
+ unsigned int wait_ctr;
+};
+
+enum {
+ Opt_err___3 = 0,
+ Opt_fd = 1,
+ Opt_uid___4 = 2,
+ Opt_gid___5 = 3,
+ Opt_pgrp = 4,
+ Opt_minproto = 5,
+ Opt_maxproto = 6,
+ Opt_indirect = 7,
+ Opt_direct = 8,
+ Opt_offset = 9,
+ Opt_strictexpire = 10,
+ Opt_ignore = 11,
+};
+
+enum {
+ AUTOFS_IOC_READY_CMD = 96,
+ AUTOFS_IOC_FAIL_CMD = 97,
+ AUTOFS_IOC_CATATONIC_CMD = 98,
+ AUTOFS_IOC_PROTOVER_CMD = 99,
+ AUTOFS_IOC_SETTIMEOUT_CMD = 100,
+ AUTOFS_IOC_EXPIRE_CMD = 101,
+};
+
+enum autofs_notify {
+ NFY_NONE = 0,
+ NFY_MOUNT = 1,
+ NFY_EXPIRE = 2,
+};
+
+enum {
+ AUTOFS_IOC_EXPIRE_MULTI_CMD = 102,
+ AUTOFS_IOC_PROTOSUBVER_CMD = 103,
+ AUTOFS_IOC_ASKUMOUNT_CMD = 112,
+};
+
+struct autofs_packet_hdr {
+ int proto_version;
+ int type;
+};
+
+struct autofs_packet_missing {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[256];
+};
+
+struct autofs_packet_expire {
+ struct autofs_packet_hdr hdr;
+ int len;
+ char name[256];
+};
+
+struct autofs_packet_expire_multi {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[256];
+};
+
+union autofs_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_packet_missing missing;
+ struct autofs_packet_expire expire;
+ struct autofs_packet_expire_multi expire_multi;
+};
+
+struct autofs_v5_packet {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ __u32 dev;
+ __u64 ino;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 len;
+ char name[256];
+};
+
+typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
+
+typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
+
+typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
+
+typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
+
+union autofs_v5_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_v5_packet v5_packet;
+ autofs_packet_missing_indirect_t missing_indirect;
+ autofs_packet_expire_indirect_t expire_indirect;
+ autofs_packet_missing_direct_t missing_direct;
+ autofs_packet_expire_direct_t expire_direct;
+};
+
+struct args_protover {
+ __u32 version;
+};
+
+struct args_protosubver {
+ __u32 sub_version;
+};
+
+struct args_openmount {
+ __u32 devid;
+};
+
+struct args_ready {
+ __u32 token;
+};
+
+struct args_fail {
+ __u32 token;
+ __s32 status;
+};
+
+struct args_setpipefd {
+ __s32 pipefd;
+};
+
+struct args_timeout {
+ __u64 timeout;
+};
+
+struct args_requester {
+ __u32 uid;
+ __u32 gid;
+};
+
+struct args_expire {
+ __u32 how;
+};
+
+struct args_askumount {
+ __u32 may_umount;
+};
+
+struct args_in {
+ __u32 type;
+};
+
+struct args_out {
+ __u32 devid;
+ __u32 magic;
+};
+
+struct args_ismountpoint {
+ union {
+ struct args_in in;
+ struct args_out out;
+ };
+};
+
+struct autofs_dev_ioctl {
+ __u32 ver_major;
+ __u32 ver_minor;
+ __u32 size;
+ __s32 ioctlfd;
+ union {
+ struct args_protover protover;
+ struct args_protosubver protosubver;
+ struct args_openmount openmount;
+ struct args_ready ready;
+ struct args_fail fail;
+ struct args_setpipefd setpipefd;
+ struct args_timeout timeout;
+ struct args_requester requester;
+ struct args_expire expire;
+ struct args_askumount askumount;
+ struct args_ismountpoint ismountpoint;
+ };
+ char path[0];
+};
+
+enum {
+ AUTOFS_DEV_IOCTL_VERSION_CMD = 113,
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114,
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115,
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116,
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117,
+ AUTOFS_DEV_IOCTL_READY_CMD = 118,
+ AUTOFS_DEV_IOCTL_FAIL_CMD = 119,
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120,
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121,
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122,
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123,
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124,
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125,
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126,
+};
+
+typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *);
+
+struct ovl_config {
+ char *lowerdir;
+ char *upperdir;
+ char *workdir;
+ bool default_permissions;
+ bool redirect_dir;
+ bool redirect_follow;
+ const char *redirect_mode;
+ bool index;
+ bool nfs_export;
+ int xino;
+ bool metacopy;
+};
+
+struct ovl_sb {
+ struct super_block *sb;
+ dev_t pseudo_dev;
+ bool bad_uuid;
+ bool is_lower;
+};
+
+struct ovl_layer {
+ struct vfsmount *mnt;
+ struct inode *trap;
+ struct ovl_sb *fs;
+ int idx;
+ int fsid;
+};
+
+struct ovl_path {
+ const struct ovl_layer *layer;
+ struct dentry *dentry;
+};
+
+struct ovl_fs {
+ unsigned int numlayer;
+ unsigned int numfs;
+ const struct ovl_layer *layers;
+ struct ovl_sb *fs;
+ struct dentry *workbasedir;
+ struct dentry *workdir;
+ struct dentry *indexdir;
+ long int namelen;
+ struct ovl_config config;
+ const struct cred *creator_cred;
+ bool tmpfile;
+ bool noxattr;
+ bool upperdir_locked;
+ bool workdir_locked;
+ bool share_whiteout;
+ struct inode *workbasedir_trap;
+ struct inode *workdir_trap;
+ struct inode *indexdir_trap;
+ int xino_mode;
+ atomic_long_t last_ino;
+ struct dentry *whiteout;
+};
+
+struct ovl_entry {
+ union {
+ struct {
+ long unsigned int flags;
+ };
+ struct callback_head rcu;
+ };
+ unsigned int numlower;
+ struct ovl_path lowerstack[0];
+};
+
+struct ovl_dir_cache;
+
+struct ovl_inode {
+ union {
+ struct ovl_dir_cache *cache;
+ struct inode *lowerdata;
+ };
+ const char *redirect;
+ u64 version;
+ long unsigned int flags;
+ struct inode vfs_inode;
+ struct dentry *__upperdentry;
+ struct inode *lower;
+ struct mutex lock;
+};
+
+enum ovl_inode_flag {
+ OVL_IMPURE = 0,
+ OVL_WHITEOUTS = 1,
+ OVL_INDEX = 2,
+ OVL_UPPERDATA = 3,
+ OVL_CONST_INO = 4,
+};
+
+enum ovl_entry_flag {
+ OVL_E_UPPER_ALIAS = 0,
+ OVL_E_OPAQUE = 1,
+ OVL_E_CONNECTED = 2,
+};
+
+enum {
+ OVL_XINO_OFF = 0,
+ OVL_XINO_AUTO = 1,
+ OVL_XINO_ON = 2,
+};
+
+struct ovl_inode_params {
+ struct inode *newinode;
+ struct dentry *upperdentry;
+ struct ovl_path *lowerpath;
+ bool index;
+ unsigned int numlower;
+ char *redirect;
+ struct dentry *lowerdata;
+};
+
+struct ovl_cattr {
+ dev_t rdev;
+ umode_t mode;
+ const char *link;
+ struct dentry *hardlink;
+};
+
+enum {
+ OPT_LOWERDIR = 0,
+ OPT_UPPERDIR = 1,
+ OPT_WORKDIR = 2,
+ OPT_DEFAULT_PERMISSIONS = 3,
+ OPT_REDIRECT_DIR = 4,
+ OPT_INDEX_ON = 5,
+ OPT_INDEX_OFF = 6,
+ OPT_NFS_EXPORT_ON = 7,
+ OPT_NFS_EXPORT_OFF = 8,
+ OPT_XINO_ON = 9,
+ OPT_XINO_OFF = 10,
+ OPT_XINO_AUTO = 11,
+ OPT_METACOPY_ON = 12,
+ OPT_METACOPY_OFF = 13,
+ OPT_ERR = 14,
+};
+
+struct ovl_fb {
+ u8 version;
+ u8 magic;
+ u8 len;
+ u8 flags;
+ u8 type;
+ uuid_t uuid;
+ u32 fid[0];
+} __attribute__((packed));
+
+struct ovl_fh {
+ u8 padding[3];
+ union {
+ struct ovl_fb fb;
+ u8 buf[0];
+ };
+} __attribute__((packed));
+
+struct ovl_lookup_data {
+ struct super_block *sb;
+ struct qstr name;
+ bool is_dir;
+ bool opaque;
+ bool stop;
+ bool last;
+ char *redirect;
+ bool metacopy;
+};
+
+enum ovl_path_type {
+ __OVL_PATH_UPPER = 1,
+ __OVL_PATH_MERGE = 2,
+ __OVL_PATH_ORIGIN = 4,
+};
+
+struct ovl_aio_req {
+ struct kiocb iocb;
+ struct kiocb *orig_iocb;
+ struct fd fd;
+};
+
+enum ovl_copyop {
+ OVL_COPY = 0,
+ OVL_CLONE = 1,
+ OVL_DEDUPE = 2,
+};
+
+struct ovl_dir_cache___2 {
+ long int refcount;
+ u64 version;
+ struct list_head entries;
+ struct rb_root root;
+};
+
+struct ovl_cache_entry {
+ unsigned int len;
+ unsigned int type;
+ u64 real_ino;
+ u64 ino;
+ struct list_head l_node;
+ struct rb_node node;
+ struct ovl_cache_entry *next_maybe_whiteout;
+ bool is_upper;
+ bool is_whiteout;
+ char name[0];
+};
+
+struct ovl_readdir_data {
+ struct dir_context ctx;
+ struct dentry *dentry;
+ bool is_lowest;
+ struct rb_root *root;
+ struct list_head *list;
+ struct list_head middle;
+ struct ovl_cache_entry *first_maybe_whiteout;
+ int count;
+ int err;
+ bool is_upper;
+ bool d_type_supported;
+};
+
+struct ovl_dir_file {
+ bool is_real;
+ bool is_upper;
+ struct ovl_dir_cache___2 *cache;
+ struct list_head *cursor;
+ struct file *realfile;
+ struct file *upperfile;
+};
+
+struct ovl_readdir_translate {
+ struct dir_context *orig_ctx;
+ struct ovl_dir_cache___2 *cache;
+ struct dir_context ctx;
+ u64 parent_ino;
+ int fsid;
+ int xinobits;
+ bool xinowarn;
+};
+
+struct ovl_copy_up_ctx {
+ struct dentry *parent;
+ struct dentry *dentry;
+ struct path lowerpath;
+ struct kstat stat;
+ struct kstat pstat;
+ const char *link;
+ struct dentry *destdir;
+ struct qstr destname;
+ struct dentry *workdir;
+ bool origin;
+ bool indexed;
+ bool metacopy;
+};
+
+struct ovl_cu_creds {
+ const struct cred *old;
+ struct cred *new;
+};
+
+typedef s8 int8_t;
+
+typedef __s64 xfs_off_t;
+
+typedef long long unsigned int xfs_ino_t;
+
+typedef __s64 xfs_daddr_t;
+
+typedef __u32 xfs_nlink_t;
+
+typedef uint32_t xfs_agblock_t;
+
+typedef uint32_t xfs_agino_t;
+
+typedef uint32_t xfs_extlen_t;
+
+typedef uint32_t xfs_agnumber_t;
+
+typedef int32_t xfs_extnum_t;
+
+typedef int64_t xfs_fsize_t;
+
+typedef int64_t xfs_lsn_t;
+
+typedef uint32_t xfs_dablk_t;
+
+typedef uint32_t xfs_dahash_t;
+
+typedef uint64_t xfs_fsblock_t;
+
+typedef uint64_t xfs_rfsblock_t;
+
+typedef uint64_t xfs_rtblock_t;
+
+typedef uint64_t xfs_fileoff_t;
+
+typedef uint64_t xfs_filblks_t;
+
+typedef void *xfs_failaddr_t;
+
+typedef enum {
+ XFS_LOOKUP_EQi = 0,
+ XFS_LOOKUP_LEi = 1,
+ XFS_LOOKUP_GEi = 2,
+} xfs_lookup_t;
+
+typedef enum {
+ XFS_BTNUM_BNOi = 0,
+ XFS_BTNUM_CNTi = 1,
+ XFS_BTNUM_RMAPi = 2,
+ XFS_BTNUM_BMAPi = 3,
+ XFS_BTNUM_INOi = 4,
+ XFS_BTNUM_FINOi = 5,
+ XFS_BTNUM_REFCi = 6,
+ XFS_BTNUM_MAX = 7,
+} xfs_btnum_t;
+
+struct xfs_name {
+ const unsigned char *name;
+ int len;
+ int type;
+};
+
+struct xfs_iext_leaf;
+
+struct xfs_iext_cursor {
+ struct xfs_iext_leaf *leaf;
+ int pos;
+};
+
+typedef enum {
+ XFS_EXT_NORM = 0,
+ XFS_EXT_UNWRITTEN = 1,
+} xfs_exntst_t;
+
+struct xfs_bmbt_irec {
+ xfs_fileoff_t br_startoff;
+ xfs_fsblock_t br_startblock;
+ xfs_filblks_t br_blockcount;
+ xfs_exntst_t br_state;
+};
+
+enum xfs_ag_resv_type {
+ XFS_AG_RESV_NONE = 0,
+ XFS_AG_RESV_AGFL = 1,
+ XFS_AG_RESV_METADATA = 2,
+ XFS_AG_RESV_RMAPBT = 3,
+};
+
+typedef struct {
+ struct rw_semaphore mr_lock;
+} mrlock_t;
+
+enum {
+ __XBTS_lookup = 0,
+ __XBTS_compare = 1,
+ __XBTS_insrec = 2,
+ __XBTS_delrec = 3,
+ __XBTS_newroot = 4,
+ __XBTS_killroot = 5,
+ __XBTS_increment = 6,
+ __XBTS_decrement = 7,
+ __XBTS_lshift = 8,
+ __XBTS_rshift = 9,
+ __XBTS_split = 10,
+ __XBTS_join = 11,
+ __XBTS_alloc = 12,
+ __XBTS_free = 13,
+ __XBTS_moves = 14,
+ __XBTS_MAX = 15,
+};
+
+struct __xfsstats {
+ uint32_t xs_allocx;
+ uint32_t xs_allocb;
+ uint32_t xs_freex;
+ uint32_t xs_freeb;
+ uint32_t xs_abt_lookup;
+ uint32_t xs_abt_compare;
+ uint32_t xs_abt_insrec;
+ uint32_t xs_abt_delrec;
+ uint32_t xs_blk_mapr;
+ uint32_t xs_blk_mapw;
+ uint32_t xs_blk_unmap;
+ uint32_t xs_add_exlist;
+ uint32_t xs_del_exlist;
+ uint32_t xs_look_exlist;
+ uint32_t xs_cmp_exlist;
+ uint32_t xs_bmbt_lookup;
+ uint32_t xs_bmbt_compare;
+ uint32_t xs_bmbt_insrec;
+ uint32_t xs_bmbt_delrec;
+ uint32_t xs_dir_lookup;
+ uint32_t xs_dir_create;
+ uint32_t xs_dir_remove;
+ uint32_t xs_dir_getdents;
+ uint32_t xs_trans_sync;
+ uint32_t xs_trans_async;
+ uint32_t xs_trans_empty;
+ uint32_t xs_ig_attempts;
+ uint32_t xs_ig_found;
+ uint32_t xs_ig_frecycle;
+ uint32_t xs_ig_missed;
+ uint32_t xs_ig_dup;
+ uint32_t xs_ig_reclaims;
+ uint32_t xs_ig_attrchg;
+ uint32_t xs_log_writes;
+ uint32_t xs_log_blocks;
+ uint32_t xs_log_noiclogs;
+ uint32_t xs_log_force;
+ uint32_t xs_log_force_sleep;
+ uint32_t xs_try_logspace;
+ uint32_t xs_sleep_logspace;
+ uint32_t xs_push_ail;
+ uint32_t xs_push_ail_success;
+ uint32_t xs_push_ail_pushbuf;
+ uint32_t xs_push_ail_pinned;
+ uint32_t xs_push_ail_locked;
+ uint32_t xs_push_ail_flushing;
+ uint32_t xs_push_ail_restarts;
+ uint32_t xs_push_ail_flush;
+ uint32_t xs_xstrat_quick;
+ uint32_t xs_xstrat_split;
+ uint32_t xs_write_calls;
+ uint32_t xs_read_calls;
+ uint32_t xs_attr_get;
+ uint32_t xs_attr_set;
+ uint32_t xs_attr_remove;
+ uint32_t xs_attr_list;
+ uint32_t xs_iflush_count;
+ uint32_t xs_icluster_flushcnt;
+ uint32_t xs_icluster_flushinode;
+ uint32_t vn_active;
+ uint32_t vn_alloc;
+ uint32_t vn_get;
+ uint32_t vn_hold;
+ uint32_t vn_rele;
+ uint32_t vn_reclaim;
+ uint32_t vn_remove;
+ uint32_t vn_free;
+ uint32_t xb_get;
+ uint32_t xb_create;
+ uint32_t xb_get_locked;
+ uint32_t xb_get_locked_waited;
+ uint32_t xb_busy_locked;
+ uint32_t xb_miss_locked;
+ uint32_t xb_page_retries;
+ uint32_t xb_page_found;
+ uint32_t xb_get_read;
+ uint32_t xs_abtb_2[15];
+ uint32_t xs_abtc_2[15];
+ uint32_t xs_bmbt_2[15];
+ uint32_t xs_ibt_2[15];
+ uint32_t xs_fibt_2[15];
+ uint32_t xs_rmap_2[15];
+ uint32_t xs_refcbt_2[15];
+ uint32_t xs_qm_dqreclaims;
+ uint32_t xs_qm_dqreclaim_misses;
+ uint32_t xs_qm_dquot_dups;
+ uint32_t xs_qm_dqcachemisses;
+ uint32_t xs_qm_dqcachehits;
+ uint32_t xs_qm_dqwants;
+ uint32_t xs_qm_dquot;
+ uint32_t xs_qm_dquot_unused;
+ uint64_t xs_xstrat_bytes;
+ uint64_t xs_write_bytes;
+ uint64_t xs_read_bytes;
+};
+
+struct xfsstats {
+ union {
+ struct __xfsstats s;
+ uint32_t a[187];
+ };
+};
+
+struct xfs_kobj {
+ struct kobject kobject;
+ struct completion complete;
+};
+
+struct xstats {
+ struct xfsstats *xs_stats;
+ struct xfs_kobj xs_kobj;
+};
+
+typedef unsigned int xfs_buf_flags_t;
+
+struct xfs_mount;
+
+struct xfs_buftarg {
+ dev_t bt_dev;
+ struct block_device *bt_bdev;
+ struct dax_device *bt_daxdev;
+ struct xfs_mount *bt_mount;
+ unsigned int bt_meta_sectorsize;
+ size_t bt_meta_sectormask;
+ size_t bt_logical_sectorsize;
+ size_t bt_logical_sectormask;
+ struct shrinker bt_shrinker;
+ struct list_lru bt_lru;
+ struct percpu_counter bt_io_count;
+ struct ratelimit_state bt_ioerror_rl;
+};
+
+struct xfs_sb {
+ uint32_t sb_magicnum;
+ uint32_t sb_blocksize;
+ xfs_rfsblock_t sb_dblocks;
+ xfs_rfsblock_t sb_rblocks;
+ xfs_rtblock_t sb_rextents;
+ uuid_t sb_uuid;
+ xfs_fsblock_t sb_logstart;
+ xfs_ino_t sb_rootino;
+ xfs_ino_t sb_rbmino;
+ xfs_ino_t sb_rsumino;
+ xfs_agblock_t sb_rextsize;
+ xfs_agblock_t sb_agblocks;
+ xfs_agnumber_t sb_agcount;
+ xfs_extlen_t sb_rbmblocks;
+ xfs_extlen_t sb_logblocks;
+ uint16_t sb_versionnum;
+ uint16_t sb_sectsize;
+ uint16_t sb_inodesize;
+ uint16_t sb_inopblock;
+ char sb_fname[12];
+ uint8_t sb_blocklog;
+ uint8_t sb_sectlog;
+ uint8_t sb_inodelog;
+ uint8_t sb_inopblog;
+ uint8_t sb_agblklog;
+ uint8_t sb_rextslog;
+ uint8_t sb_inprogress;
+ uint8_t sb_imax_pct;
+ uint64_t sb_icount;
+ uint64_t sb_ifree;
+ uint64_t sb_fdblocks;
+ uint64_t sb_frextents;
+ xfs_ino_t sb_uquotino;
+ xfs_ino_t sb_gquotino;
+ uint16_t sb_qflags;
+ uint8_t sb_flags;
+ uint8_t sb_shared_vn;
+ xfs_extlen_t sb_inoalignmt;
+ uint32_t sb_unit;
+ uint32_t sb_width;
+ uint8_t sb_dirblklog;
+ uint8_t sb_logsectlog;
+ uint16_t sb_logsectsize;
+ uint32_t sb_logsunit;
+ uint32_t sb_features2;
+ uint32_t sb_bad_features2;
+ uint32_t sb_features_compat;
+ uint32_t sb_features_ro_compat;
+ uint32_t sb_features_incompat;
+ uint32_t sb_features_log_incompat;
+ uint32_t sb_crc;
+ xfs_extlen_t sb_spino_align;
+ xfs_ino_t sb_pquotino;
+ xfs_lsn_t sb_lsn;
+ uuid_t sb_meta_uuid;
+};
+
+typedef struct xfs_buftarg xfs_buftarg_t;
+
+struct xfs_mru_cache;
+
+struct xfs_ino_geometry {
+ uint64_t maxicount;
+ unsigned int inode_cluster_size;
+ unsigned int inode_cluster_size_raw;
+ unsigned int inodes_per_cluster;
+ unsigned int blocks_per_cluster;
+ unsigned int cluster_align;
+ unsigned int cluster_align_inodes;
+ unsigned int inoalign_mask;
+ unsigned int inobt_mxr[2];
+ unsigned int inobt_mnr[2];
+ unsigned int inobt_maxlevels;
+ unsigned int ialloc_inos;
+ unsigned int ialloc_blks;
+ unsigned int ialloc_min_blks;
+ unsigned int ialloc_align;
+ unsigned int agino_log;
+};
+
+struct xfs_trans_res {
+ uint tr_logres;
+ int tr_logcount;
+ int tr_logflags;
+};
+
+struct xfs_trans_resv {
+ struct xfs_trans_res tr_write;
+ struct xfs_trans_res tr_itruncate;
+ struct xfs_trans_res tr_rename;
+ struct xfs_trans_res tr_link;
+ struct xfs_trans_res tr_remove;
+ struct xfs_trans_res tr_symlink;
+ struct xfs_trans_res tr_create;
+ struct xfs_trans_res tr_create_tmpfile;
+ struct xfs_trans_res tr_mkdir;
+ struct xfs_trans_res tr_ifree;
+ struct xfs_trans_res tr_ichange;
+ struct xfs_trans_res tr_growdata;
+ struct xfs_trans_res tr_addafork;
+ struct xfs_trans_res tr_writeid;
+ struct xfs_trans_res tr_attrinval;
+ struct xfs_trans_res tr_attrsetm;
+ struct xfs_trans_res tr_attrsetrt;
+ struct xfs_trans_res tr_attrrm;
+ struct xfs_trans_res tr_clearagi;
+ struct xfs_trans_res tr_growrtalloc;
+ struct xfs_trans_res tr_growrtzero;
+ struct xfs_trans_res tr_growrtfree;
+ struct xfs_trans_res tr_qm_setqlim;
+ struct xfs_trans_res tr_qm_dqalloc;
+ struct xfs_trans_res tr_qm_quotaoff;
+ struct xfs_trans_res tr_qm_equotaoff;
+ struct xfs_trans_res tr_sb;
+ struct xfs_trans_res tr_fsyncts;
+};
+
+struct xfs_error_cfg {
+ struct xfs_kobj kobj;
+ int max_retries;
+ long int retry_timeout;
+};
+
+struct xfs_ail;
+
+struct xfs_buf;
+
+struct xfs_da_geometry;
+
+struct xlog;
+
+struct xfs_inode;
+
+struct xfs_quotainfo;
+
+struct xfs_mount {
+ struct xfs_sb m_sb;
+ struct super_block *m_super;
+ struct xfs_ail *m_ail;
+ struct xfs_buf *m_sb_bp;
+ char *m_rtname;
+ char *m_logname;
+ struct xfs_da_geometry *m_dir_geo;
+ struct xfs_da_geometry *m_attr_geo;
+ struct xlog *m_log;
+ struct xfs_inode *m_rbmip;
+ struct xfs_inode *m_rsumip;
+ struct xfs_inode *m_rootip;
+ struct xfs_quotainfo *m_quotainfo;
+ xfs_buftarg_t *m_ddev_targp;
+ xfs_buftarg_t *m_logdev_targp;
+ xfs_buftarg_t *m_rtdev_targp;
+ uint8_t *m_rsum_cache;
+ struct xfs_mru_cache *m_filestream;
+ struct workqueue_struct *m_buf_workqueue;
+ struct workqueue_struct *m_unwritten_workqueue;
+ struct workqueue_struct *m_cil_workqueue;
+ struct workqueue_struct *m_reclaim_workqueue;
+ struct workqueue_struct *m_eofblocks_workqueue;
+ struct workqueue_struct *m_sync_workqueue;
+ int m_bsize;
+ uint8_t m_blkbit_log;
+ uint8_t m_blkbb_log;
+ uint8_t m_agno_log;
+ uint8_t m_sectbb_log;
+ uint m_blockmask;
+ uint m_blockwsize;
+ uint m_blockwmask;
+ uint m_alloc_mxr[2];
+ uint m_alloc_mnr[2];
+ uint m_bmap_dmxr[2];
+ uint m_bmap_dmnr[2];
+ uint m_rmap_mxr[2];
+ uint m_rmap_mnr[2];
+ uint m_refc_mxr[2];
+ uint m_refc_mnr[2];
+ uint m_ag_maxlevels;
+ uint m_bm_maxlevels[2];
+ uint m_rmap_maxlevels;
+ uint m_refc_maxlevels;
+ xfs_extlen_t m_ag_prealloc_blocks;
+ uint m_alloc_set_aside;
+ uint m_ag_max_usable;
+ int m_dalign;
+ int m_swidth;
+ xfs_agnumber_t m_maxagi;
+ uint m_allocsize_log;
+ uint m_allocsize_blocks;
+ int m_logbufs;
+ int m_logbsize;
+ uint m_rsumlevels;
+ uint m_rsumsize;
+ int m_fixedfsid[2];
+ uint m_qflags;
+ uint64_t m_flags;
+ int64_t m_low_space[5];
+ struct xfs_ino_geometry m_ino_geo;
+ struct xfs_trans_resv m_resv;
+ bool m_always_cow;
+ bool m_fail_unmount;
+ bool m_finobt_nores;
+ bool m_update_sb;
+ uint8_t m_fs_checked;
+ uint8_t m_fs_sick;
+ uint8_t m_rt_checked;
+ uint8_t m_rt_sick;
+ spinlock_t m_sb_lock;
+ struct percpu_counter m_icount;
+ struct percpu_counter m_ifree;
+ struct percpu_counter m_fdblocks;
+ struct percpu_counter m_delalloc_blks;
+ struct xarray m_perag_tree;
+ spinlock_t m_perag_lock;
+ uint64_t m_resblks;
+ uint64_t m_resblks_avail;
+ uint64_t m_resblks_save;
+ struct delayed_work m_reclaim_work;
+ struct delayed_work m_eofblocks_work;
+ struct delayed_work m_cowblocks_work;
+ struct xfs_kobj m_kobj;
+ struct xfs_kobj m_error_kobj;
+ struct xfs_kobj m_error_meta_kobj;
+ struct xfs_error_cfg m_error_cfg[4];
+ struct xstats m_stats;
+ xfs_agnumber_t m_agfrotor;
+ xfs_agnumber_t m_agirotor;
+ spinlock_t m_agirotor_lock;
+ struct work_struct m_flush_inodes_work;
+ uint32_t m_generation;
+ struct mutex m_growlock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
+
+struct xfs_buf_map {
+ xfs_daddr_t bm_bn;
+ int bm_len;
+};
+
+struct xfs_perag;
+
+struct xfs_buf_log_item;
+
+struct xfs_trans;
+
+struct xfs_buf_ops;
+
+struct xfs_buf {
+ struct rhash_head b_rhash_head;
+ xfs_daddr_t b_bn;
+ int b_length;
+ atomic_t b_hold;
+ atomic_t b_lru_ref;
+ xfs_buf_flags_t b_flags;
+ struct semaphore b_sema;
+ struct list_head b_lru;
+ spinlock_t b_lock;
+ unsigned int b_state;
+ int b_io_error;
+ wait_queue_head_t b_waiters;
+ struct list_head b_list;
+ struct xfs_perag *b_pag;
+ struct xfs_mount *b_mount;
+ xfs_buftarg_t *b_target;
+ void *b_addr;
+ struct work_struct b_ioend_work;
+ xfs_buf_iodone_t b_iodone;
+ struct completion b_iowait;
+ struct xfs_buf_log_item *b_log_item;
+ struct list_head b_li_list;
+ struct xfs_trans *b_transp;
+ struct page **b_pages;
+ struct page *b_page_array[2];
+ struct xfs_buf_map *b_maps;
+ struct xfs_buf_map __b_map;
+ int b_map_count;
+ atomic_t b_pin_count;
+ atomic_t b_io_remaining;
+ unsigned int b_page_count;
+ unsigned int b_offset;
+ int b_error;
+ int b_retries;
+ long unsigned int b_first_retry_time;
+ int b_last_error;
+ const struct xfs_buf_ops *b_ops;
+};
+
+struct xfs_buf_ops {
+ char *name;
+ union {
+ __be32 magic[2];
+ __be16 magic16[2];
+ };
+ void (*verify_read)(struct xfs_buf *);
+ void (*verify_write)(struct xfs_buf *);
+ xfs_failaddr_t (*verify_struct)(struct xfs_buf *);
+};
+
+struct xfs_ag_resv {
+ xfs_extlen_t ar_orig_reserved;
+ xfs_extlen_t ar_reserved;
+ xfs_extlen_t ar_asked;
+};
+
+struct xfs_perag {
+ struct xfs_mount *pag_mount;
+ xfs_agnumber_t pag_agno;
+ atomic_t pag_ref;
+ char pagf_init;
+ char pagi_init;
+ char pagf_metadata;
+ char pagi_inodeok;
+ uint8_t pagf_levels[3];
+ bool pagf_agflreset;
+ uint32_t pagf_flcount;
+ xfs_extlen_t pagf_freeblks;
+ xfs_extlen_t pagf_longest;
+ uint32_t pagf_btreeblks;
+ xfs_agino_t pagi_freecount;
+ xfs_agino_t pagi_count;
+ xfs_agino_t pagl_pagino;
+ xfs_agino_t pagl_leftrec;
+ xfs_agino_t pagl_rightrec;
+ uint16_t pag_checked;
+ uint16_t pag_sick;
+ spinlock_t pag_state_lock;
+ spinlock_t pagb_lock;
+ struct rb_root pagb_tree;
+ unsigned int pagb_gen;
+ wait_queue_head_t pagb_wait;
+ atomic_t pagf_fstrms;
+ spinlock_t pag_ici_lock;
+ struct xarray pag_ici_root;
+ int pag_ici_reclaimable;
+ struct mutex pag_ici_reclaim_lock;
+ long unsigned int pag_ici_reclaim_cursor;
+ spinlock_t pag_buf_lock;
+ struct rhashtable pag_buf_hash;
+ struct callback_head callback_head;
+ int pagb_count;
+ struct xfs_ag_resv pag_meta_resv;
+ struct xfs_ag_resv pag_rmapbt_resv;
+ uint8_t pagf_refcount_level;
+ struct rhashtable pagi_unlinked_hash;
+};
+
+struct xfs_item_ops;
+
+struct xfs_log_vec;
+
+struct xfs_log_item {
+ struct list_head li_ail;
+ struct list_head li_trans;
+ xfs_lsn_t li_lsn;
+ struct xfs_mount *li_mountp;
+ struct xfs_ail *li_ailp;
+ uint li_type;
+ long unsigned int li_flags;
+ struct xfs_buf *li_buf;
+ struct list_head li_bio_list;
+ void (*li_cb)(struct xfs_buf *, struct xfs_log_item *);
+ const struct xfs_item_ops *li_ops;
+ struct list_head li_cil;
+ struct xfs_log_vec *li_lv;
+ struct xfs_log_vec *li_lv_shadow;
+ xfs_lsn_t li_seq;
+};
+
+struct xfs_buf_log_format {
+ short unsigned int blf_type;
+ short unsigned int blf_size;
+ short unsigned int blf_flags;
+ short unsigned int blf_len;
+ int64_t blf_blkno;
+ unsigned int blf_map_size;
+ unsigned int blf_data_map[17];
+};
+
+struct xfs_buf_log_item {
+ struct xfs_log_item bli_item;
+ struct xfs_buf *bli_buf;
+ unsigned int bli_flags;
+ unsigned int bli_recur;
+ atomic_t bli_refcount;
+ int bli_format_count;
+ struct xfs_buf_log_format *bli_formats;
+ struct xfs_buf_log_format __bli_format;
+};
+
+struct xlog_ticket;
+
+struct xfs_dquot_acct;
+
+struct xfs_trans {
+ unsigned int t_magic;
+ unsigned int t_log_res;
+ unsigned int t_log_count;
+ unsigned int t_blk_res;
+ unsigned int t_blk_res_used;
+ unsigned int t_rtx_res;
+ unsigned int t_rtx_res_used;
+ unsigned int t_flags;
+ xfs_fsblock_t t_firstblock;
+ struct xlog_ticket *t_ticket;
+ struct xfs_mount *t_mountp;
+ struct xfs_dquot_acct *t_dqinfo;
+ int64_t t_icount_delta;
+ int64_t t_ifree_delta;
+ int64_t t_fdblocks_delta;
+ int64_t t_res_fdblocks_delta;
+ int64_t t_frextents_delta;
+ int64_t t_res_frextents_delta;
+ int64_t t_dblocks_delta;
+ int64_t t_agcount_delta;
+ int64_t t_imaxpct_delta;
+ int64_t t_rextsize_delta;
+ int64_t t_rbmblocks_delta;
+ int64_t t_rblocks_delta;
+ int64_t t_rextents_delta;
+ int64_t t_rextslog_delta;
+ struct list_head t_items;
+ struct list_head t_busy;
+ struct list_head t_dfops;
+ long unsigned int t_pflags;
+};
+
+typedef struct xfs_buf xfs_buf_t;
+
+struct xfs_agf {
+ __be32 agf_magicnum;
+ __be32 agf_versionnum;
+ __be32 agf_seqno;
+ __be32 agf_length;
+ __be32 agf_roots[3];
+ __be32 agf_levels[3];
+ __be32 agf_flfirst;
+ __be32 agf_fllast;
+ __be32 agf_flcount;
+ __be32 agf_freeblks;
+ __be32 agf_longest;
+ __be32 agf_btreeblks;
+ uuid_t agf_uuid;
+ __be32 agf_rmap_blocks;
+ __be32 agf_refcount_blocks;
+ __be32 agf_refcount_root;
+ __be32 agf_refcount_level;
+ __be64 agf_spare64[14];
+ __be64 agf_lsn;
+ __be32 agf_crc;
+ __be32 agf_spare2;
+};
+
+enum xfs_dinode_fmt {
+ XFS_DINODE_FMT_DEV = 0,
+ XFS_DINODE_FMT_LOCAL = 1,
+ XFS_DINODE_FMT_EXTENTS = 2,
+ XFS_DINODE_FMT_BTREE = 3,
+ XFS_DINODE_FMT_UUID = 4,
+};
+
+struct xfs_disk_dquot {
+ __be16 d_magic;
+ __u8 d_version;
+ __u8 d_flags;
+ __be32 d_id;
+ __be64 d_blk_hardlimit;
+ __be64 d_blk_softlimit;
+ __be64 d_ino_hardlimit;
+ __be64 d_ino_softlimit;
+ __be64 d_bcount;
+ __be64 d_icount;
+ __be32 d_itimer;
+ __be32 d_btimer;
+ __be16 d_iwarns;
+ __be16 d_bwarns;
+ __be32 d_pad0;
+ __be64 d_rtb_hardlimit;
+ __be64 d_rtb_softlimit;
+ __be64 d_rtbcount;
+ __be32 d_rtbtimer;
+ __be16 d_rtbwarns;
+ __be16 d_pad;
+};
+
+struct xfs_alloc_rec {
+ __be32 ar_startblock;
+ __be32 ar_blockcount;
+};
+
+typedef struct xfs_alloc_rec xfs_alloc_key_t;
+
+struct xfs_alloc_rec_incore {
+ xfs_agblock_t ar_startblock;
+ xfs_extlen_t ar_blockcount;
+};
+
+typedef uint64_t xfs_inofree_t;
+
+struct xfs_inobt_rec {
+ __be32 ir_startino;
+ union {
+ struct {
+ __be32 ir_freecount;
+ } f;
+ struct {
+ __be16 ir_holemask;
+ __u8 ir_count;
+ __u8 ir_freecount;
+ } sp;
+ } ir_u;
+ __be64 ir_free;
+};
+
+struct xfs_inobt_rec_incore {
+ xfs_agino_t ir_startino;
+ uint16_t ir_holemask;
+ uint8_t ir_count;
+ uint8_t ir_freecount;
+ xfs_inofree_t ir_free;
+};
+
+struct xfs_inobt_key {
+ __be32 ir_startino;
+};
+
+struct xfs_owner_info {
+ uint64_t oi_owner;
+ xfs_fileoff_t oi_offset;
+ unsigned int oi_flags;
+};
+
+struct xfs_rmap_rec {
+ __be32 rm_startblock;
+ __be32 rm_blockcount;
+ __be64 rm_owner;
+ __be64 rm_offset;
+};
+
+struct xfs_rmap_irec {
+ xfs_agblock_t rm_startblock;
+ xfs_extlen_t rm_blockcount;
+ uint64_t rm_owner;
+ uint64_t rm_offset;
+ unsigned int rm_flags;
+};
+
+struct xfs_rmap_key {
+ __be32 rm_startblock;
+ __be64 rm_owner;
+ __be64 rm_offset;
+} __attribute__((packed));
+
+struct xfs_refcount_rec {
+ __be32 rc_startblock;
+ __be32 rc_blockcount;
+ __be32 rc_refcount;
+};
+
+struct xfs_refcount_key {
+ __be32 rc_startblock;
+};
+
+struct xfs_refcount_irec {
+ xfs_agblock_t rc_startblock;
+ xfs_extlen_t rc_blockcount;
+ xfs_nlink_t rc_refcount;
+};
+
+struct xfs_bmbt_rec {
+ __be64 l0;
+ __be64 l1;
+};
+
+typedef struct xfs_bmbt_rec xfs_bmbt_rec_t;
+
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
+
+struct xfs_bmbt_key {
+ __be64 br_startoff;
+};
+
+typedef struct xfs_bmbt_key xfs_bmdr_key_t;
+
+struct xfs_btree_block_shdr {
+ __be32 bb_leftsib;
+ __be32 bb_rightsib;
+ __be64 bb_blkno;
+ __be64 bb_lsn;
+ uuid_t bb_uuid;
+ __be32 bb_owner;
+ __le32 bb_crc;
+};
+
+struct xfs_btree_block_lhdr {
+ __be64 bb_leftsib;
+ __be64 bb_rightsib;
+ __be64 bb_blkno;
+ __be64 bb_lsn;
+ uuid_t bb_uuid;
+ __be64 bb_owner;
+ __le32 bb_crc;
+ __be32 bb_pad;
+};
+
+struct xfs_btree_block {
+ __be32 bb_magic;
+ __be16 bb_level;
+ __be16 bb_numrecs;
+ union {
+ struct xfs_btree_block_shdr s;
+ struct xfs_btree_block_lhdr l;
+ } bb_u;
+};
+
+typedef uint32_t xlog_tid_t;
+
+struct xlog_rec_header {
+ __be32 h_magicno;
+ __be32 h_cycle;
+ __be32 h_version;
+ __be32 h_len;
+ __be64 h_lsn;
+ __be64 h_tail_lsn;
+ __le32 h_crc;
+ __be32 h_prev_block;
+ __be32 h_num_logops;
+ __be32 h_cycle_data[64];
+ __be32 h_fmt;
+ uuid_t h_fs_uuid;
+ __be32 h_size;
+};
+
+typedef struct xlog_rec_header xlog_rec_header_t;
+
+struct xlog_rec_ext_header {
+ __be32 xh_cycle;
+ __be32 xh_cycle_data[64];
+};
+
+typedef struct xlog_rec_ext_header xlog_rec_ext_header_t;
+
+union xlog_in_core2 {
+ xlog_rec_header_t hic_header;
+ xlog_rec_ext_header_t hic_xheader;
+ char hic_sector[512];
+};
+
+typedef union xlog_in_core2 xlog_in_core_2_t;
+
+struct xfs_log_iovec {
+ void *i_addr;
+ int i_len;
+ uint i_type;
+};
+
+struct xfs_trans_header {
+ uint th_magic;
+ uint th_type;
+ int32_t th_tid;
+ uint th_num_items;
+};
+
+typedef struct xfs_trans_header xfs_trans_header_t;
+
+struct xfs_inode_log_format {
+ uint16_t ilf_type;
+ uint16_t ilf_size;
+ uint32_t ilf_fields;
+ uint16_t ilf_asize;
+ uint16_t ilf_dsize;
+ uint32_t ilf_pad;
+ uint64_t ilf_ino;
+ union {
+ uint32_t ilfu_rdev;
+ uint8_t __pad[16];
+ } ilf_u;
+ int64_t ilf_blkno;
+ int32_t ilf_len;
+ int32_t ilf_boffset;
+};
+
+struct xfs_icreate_log {
+ uint16_t icl_type;
+ uint16_t icl_size;
+ __be32 icl_ag;
+ __be32 icl_agbno;
+ __be32 icl_count;
+ __be32 icl_isize;
+ __be32 icl_length;
+ __be32 icl_gen;
+};
+
+enum {
+ XFS_LOWSP_1_PCNT = 0,
+ XFS_LOWSP_2_PCNT = 1,
+ XFS_LOWSP_3_PCNT = 2,
+ XFS_LOWSP_4_PCNT = 3,
+ XFS_LOWSP_5_PCNT = 4,
+ XFS_LOWSP_MAX = 5,
+};
+
+enum {
+ XFS_ERR_METADATA = 0,
+ XFS_ERR_CLASS_MAX = 1,
+};
+
+enum {
+ XFS_ERR_DEFAULT = 0,
+ XFS_ERR_EIO = 1,
+ XFS_ERR_ENOSPC = 2,
+ XFS_ERR_ENODEV = 3,
+ XFS_ERR_ERRNO_MAX = 4,
+};
+
+typedef uint xfs_dir2_data_aoff_t;
+
+struct xfs_da_geometry {
+ unsigned int blksize;
+ unsigned int fsbcount;
+ uint8_t fsblog;
+ uint8_t blklog;
+ unsigned int node_hdr_size;
+ unsigned int node_ents;
+ unsigned int magicpct;
+ xfs_dablk_t datablk;
+ unsigned int leaf_hdr_size;
+ unsigned int leaf_max_ents;
+ xfs_dablk_t leafblk;
+ unsigned int free_hdr_size;
+ unsigned int free_max_bests;
+ xfs_dablk_t freeblk;
+ xfs_dir2_data_aoff_t data_first_offset;
+ size_t data_entry_offset;
+};
+
+struct xlog_grant_head {
+ spinlock_t lock;
+ struct list_head waiters;
+ atomic64_t grant;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xfs_cil;
+
+struct xlog_in_core;
+
+typedef struct xlog_in_core xlog_in_core_t;
+
+struct xlog {
+ struct xfs_mount *l_mp;
+ struct xfs_ail *l_ailp;
+ struct xfs_cil *l_cilp;
+ struct xfs_buftarg *l_targ;
+ struct workqueue_struct *l_ioend_workqueue;
+ struct delayed_work l_work;
+ uint l_flags;
+ uint l_quotaoffs_flag;
+ struct list_head *l_buf_cancel_table;
+ int l_iclog_hsize;
+ int l_iclog_heads;
+ uint l_sectBBsize;
+ int l_iclog_size;
+ int l_iclog_bufs;
+ xfs_daddr_t l_logBBstart;
+ int l_logsize;
+ int l_logBBsize;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ wait_queue_head_t l_flush_wait;
+ int l_covered_state;
+ xlog_in_core_t *l_iclog;
+ spinlock_t l_icloglock;
+ int l_curr_cycle;
+ int l_prev_cycle;
+ int l_curr_block;
+ int l_prev_block;
+ long: 64;
+ long: 64;
+ atomic64_t l_last_sync_lsn;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ atomic64_t l_tail_lsn;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct xlog_grant_head l_reserve_head;
+ struct xlog_grant_head l_write_head;
+ struct xfs_kobj l_kobj;
+ xfs_lsn_t l_recovery_lsn;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xfs_imap {
+ xfs_daddr_t im_blkno;
+ short unsigned int im_len;
+ short unsigned int im_boffset;
+};
+
+struct xfs_ifork {
+ int64_t if_bytes;
+ struct xfs_btree_block *if_broot;
+ unsigned int if_seq;
+ int if_height;
+ union {
+ void *if_root;
+ char *if_data;
+ } if_u1;
+ short int if_broot_bytes;
+ unsigned char if_flags;
+ int8_t if_format;
+ xfs_extnum_t if_nextents;
+};
+
+struct xfs_icdinode {
+ uint16_t di_flushiter;
+ uint32_t di_projid;
+ xfs_fsize_t di_size;
+ xfs_rfsblock_t di_nblocks;
+ xfs_extlen_t di_extsize;
+ uint8_t di_forkoff;
+ uint32_t di_dmevmask;
+ uint16_t di_dmstate;
+ uint16_t di_flags;
+ uint64_t di_flags2;
+ uint32_t di_cowextsize;
+ struct timespec64 di_crtime;
+};
+
+struct xfs_dquot;
+
+struct xfs_inode_log_item;
+
+struct xfs_inode {
+ struct xfs_mount *i_mount;
+ struct xfs_dquot *i_udquot;
+ struct xfs_dquot *i_gdquot;
+ struct xfs_dquot *i_pdquot;
+ xfs_ino_t i_ino;
+ struct xfs_imap i_imap;
+ struct xfs_ifork *i_afp;
+ struct xfs_ifork *i_cowfp;
+ struct xfs_ifork i_df;
+ struct xfs_inode_log_item *i_itemp;
+ mrlock_t i_lock;
+ mrlock_t i_mmaplock;
+ atomic_t i_pincount;
+ uint16_t i_checked;
+ uint16_t i_sick;
+ spinlock_t i_flags_lock;
+ long unsigned int i_flags;
+ uint64_t i_delayed_blks;
+ struct xfs_icdinode i_d;
+ struct inode i_vnode;
+ spinlock_t i_ioend_lock;
+ struct work_struct i_ioend_work;
+ struct list_head i_ioend_list;
+};
+
+enum xfs_defer_ops_type {
+ XFS_DEFER_OPS_TYPE_BMAP = 0,
+ XFS_DEFER_OPS_TYPE_REFCOUNT = 1,
+ XFS_DEFER_OPS_TYPE_RMAP = 2,
+ XFS_DEFER_OPS_TYPE_FREE = 3,
+ XFS_DEFER_OPS_TYPE_AGFL_FREE = 4,
+ XFS_DEFER_OPS_TYPE_MAX = 5,
+};
+
+struct xfs_defer_pending {
+ struct list_head dfp_list;
+ struct list_head dfp_work;
+ struct xfs_log_item *dfp_intent;
+ struct xfs_log_item *dfp_done;
+ unsigned int dfp_count;
+ enum xfs_defer_ops_type dfp_type;
+};
+
+struct xfs_btree_cur;
+
+struct xfs_defer_op_type {
+ struct xfs_log_item * (*create_intent)(struct xfs_trans *, struct list_head *, unsigned int, bool);
+ void (*abort_intent)(struct xfs_log_item *);
+ struct xfs_log_item * (*create_done)(struct xfs_trans *, struct xfs_log_item *, unsigned int);
+ int (*finish_item)(struct xfs_trans *, struct xfs_log_item *, struct list_head *, struct xfs_btree_cur **);
+ void (*finish_cleanup)(struct xfs_trans *, struct xfs_btree_cur *, int);
+ void (*cancel_item)(struct list_head *);
+ unsigned int max_items;
+};
+
+union xfs_btree_irec {
+ struct xfs_alloc_rec_incore a;
+ struct xfs_bmbt_irec b;
+ struct xfs_inobt_rec_incore i;
+ struct xfs_rmap_irec r;
+ struct xfs_refcount_irec rc;
+};
+
+struct xbtree_afakeroot;
+
+struct xfs_btree_cur_ag {
+ union {
+ struct xfs_buf *agbp;
+ struct xbtree_afakeroot *afake;
+ };
+ xfs_agnumber_t agno;
+ union {
+ struct {
+ long unsigned int nr_ops;
+ int shape_changes;
+ } refc;
+ struct {
+ bool active;
+ } abt;
+ };
+};
+
+struct xbtree_ifakeroot;
+
+struct xfs_btree_cur_ino {
+ struct xfs_inode *ip;
+ struct xbtree_ifakeroot *ifake;
+ int allocated;
+ short int forksize;
+ char whichfork;
+ char flags;
+};
+
+struct xfs_btree_ops;
+
+struct xfs_btree_cur {
+ struct xfs_trans *bc_tp;
+ struct xfs_mount *bc_mp;
+ const struct xfs_btree_ops *bc_ops;
+ uint bc_flags;
+ union xfs_btree_irec bc_rec;
+ struct xfs_buf *bc_bufs[9];
+ int bc_ptrs[9];
+ uint8_t bc_ra[9];
+ uint8_t bc_nlevels;
+ uint8_t bc_blocklog;
+ xfs_btnum_t bc_btnum;
+ int bc_statoff;
+ union {
+ struct xfs_btree_cur_ag bc_ag;
+ struct xfs_btree_cur_ino bc_ino;
+ };
+};
+
+struct xfs_da_node_entry {
+ __be32 hashval;
+ __be32 before;
+};
+
+struct xfs_dq_logitem {
+ struct xfs_log_item qli_item;
+ struct xfs_dquot *qli_dquot;
+ xfs_lsn_t qli_flush_lsn;
+};
+
+typedef uint64_t xfs_qcnt_t;
+
+struct xfs_dquot {
+ uint dq_flags;
+ struct list_head q_lru;
+ struct xfs_mount *q_mount;
+ uint q_nrefs;
+ xfs_daddr_t q_blkno;
+ int q_bufoffset;
+ xfs_fileoff_t q_fileoffset;
+ struct xfs_disk_dquot q_core;
+ struct xfs_dq_logitem q_logitem;
+ xfs_qcnt_t q_res_bcount;
+ xfs_qcnt_t q_res_icount;
+ xfs_qcnt_t q_res_rtbcount;
+ xfs_qcnt_t q_prealloc_lo_wmark;
+ xfs_qcnt_t q_prealloc_hi_wmark;
+ int64_t q_low_space[3];
+ struct mutex q_qlock;
+ struct completion q_flush;
+ atomic_t q_pincount;
+ struct wait_queue_head q_pinwait;
+};
+
+union xfs_btree_ptr {
+ __be32 s;
+ __be64 l;
+};
+
+union xfs_btree_key {
+ struct xfs_bmbt_key bmbt;
+ xfs_bmdr_key_t bmbr;
+ xfs_alloc_key_t alloc;
+ struct xfs_inobt_key inobt;
+ struct xfs_rmap_key rmap;
+ struct xfs_rmap_key __rmap_bigkey[2];
+ struct xfs_refcount_key refc;
+};
+
+union xfs_btree_rec {
+ struct xfs_bmbt_rec bmbt;
+ xfs_bmdr_rec_t bmbr;
+ struct xfs_alloc_rec alloc;
+ struct xfs_inobt_rec inobt;
+ struct xfs_rmap_rec rmap;
+ struct xfs_refcount_rec refc;
+};
+
+struct xfs_btree_ops {
+ size_t key_len;
+ size_t rec_len;
+ struct xfs_btree_cur * (*dup_cursor)(struct xfs_btree_cur *);
+ void (*update_cursor)(struct xfs_btree_cur *, struct xfs_btree_cur *);
+ void (*set_root)(struct xfs_btree_cur *, union xfs_btree_ptr *, int);
+ int (*alloc_block)(struct xfs_btree_cur *, union xfs_btree_ptr *, union xfs_btree_ptr *, int *);
+ int (*free_block)(struct xfs_btree_cur *, struct xfs_buf *);
+ void (*update_lastrec)(struct xfs_btree_cur *, struct xfs_btree_block *, union xfs_btree_rec *, int, int);
+ int (*get_minrecs)(struct xfs_btree_cur *, int);
+ int (*get_maxrecs)(struct xfs_btree_cur *, int);
+ int (*get_dmaxrecs)(struct xfs_btree_cur *, int);
+ void (*init_key_from_rec)(union xfs_btree_key *, union xfs_btree_rec *);
+ void (*init_rec_from_cur)(struct xfs_btree_cur *, union xfs_btree_rec *);
+ void (*init_ptr_from_cur)(struct xfs_btree_cur *, union xfs_btree_ptr *);
+ void (*init_high_key_from_rec)(union xfs_btree_key *, union xfs_btree_rec *);
+ int64_t (*key_diff)(struct xfs_btree_cur *, union xfs_btree_key *);
+ int64_t (*diff_two_keys)(struct xfs_btree_cur *, union xfs_btree_key *, union xfs_btree_key *);
+ const struct xfs_buf_ops *buf_ops;
+ int (*keys_inorder)(struct xfs_btree_cur *, union xfs_btree_key *, union xfs_btree_key *);
+ int (*recs_inorder)(struct xfs_btree_cur *, union xfs_btree_rec *, union xfs_btree_rec *);
+};
+
+struct xbtree_afakeroot {
+ xfs_agblock_t af_root;
+ unsigned int af_levels;
+ unsigned int af_blocks;
+};
+
+struct xbtree_ifakeroot {
+ struct xfs_ifork *if_fork;
+ int64_t if_blocks;
+ unsigned int if_levels;
+ unsigned int if_fork_size;
+ unsigned int if_format;
+ unsigned int if_extents;
+};
+
+enum xfs_dacmp {
+ XFS_CMP_DIFFERENT = 0,
+ XFS_CMP_EXACT = 1,
+ XFS_CMP_CASE = 2,
+};
+
+struct xfs_da_args {
+ struct xfs_da_geometry *geo;
+ const uint8_t *name;
+ int namelen;
+ uint8_t filetype;
+ void *value;
+ int valuelen;
+ unsigned int attr_filter;
+ unsigned int attr_flags;
+ xfs_dahash_t hashval;
+ xfs_ino_t inumber;
+ struct xfs_inode *dp;
+ struct xfs_trans *trans;
+ xfs_extlen_t total;
+ int whichfork;
+ xfs_dablk_t blkno;
+ int index;
+ xfs_dablk_t rmtblkno;
+ int rmtblkcnt;
+ int rmtvaluelen;
+ xfs_dablk_t blkno2;
+ int index2;
+ xfs_dablk_t rmtblkno2;
+ int rmtblkcnt2;
+ int rmtvaluelen2;
+ int op_flags;
+ enum xfs_dacmp cmpresult;
+};
+
+typedef unsigned int xfs_alloctype_t;
+
+struct xfs_alloc_arg {
+ struct xfs_trans *tp;
+ struct xfs_mount *mp;
+ struct xfs_buf *agbp;
+ struct xfs_perag *pag;
+ xfs_fsblock_t fsbno;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t minlen;
+ xfs_extlen_t maxlen;
+ xfs_extlen_t mod;
+ xfs_extlen_t prod;
+ xfs_extlen_t minleft;
+ xfs_extlen_t total;
+ xfs_extlen_t alignment;
+ xfs_extlen_t minalignslop;
+ xfs_agblock_t min_agbno;
+ xfs_agblock_t max_agbno;
+ xfs_extlen_t len;
+ xfs_alloctype_t type;
+ xfs_alloctype_t otype;
+ int datatype;
+ char wasdel;
+ char wasfromfl;
+ struct xfs_owner_info oinfo;
+ enum xfs_ag_resv_type resv;
+};
+
+struct xfs_attrlist_cursor_kern {
+ __u32 hashval;
+ __u32 blkno;
+ __u32 offset;
+ __u16 pad1;
+ __u8 pad2;
+ __u8 initted;
+};
+
+struct xfs_attr_list_context;
+
+typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int, unsigned char *, int, int);
+
+struct xfs_attr_list_context {
+ struct xfs_trans *tp;
+ struct xfs_inode *dp;
+ struct xfs_attrlist_cursor_kern cursor;
+ void *buffer;
+ int seen_enough;
+ bool allow_incomplete;
+ ssize_t count;
+ int dupcnt;
+ int bufsize;
+ int firstu;
+ unsigned int attr_filter;
+ int resynch;
+ put_listent_func_t put_listent;
+ int index;
+};
+
+struct xfs_item_ops {
+ unsigned int flags;
+ void (*iop_size)(struct xfs_log_item *, int *, int *);
+ void (*iop_format)(struct xfs_log_item *, struct xfs_log_vec *);
+ void (*iop_pin)(struct xfs_log_item *);
+ void (*iop_unpin)(struct xfs_log_item *, int);
+ uint (*iop_push)(struct xfs_log_item *, struct list_head *);
+ void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t);
+ void (*iop_release)(struct xfs_log_item *);
+ xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
+ void (*iop_error)(struct xfs_log_item *, xfs_buf_t *);
+ int (*iop_recover)(struct xfs_log_item *, struct xfs_trans *);
+ bool (*iop_match)(struct xfs_log_item *, uint64_t);
+};
+
+struct xlog_res {
+ uint r_len;
+ uint r_type;
+};
+
+typedef struct xlog_res xlog_res_t;
+
+struct xlog_ticket {
+ struct list_head t_queue;
+ struct task_struct *t_task;
+ xlog_tid_t t_tid;
+ atomic_t t_ref;
+ int t_curr_res;
+ int t_unit_res;
+ char t_ocnt;
+ char t_cnt;
+ char t_clientid;
+ char t_flags;
+ uint t_res_num;
+ uint t_res_num_ophdrs;
+ uint t_res_arr_sum;
+ uint t_res_o_flow;
+ xlog_res_t t_res_arr[15];
+};
+
+enum xlog_iclog_state {
+ XLOG_STATE_ACTIVE = 0,
+ XLOG_STATE_WANT_SYNC = 1,
+ XLOG_STATE_SYNCING = 2,
+ XLOG_STATE_DONE_SYNC = 3,
+ XLOG_STATE_CALLBACK = 4,
+ XLOG_STATE_DIRTY = 5,
+ XLOG_STATE_IOERROR = 6,
+};
+
+struct xlog_in_core {
+ wait_queue_head_t ic_force_wait;
+ wait_queue_head_t ic_write_wait;
+ struct xlog_in_core *ic_next;
+ struct xlog_in_core *ic_prev;
+ struct xlog *ic_log;
+ u32 ic_size;
+ u32 ic_offset;
+ enum xlog_iclog_state ic_state;
+ char *ic_datap;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ spinlock_t ic_callback_lock;
+ struct list_head ic_callbacks;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ atomic_t ic_refcnt;
+ xlog_in_core_2_t *ic_data;
+ struct semaphore ic_sema;
+ struct work_struct ic_end_io_work;
+ struct bio ic_bio;
+ struct bio_vec ic_bvec[0];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xfs_cil_ctx {
+ struct xfs_cil *cil;
+ xfs_lsn_t sequence;
+ xfs_lsn_t start_lsn;
+ xfs_lsn_t commit_lsn;
+ struct xlog_ticket *ticket;
+ int nvecs;
+ int space_used;
+ struct list_head busy_extents;
+ struct xfs_log_vec *lv_chain;
+ struct list_head iclog_entry;
+ struct list_head committing;
+ wait_queue_head_t push_wait;
+ struct work_struct discard_endio_work;
+};
+
+struct xfs_cil {
+ struct xlog *xc_log;
+ struct list_head xc_cil;
+ spinlock_t xc_cil_lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct rw_semaphore xc_ctx_lock;
+ struct xfs_cil_ctx *xc_ctx;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ spinlock_t xc_push_lock;
+ xfs_lsn_t xc_push_seq;
+ struct list_head xc_committing;
+ wait_queue_head_t xc_commit_wait;
+ xfs_lsn_t xc_current_sequence;
+ struct work_struct xc_push_work;
+ long: 64;
+};
+
+enum {
+ XFS_QLOWSP_1_PCNT = 0,
+ XFS_QLOWSP_3_PCNT = 1,
+ XFS_QLOWSP_5_PCNT = 2,
+ XFS_QLOWSP_MAX = 3,
+};
+
+enum xlog_recover_reorder {
+ XLOG_REORDER_BUFFER_LIST = 0,
+ XLOG_REORDER_ITEM_LIST = 1,
+ XLOG_REORDER_INODE_BUFFER_LIST = 2,
+ XLOG_REORDER_CANCEL_LIST = 3,
+};
+
+struct xlog_recover_item;
+
+struct xlog_recover_item_ops {
+ uint16_t item_type;
+ enum xlog_recover_reorder (*reorder)(struct xlog_recover_item *);
+ void (*ra_pass2)(struct xlog *, struct xlog_recover_item *);
+ int (*commit_pass1)(struct xlog *, struct xlog_recover_item *);
+ int (*commit_pass2)(struct xlog *, struct list_head *, struct xlog_recover_item *, xfs_lsn_t);
+};
+
+struct xlog_recover_item {
+ struct list_head ri_list;
+ int ri_cnt;
+ int ri_total;
+ struct xfs_log_iovec *ri_buf;
+ const struct xlog_recover_item_ops *ri_ops;
+};
+
+struct xlog_recover {
+ struct hlist_node r_list;
+ xlog_tid_t r_log_tid;
+ xfs_trans_header_t r_theader;
+ int r_state;
+ xfs_lsn_t r_lsn;
+ struct list_head r_itemq;
+};
+
+struct xfs_fsmap {
+ dev_t fmr_device;
+ uint32_t fmr_flags;
+ uint64_t fmr_physical;
+ uint64_t fmr_owner;
+ xfs_fileoff_t fmr_offset;
+ xfs_filblks_t fmr_length;
+};
+
+struct trace_event_raw_xfs_attr_list_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ u32 hashval;
+ u32 blkno;
+ u32 offset;
+ void *buffer;
+ int bufsize;
+ int count;
+ int firstu;
+ int dupcnt;
+ unsigned int attr_filter;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_perag_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ int refcount;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ag_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_attr_list_node_descend {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ u32 hashval;
+ u32 blkno;
+ u32 offset;
+ void *buffer;
+ int bufsize;
+ int count;
+ int firstu;
+ int dupcnt;
+ unsigned int attr_filter;
+ u32 bt_hashval;
+ u32 bt_before;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_bmap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ void *leaf;
+ int pos;
+ xfs_fileoff_t startoff;
+ xfs_fsblock_t startblock;
+ xfs_filblks_t blockcount;
+ xfs_exntst_t state;
+ int bmap_state;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_buf_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_daddr_t bno;
+ int nblks;
+ int hold;
+ int pincount;
+ unsigned int lockval;
+ unsigned int flags;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_buf_flags_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_daddr_t bno;
+ size_t buffer_length;
+ int hold;
+ int pincount;
+ unsigned int lockval;
+ unsigned int flags;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_buf_ioerror {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_daddr_t bno;
+ size_t buffer_length;
+ unsigned int flags;
+ int hold;
+ int pincount;
+ unsigned int lockval;
+ int error;
+ xfs_failaddr_t caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_buf_item_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_daddr_t buf_bno;
+ size_t buf_len;
+ int buf_hold;
+ int buf_pincount;
+ int buf_lockval;
+ unsigned int buf_flags;
+ unsigned int bli_recur;
+ int bli_refcount;
+ unsigned int bli_flags;
+ long unsigned int li_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_filestream_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_agnumber_t agno;
+ int streams;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_filestream_pick {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_agnumber_t agno;
+ int streams;
+ xfs_extlen_t free;
+ int nscan;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_lock_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ int lock_flags;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_inode_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_filemap_fault {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ enum page_entry_size pe_size;
+ bool write_fault;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_iref_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ int count;
+ int pincount;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_iomap_prealloc_size {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fsblock_t blocks;
+ int shift;
+ unsigned int writeio_blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_irec_merge_pre {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agino_t agino;
+ uint16_t holemask;
+ xfs_agino_t nagino;
+ uint16_t nholemask;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_irec_merge_post {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agino_t agino;
+ uint16_t holemask;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_namespace_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t dp_ino;
+ int namelen;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_rename {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t src_dp_ino;
+ xfs_ino_t target_dp_ino;
+ int src_namelen;
+ int target_namelen;
+ u32 __data_loc_src_name;
+ u32 __data_loc_target_name;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_dquot_class {
+ struct trace_entry ent;
+ dev_t dev;
+ u32 id;
+ unsigned int flags;
+ unsigned int nrefs;
+ long long unsigned int res_bcount;
+ long long unsigned int bcount;
+ long long unsigned int icount;
+ long long unsigned int blk_hardlimit;
+ long long unsigned int blk_softlimit;
+ long long unsigned int ino_hardlimit;
+ long long unsigned int ino_softlimit;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_loggrant_class {
+ struct trace_entry ent;
+ dev_t dev;
+ char ocnt;
+ char cnt;
+ int curr_res;
+ int unit_res;
+ unsigned int flags;
+ int reserveq;
+ int writeq;
+ int grant_reserve_cycle;
+ int grant_reserve_bytes;
+ int grant_write_cycle;
+ int grant_write_bytes;
+ int curr_cycle;
+ int curr_block;
+ xfs_lsn_t tail_lsn;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_item_class {
+ struct trace_entry ent;
+ dev_t dev;
+ void *lip;
+ uint type;
+ long unsigned int flags;
+ xfs_lsn_t lsn;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_force {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_lsn_t lsn;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ail_class {
+ struct trace_entry ent;
+ dev_t dev;
+ void *lip;
+ uint type;
+ long unsigned int flags;
+ xfs_lsn_t old_lsn;
+ xfs_lsn_t new_lsn;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_assign_tail_lsn {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_lsn_t new_lsn;
+ xfs_lsn_t old_lsn;
+ xfs_lsn_t last_sync_lsn;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_file_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fsize_t size;
+ loff_t offset;
+ size_t count;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_imap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ loff_t size;
+ loff_t offset;
+ size_t count;
+ int whichfork;
+ xfs_fileoff_t startoff;
+ xfs_fsblock_t startblock;
+ xfs_filblks_t blockcount;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_simple_io_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ loff_t isize;
+ loff_t disize;
+ loff_t offset;
+ size_t count;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_itrunc_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fsize_t size;
+ xfs_fsize_t new_size;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_pagecache_inval {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fsize_t size;
+ xfs_off_t start;
+ xfs_off_t finish;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_bunmap {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fsize_t size;
+ xfs_fileoff_t bno;
+ xfs_filblks_t len;
+ long unsigned int caller_ip;
+ int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_extent_busy_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_extent_busy_trim {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ xfs_agblock_t tbno;
+ xfs_extlen_t tlen;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_agf_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ int flags;
+ __u32 length;
+ __u32 bno_root;
+ __u32 cnt_root;
+ __u32 bno_level;
+ __u32 cnt_level;
+ __u32 flfirst;
+ __u32 fllast;
+ __u32 flcount;
+ __u32 freeblks;
+ __u32 longest;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_free_extent {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int resv;
+ int haveleft;
+ int haveright;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_alloc_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t minlen;
+ xfs_extlen_t maxlen;
+ xfs_extlen_t mod;
+ xfs_extlen_t prod;
+ xfs_extlen_t minleft;
+ xfs_extlen_t total;
+ xfs_extlen_t alignment;
+ xfs_extlen_t minalignslop;
+ xfs_extlen_t len;
+ short int type;
+ short int otype;
+ char wasdel;
+ char wasfromfl;
+ int resv;
+ int datatype;
+ xfs_fsblock_t firstblock;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_alloc_cur_check {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_btnum_t btnum;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_extlen_t diff;
+ bool new;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_da_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ u32 __data_loc_name;
+ int namelen;
+ xfs_dahash_t hashval;
+ xfs_ino_t inumber;
+ int op_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_attr_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ u32 __data_loc_name;
+ int namelen;
+ int valuelen;
+ xfs_dahash_t hashval;
+ unsigned int attr_filter;
+ unsigned int attr_flags;
+ int op_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_dir2_space_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ int op_flags;
+ int idx;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_dir2_leafn_moveents {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ int op_flags;
+ int src_idx;
+ int dst_idx;
+ int count;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_swap_extent_class {
+ struct trace_entry ent;
+ dev_t dev;
+ int which;
+ xfs_ino_t ino;
+ int format;
+ int nex;
+ int broot_size;
+ int fork_off;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_recover {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_daddr_t headblk;
+ xfs_daddr_t tailblk;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_recover_record {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_lsn_t lsn;
+ int len;
+ int num_logops;
+ int pass;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_recover_item_class {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int item;
+ xlog_tid_t tid;
+ xfs_lsn_t lsn;
+ int type;
+ int pass;
+ int count;
+ int total;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_recover_buf_item_class {
+ struct trace_entry ent;
+ dev_t dev;
+ int64_t blkno;
+ short unsigned int len;
+ short unsigned int flags;
+ short unsigned int size;
+ unsigned int map_size;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_recover_ino_item_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ short unsigned int size;
+ int fields;
+ short unsigned int asize;
+ short unsigned int dsize;
+ int64_t blkno;
+ int len;
+ int boffset;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_log_recover_icreate_item_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ unsigned int count;
+ unsigned int isize;
+ xfs_agblock_t length;
+ unsigned int gen;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_discard_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_btree_cur_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_btnum_t btnum;
+ int level;
+ int nlevels;
+ int ptr;
+ xfs_daddr_t daddr;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_defer_class {
+ struct trace_entry ent;
+ dev_t dev;
+ struct xfs_trans *tp;
+ char committed;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_defer_error_class {
+ struct trace_entry ent;
+ dev_t dev;
+ struct xfs_trans *tp;
+ char committed;
+ int error;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_defer_pending_class {
+ struct trace_entry ent;
+ dev_t dev;
+ int type;
+ void *intent;
+ char committed;
+ int nr;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_phys_extent_deferred_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ int type;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_map_extent_deferred_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_ino_t ino;
+ xfs_agblock_t agbno;
+ int whichfork;
+ xfs_fileoff_t l_loff;
+ xfs_filblks_t l_len;
+ xfs_exntst_t l_state;
+ int op;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_rmap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ uint64_t owner;
+ uint64_t offset;
+ long unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ag_error_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ int error;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_rmapbt_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ag_resv_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ int resv;
+ xfs_extlen_t freeblks;
+ xfs_extlen_t flcount;
+ xfs_extlen_t reserved;
+ xfs_extlen_t asked;
+ xfs_extlen_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ag_btree_lookup_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_lookup_t dir;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_refcount_extent_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t startblock;
+ xfs_extlen_t blockcount;
+ xfs_nlink_t refcount;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_refcount_extent_at_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t startblock;
+ xfs_extlen_t blockcount;
+ xfs_nlink_t refcount;
+ xfs_agblock_t agbno;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_refcount_double_extent_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t i1_startblock;
+ xfs_extlen_t i1_blockcount;
+ xfs_nlink_t i1_refcount;
+ xfs_agblock_t i2_startblock;
+ xfs_extlen_t i2_blockcount;
+ xfs_nlink_t i2_refcount;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_refcount_double_extent_at_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t i1_startblock;
+ xfs_extlen_t i1_blockcount;
+ xfs_nlink_t i1_refcount;
+ xfs_agblock_t i2_startblock;
+ xfs_extlen_t i2_blockcount;
+ xfs_nlink_t i2_refcount;
+ xfs_agblock_t agbno;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_refcount_triple_extent_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agblock_t i1_startblock;
+ xfs_extlen_t i1_blockcount;
+ xfs_nlink_t i1_refcount;
+ xfs_agblock_t i2_startblock;
+ xfs_extlen_t i2_blockcount;
+ xfs_nlink_t i2_refcount;
+ xfs_agblock_t i3_startblock;
+ xfs_extlen_t i3_blockcount;
+ xfs_nlink_t i3_refcount;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_refcount_finish_one_leftover {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ int type;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ xfs_agblock_t new_agbno;
+ xfs_extlen_t new_len;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_inode_error_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ int error;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_double_io_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t src_ino;
+ loff_t src_isize;
+ loff_t src_disize;
+ loff_t src_offset;
+ size_t len;
+ xfs_ino_t dest_ino;
+ loff_t dest_isize;
+ loff_t dest_disize;
+ loff_t dest_offset;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_inode_irec_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fileoff_t lblk;
+ xfs_extlen_t len;
+ xfs_fsblock_t pblk;
+ int state;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_reflink_remap_blocks_loop {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t src_ino;
+ xfs_fileoff_t src_lblk;
+ xfs_filblks_t len;
+ xfs_ino_t dest_ino;
+ xfs_fileoff_t dest_lblk;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_reflink_punch_range {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fileoff_t lblk;
+ xfs_extlen_t len;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_reflink_remap {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ xfs_fileoff_t lblk;
+ xfs_extlen_t len;
+ xfs_fsblock_t new_pblk;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ioctl_clone {
+ struct trace_entry ent;
+ dev_t dev;
+ long unsigned int src_ino;
+ loff_t src_isize;
+ long unsigned int dest_ino;
+ loff_t dest_isize;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_fsmap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ dev_t keydev;
+ xfs_agnumber_t agno;
+ xfs_fsblock_t bno;
+ xfs_filblks_t len;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_getfsmap_class {
+ struct trace_entry ent;
+ dev_t dev;
+ dev_t keydev;
+ xfs_daddr_t block;
+ xfs_daddr_t len;
+ uint64_t owner;
+ uint64_t offset;
+ uint64_t flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_trans_resv_calc {
+ struct trace_entry ent;
+ dev_t dev;
+ int type;
+ uint logres;
+ int logcount;
+ int logflags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_trans_class {
+ struct trace_entry ent;
+ dev_t dev;
+ uint32_t tid;
+ uint32_t flags;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_iunlink_update_bucket {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ unsigned int bucket;
+ xfs_agino_t old_ptr;
+ xfs_agino_t new_ptr;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_iunlink_update_dinode {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agino_t agino;
+ xfs_agino_t old_ptr;
+ xfs_agino_t new_ptr;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ag_inode_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agino_t agino;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_fs_corrupt_class {
+ struct trace_entry ent;
+ dev_t dev;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_ag_corrupt_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_inode_corrupt_class {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_ino_t ino;
+ unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_iwalk_ag {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agino_t startino;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_iwalk_ag_rec {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_agnumber_t agno;
+ xfs_agino_t startino;
+ uint64_t freemask;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_pwork_init {
+ struct trace_entry ent;
+ dev_t dev;
+ unsigned int nr_threads;
+ pid_t pid;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_kmem_class {
+ struct trace_entry ent;
+ ssize_t size;
+ int flags;
+ long unsigned int caller_ip;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_check_new_dalign {
+ struct trace_entry ent;
+ dev_t dev;
+ int new_dalign;
+ xfs_ino_t sb_rootino;
+ xfs_ino_t calc_rootino;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_btree_commit_afakeroot {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_btnum_t btnum;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ unsigned int levels;
+ unsigned int blocks;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_btree_commit_ifakeroot {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_btnum_t btnum;
+ xfs_agnumber_t agno;
+ xfs_agino_t agino;
+ unsigned int levels;
+ unsigned int blocks;
+ int whichfork;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_btree_bload_level_geometry {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_btnum_t btnum;
+ unsigned int level;
+ unsigned int nlevels;
+ uint64_t nr_this_level;
+ unsigned int nr_per_block;
+ unsigned int desired_npb;
+ long long unsigned int blocks;
+ long long unsigned int blocks_with_extra;
+ char __data[0];
+};
+
+struct trace_event_raw_xfs_btree_bload_block {
+ struct trace_entry ent;
+ dev_t dev;
+ xfs_btnum_t btnum;
+ unsigned int level;
+ long long unsigned int block_idx;
+ long long unsigned int nr_blocks;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ unsigned int nr_records;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_xfs_attr_list_class {};
+
+struct trace_event_data_offsets_xfs_perag_class {};
+
+struct trace_event_data_offsets_xfs_ag_class {};
+
+struct trace_event_data_offsets_xfs_attr_list_node_descend {};
+
+struct trace_event_data_offsets_xfs_bmap_class {};
+
+struct trace_event_data_offsets_xfs_buf_class {};
+
+struct trace_event_data_offsets_xfs_buf_flags_class {};
+
+struct trace_event_data_offsets_xfs_buf_ioerror {};
+
+struct trace_event_data_offsets_xfs_buf_item_class {};
+
+struct trace_event_data_offsets_xfs_filestream_class {};
+
+struct trace_event_data_offsets_xfs_filestream_pick {};
+
+struct trace_event_data_offsets_xfs_lock_class {};
+
+struct trace_event_data_offsets_xfs_inode_class {};
+
+struct trace_event_data_offsets_xfs_filemap_fault {};
+
+struct trace_event_data_offsets_xfs_iref_class {};
+
+struct trace_event_data_offsets_xfs_iomap_prealloc_size {};
+
+struct trace_event_data_offsets_xfs_irec_merge_pre {};
+
+struct trace_event_data_offsets_xfs_irec_merge_post {};
+
+struct trace_event_data_offsets_xfs_namespace_class {
+ u32 name;
+};
+
+struct trace_event_data_offsets_xfs_rename {
+ u32 src_name;
+ u32 target_name;
+};
+
+struct trace_event_data_offsets_xfs_dquot_class {};
+
+struct trace_event_data_offsets_xfs_loggrant_class {};
+
+struct trace_event_data_offsets_xfs_log_item_class {};
+
+struct trace_event_data_offsets_xfs_log_force {};
+
+struct trace_event_data_offsets_xfs_ail_class {};
+
+struct trace_event_data_offsets_xfs_log_assign_tail_lsn {};
+
+struct trace_event_data_offsets_xfs_file_class {};
+
+struct trace_event_data_offsets_xfs_imap_class {};
+
+struct trace_event_data_offsets_xfs_simple_io_class {};
+
+struct trace_event_data_offsets_xfs_itrunc_class {};
+
+struct trace_event_data_offsets_xfs_pagecache_inval {};
+
+struct trace_event_data_offsets_xfs_bunmap {};
+
+struct trace_event_data_offsets_xfs_extent_busy_class {};
+
+struct trace_event_data_offsets_xfs_extent_busy_trim {};
+
+struct trace_event_data_offsets_xfs_agf_class {};
+
+struct trace_event_data_offsets_xfs_free_extent {};
+
+struct trace_event_data_offsets_xfs_alloc_class {};
+
+struct trace_event_data_offsets_xfs_alloc_cur_check {};
+
+struct trace_event_data_offsets_xfs_da_class {
+ u32 name;
+};
+
+struct trace_event_data_offsets_xfs_attr_class {
+ u32 name;
+};
+
+struct trace_event_data_offsets_xfs_dir2_space_class {};
+
+struct trace_event_data_offsets_xfs_dir2_leafn_moveents {};
+
+struct trace_event_data_offsets_xfs_swap_extent_class {};
+
+struct trace_event_data_offsets_xfs_log_recover {};
+
+struct trace_event_data_offsets_xfs_log_recover_record {};
+
+struct trace_event_data_offsets_xfs_log_recover_item_class {};
+
+struct trace_event_data_offsets_xfs_log_recover_buf_item_class {};
+
+struct trace_event_data_offsets_xfs_log_recover_ino_item_class {};
+
+struct trace_event_data_offsets_xfs_log_recover_icreate_item_class {};
+
+struct trace_event_data_offsets_xfs_discard_class {};
+
+struct trace_event_data_offsets_xfs_btree_cur_class {};
+
+struct trace_event_data_offsets_xfs_defer_class {};
+
+struct trace_event_data_offsets_xfs_defer_error_class {};
+
+struct trace_event_data_offsets_xfs_defer_pending_class {};
+
+struct trace_event_data_offsets_xfs_phys_extent_deferred_class {};
+
+struct trace_event_data_offsets_xfs_map_extent_deferred_class {};
+
+struct trace_event_data_offsets_xfs_rmap_class {};
+
+struct trace_event_data_offsets_xfs_ag_error_class {};
+
+struct trace_event_data_offsets_xfs_rmapbt_class {};
+
+struct trace_event_data_offsets_xfs_ag_resv_class {};
+
+struct trace_event_data_offsets_xfs_ag_btree_lookup_class {};
+
+struct trace_event_data_offsets_xfs_refcount_extent_class {};
+
+struct trace_event_data_offsets_xfs_refcount_extent_at_class {};
+
+struct trace_event_data_offsets_xfs_refcount_double_extent_class {};
+
+struct trace_event_data_offsets_xfs_refcount_double_extent_at_class {};
+
+struct trace_event_data_offsets_xfs_refcount_triple_extent_class {};
+
+struct trace_event_data_offsets_xfs_refcount_finish_one_leftover {};
+
+struct trace_event_data_offsets_xfs_inode_error_class {};
+
+struct trace_event_data_offsets_xfs_double_io_class {};
+
+struct trace_event_data_offsets_xfs_inode_irec_class {};
+
+struct trace_event_data_offsets_xfs_reflink_remap_blocks_loop {};
+
+struct trace_event_data_offsets_xfs_reflink_punch_range {};
+
+struct trace_event_data_offsets_xfs_reflink_remap {};
+
+struct trace_event_data_offsets_xfs_ioctl_clone {};
+
+struct trace_event_data_offsets_xfs_fsmap_class {};
+
+struct trace_event_data_offsets_xfs_getfsmap_class {};
+
+struct trace_event_data_offsets_xfs_trans_resv_calc {};
+
+struct trace_event_data_offsets_xfs_trans_class {};
+
+struct trace_event_data_offsets_xfs_iunlink_update_bucket {};
+
+struct trace_event_data_offsets_xfs_iunlink_update_dinode {};
+
+struct trace_event_data_offsets_xfs_ag_inode_class {};
+
+struct trace_event_data_offsets_xfs_fs_corrupt_class {};
+
+struct trace_event_data_offsets_xfs_ag_corrupt_class {};
+
+struct trace_event_data_offsets_xfs_inode_corrupt_class {};
+
+struct trace_event_data_offsets_xfs_iwalk_ag {};
+
+struct trace_event_data_offsets_xfs_iwalk_ag_rec {};
+
+struct trace_event_data_offsets_xfs_pwork_init {};
+
+struct trace_event_data_offsets_xfs_kmem_class {};
+
+struct trace_event_data_offsets_xfs_check_new_dalign {};
+
+struct trace_event_data_offsets_xfs_btree_commit_afakeroot {};
+
+struct trace_event_data_offsets_xfs_btree_commit_ifakeroot {};
+
+struct trace_event_data_offsets_xfs_btree_bload_level_geometry {};
+
+struct trace_event_data_offsets_xfs_btree_bload_block {};
+
+typedef void (*btf_trace_xfs_attr_list_sf)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_sf_all)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_leaf)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_leaf_end)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_full)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_add)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_wrong_blk)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_list_notfound)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_leaf_list)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_attr_node_list)(void *, struct xfs_attr_list_context *);
+
+typedef void (*btf_trace_xfs_perag_get)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_get_tag)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_put)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_set_reclaim)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_clear_reclaim)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_set_eofblocks)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_clear_eofblocks)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_set_cowblocks)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_perag_clear_cowblocks)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_read_agf)(void *, struct xfs_mount *, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_alloc_read_agf)(void *, struct xfs_mount *, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_read_agi)(void *, struct xfs_mount *, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_ialloc_read_agi)(void *, struct xfs_mount *, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_attr_list_node_descend)(void *, struct xfs_attr_list_context *, struct xfs_da_node_entry *);
+
+typedef void (*btf_trace_xfs_iext_insert)(void *, struct xfs_inode *, struct xfs_iext_cursor *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_iext_remove)(void *, struct xfs_inode *, struct xfs_iext_cursor *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_bmap_pre_update)(void *, struct xfs_inode *, struct xfs_iext_cursor *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_bmap_post_update)(void *, struct xfs_inode *, struct xfs_iext_cursor *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_read_extent)(void *, struct xfs_inode *, struct xfs_iext_cursor *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_write_extent)(void *, struct xfs_inode *, struct xfs_iext_cursor *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_init)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_free)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_hold)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_rele)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_iodone)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_submit)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_lock)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_lock_done)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_trylock_fail)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_trylock)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_unlock)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_iowait)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_iowait_done)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_delwri_queue)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_delwri_queued)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_delwri_split)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_delwri_pushbuf)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_get_uncached)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_item_relse)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_item_iodone_async)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_error_relse)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_wait_buftarg)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_read_buf_shut)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_btree_corrupt)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_reset_dqcounts)(void *, struct xfs_buf *, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_find)(void *, struct xfs_buf *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_get)(void *, struct xfs_buf *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_read)(void *, struct xfs_buf *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_buf_ioerror)(void *, struct xfs_buf *, int, xfs_failaddr_t);
+
+typedef void (*btf_trace_xfs_buf_item_size)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_size_ordered)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_size_stale)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_format)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_format_stale)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_ordered)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_pin)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_unpin)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_unpin_stale)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_release)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_committed)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_buf_item_push)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_get_buf)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_get_buf_recur)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_getsb)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_getsb_recur)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_read_buf)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_read_buf_recur)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_log_buf)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_brelse)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_bjoin)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_bhold)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_bhold_release)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_trans_binval)(void *, struct xfs_buf_log_item *);
+
+typedef void (*btf_trace_xfs_filestream_free)(void *, struct xfs_mount *, xfs_ino_t, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_filestream_lookup)(void *, struct xfs_mount *, xfs_ino_t, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_filestream_scan)(void *, struct xfs_mount *, xfs_ino_t, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_filestream_pick)(void *, struct xfs_inode *, xfs_agnumber_t, xfs_extlen_t, int);
+
+typedef void (*btf_trace_xfs_ilock)(void *, struct xfs_inode *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_ilock_nowait)(void *, struct xfs_inode *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_ilock_demote)(void *, struct xfs_inode *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_iunlock)(void *, struct xfs_inode *, unsigned int, long unsigned int);
+
+typedef void (*btf_trace_xfs_iget_skip)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_iget_reclaim)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_iget_reclaim_fail)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_iget_hit)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_iget_miss)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_getattr)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_setattr)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_readlink)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inactive_symlink)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_alloc_file_space)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_free_file_space)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_zero_file_space)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_collapse_file_space)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_insert_file_space)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_readdir)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_vm_bmap)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_file_ioctl)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_file_compat_ioctl)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_ioctl_setattr)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_dir_fsync)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_file_fsync)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_destroy_inode)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_update_time)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_dquot_dqalloc)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_dquot_dqdetach)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inode_set_eofblocks_tag)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inode_clear_eofblocks_tag)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inode_free_eofblocks_invalid)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inode_set_cowblocks_tag)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inode_clear_cowblocks_tag)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_inode_free_cowblocks_invalid)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_filemap_fault)(void *, struct xfs_inode *, enum page_entry_size, bool);
+
+typedef void (*btf_trace_xfs_iomap_prealloc_size)(void *, struct xfs_inode *, xfs_fsblock_t, int, unsigned int);
+
+typedef void (*btf_trace_xfs_irec_merge_pre)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agino_t, uint16_t, xfs_agino_t, uint16_t);
+
+typedef void (*btf_trace_xfs_irec_merge_post)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agino_t, uint16_t);
+
+typedef void (*btf_trace_xfs_irele)(void *, struct xfs_inode *, long unsigned int);
+
+typedef void (*btf_trace_xfs_inode_pin)(void *, struct xfs_inode *, long unsigned int);
+
+typedef void (*btf_trace_xfs_inode_unpin)(void *, struct xfs_inode *, long unsigned int);
+
+typedef void (*btf_trace_xfs_inode_unpin_nowait)(void *, struct xfs_inode *, long unsigned int);
+
+typedef void (*btf_trace_xfs_remove)(void *, struct xfs_inode *, struct xfs_name *);
+
+typedef void (*btf_trace_xfs_link)(void *, struct xfs_inode *, struct xfs_name *);
+
+typedef void (*btf_trace_xfs_lookup)(void *, struct xfs_inode *, struct xfs_name *);
+
+typedef void (*btf_trace_xfs_create)(void *, struct xfs_inode *, struct xfs_name *);
+
+typedef void (*btf_trace_xfs_symlink)(void *, struct xfs_inode *, struct xfs_name *);
+
+typedef void (*btf_trace_xfs_rename)(void *, struct xfs_inode *, struct xfs_inode *, struct xfs_name *, struct xfs_name *);
+
+typedef void (*btf_trace_xfs_dqadjust)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqreclaim_want)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqreclaim_dirty)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqreclaim_busy)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqreclaim_done)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqattach_found)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqattach_get)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqalloc)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqtobp_read)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqread)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqread_fail)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqget_hit)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqget_miss)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqget_freeing)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqget_dup)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqput)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqput_free)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqrele)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqflush)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqflush_force)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_dqflush_done)(void *, struct xfs_dquot *);
+
+typedef void (*btf_trace_xfs_log_umount_write)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_grant_sleep)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_grant_wake)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_grant_wake_up)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_reserve)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_reserve_exit)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_regrant)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_regrant_exit)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_ticket_regrant)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_ticket_regrant_exit)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_ticket_regrant_sub)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_ticket_ungrant)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_ticket_ungrant_sub)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_ticket_ungrant_exit)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_cil_wait)(void *, struct xlog *, struct xlog_ticket *);
+
+typedef void (*btf_trace_xfs_log_force)(void *, struct xfs_mount *, xfs_lsn_t, long unsigned int);
+
+typedef void (*btf_trace_xfs_ail_push)(void *, struct xfs_log_item *);
+
+typedef void (*btf_trace_xfs_ail_pinned)(void *, struct xfs_log_item *);
+
+typedef void (*btf_trace_xfs_ail_locked)(void *, struct xfs_log_item *);
+
+typedef void (*btf_trace_xfs_ail_flushing)(void *, struct xfs_log_item *);
+
+typedef void (*btf_trace_xfs_ail_insert)(void *, struct xfs_log_item *, xfs_lsn_t, xfs_lsn_t);
+
+typedef void (*btf_trace_xfs_ail_move)(void *, struct xfs_log_item *, xfs_lsn_t, xfs_lsn_t);
+
+typedef void (*btf_trace_xfs_ail_delete)(void *, struct xfs_log_item *, xfs_lsn_t, xfs_lsn_t);
+
+typedef void (*btf_trace_xfs_log_assign_tail_lsn)(void *, struct xlog *, xfs_lsn_t);
+
+typedef void (*btf_trace_xfs_file_buffered_read)(void *, struct xfs_inode *, size_t, loff_t);
+
+typedef void (*btf_trace_xfs_file_direct_read)(void *, struct xfs_inode *, size_t, loff_t);
+
+typedef void (*btf_trace_xfs_file_dax_read)(void *, struct xfs_inode *, size_t, loff_t);
+
+typedef void (*btf_trace_xfs_file_buffered_write)(void *, struct xfs_inode *, size_t, loff_t);
+
+typedef void (*btf_trace_xfs_file_direct_write)(void *, struct xfs_inode *, size_t, loff_t);
+
+typedef void (*btf_trace_xfs_file_dax_write)(void *, struct xfs_inode *, size_t, loff_t);
+
+typedef void (*btf_trace_xfs_map_blocks_found)(void *, struct xfs_inode *, xfs_off_t, ssize_t, int, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_map_blocks_alloc)(void *, struct xfs_inode *, xfs_off_t, ssize_t, int, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_iomap_alloc)(void *, struct xfs_inode *, xfs_off_t, ssize_t, int, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_iomap_found)(void *, struct xfs_inode *, xfs_off_t, ssize_t, int, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_delalloc_enospc)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_unwritten_convert)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_setfilesize)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_zero_eof)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_end_io_direct_write)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_end_io_direct_write_unwritten)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_end_io_direct_write_append)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_itruncate_extents_start)(void *, struct xfs_inode *, xfs_fsize_t);
+
+typedef void (*btf_trace_xfs_itruncate_extents_end)(void *, struct xfs_inode *, xfs_fsize_t);
+
+typedef void (*btf_trace_xfs_pagecache_inval)(void *, struct xfs_inode *, xfs_off_t, xfs_off_t);
+
+typedef void (*btf_trace_xfs_bunmap)(void *, struct xfs_inode *, xfs_fileoff_t, xfs_filblks_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_extent_busy)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_extent_busy_enomem)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_extent_busy_force)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_extent_busy_reuse)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_extent_busy_clear)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_extent_busy_trim)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_agf)(void *, struct xfs_mount *, struct xfs_agf *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_agfl_reset)(void *, struct xfs_mount *, struct xfs_agf *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_free_extent)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, enum xfs_ag_resv_type, int, int);
+
+typedef void (*btf_trace_xfs_alloc_exact_done)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_exact_notfound)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_exact_error)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_near_nominleft)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_near_first)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_cur)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_cur_right)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_cur_left)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_cur_lookup)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_cur_lookup_done)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_near_error)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_near_noentry)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_near_busy)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_size_neither)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_size_noentry)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_size_nominleft)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_size_done)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_size_error)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_size_busy)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_small_freelist)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_small_notenough)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_small_done)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_small_error)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_vextent_badargs)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_vextent_nofix)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_vextent_noagbp)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_vextent_loopfailed)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_vextent_allfailed)(void *, struct xfs_alloc_arg *);
+
+typedef void (*btf_trace_xfs_alloc_cur_check)(void *, struct xfs_mount *, xfs_btnum_t, xfs_agblock_t, xfs_extlen_t, xfs_extlen_t, bool);
+
+typedef void (*btf_trace_xfs_dir2_sf_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_create)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_lookup)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_replace)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_removename)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_toino4)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_toino8)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_sf_to_block)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_block_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_block_lookup)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_block_replace)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_block_removename)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_block_to_sf)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_block_to_leaf)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leaf_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leaf_lookup)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leaf_replace)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leaf_removename)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leaf_to_block)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leaf_to_node)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_node_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_node_lookup)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_node_replace)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_node_removename)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_node_to_leaf)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_sf_add)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_sf_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_sf_create)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_sf_lookup)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_sf_remove)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_sf_to_leaf)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_add)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_add_old)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_add_new)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_add_work)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_create)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_compact)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_get)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_lookup)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_replace)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_remove)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_removename)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_split)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_split_before)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_split_after)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_clearflag)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_setflag)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_flipflags)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_to_sf)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_to_node)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_rebalance)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_unbalance)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_leaf_toosmall)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_node_addname)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_node_get)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_node_replace)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_node_removename)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_fillstate)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_refillstate)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_rmtval_get)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_rmtval_set)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_attr_rmtval_remove)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_split)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_join)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_link_before)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_link_after)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_unlink_back)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_unlink_forward)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_root_split)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_root_join)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_add)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_create)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_split)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_remove)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_rebalance)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_unbalance)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_node_toosmall)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_swap_lastblock)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_grow_inode)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_shrink_inode)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_fixhashpath)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_da_path_shift)(void *, struct xfs_da_args *);
+
+typedef void (*btf_trace_xfs_dir2_leafn_add)(void *, struct xfs_da_args *, int);
+
+typedef void (*btf_trace_xfs_dir2_leafn_remove)(void *, struct xfs_da_args *, int);
+
+typedef void (*btf_trace_xfs_dir2_grow_inode)(void *, struct xfs_da_args *, int);
+
+typedef void (*btf_trace_xfs_dir2_shrink_inode)(void *, struct xfs_da_args *, int);
+
+typedef void (*btf_trace_xfs_dir2_leafn_moveents)(void *, struct xfs_da_args *, int, int, int);
+
+typedef void (*btf_trace_xfs_swap_extent_before)(void *, struct xfs_inode *, int);
+
+typedef void (*btf_trace_xfs_swap_extent_after)(void *, struct xfs_inode *, int);
+
+typedef void (*btf_trace_xfs_log_recover)(void *, struct xlog *, xfs_daddr_t, xfs_daddr_t);
+
+typedef void (*btf_trace_xfs_log_recover_record)(void *, struct xlog *, struct xlog_rec_header *, int);
+
+typedef void (*btf_trace_xfs_log_recover_item_add)(void *, struct xlog *, struct xlog_recover *, struct xlog_recover_item *, int);
+
+typedef void (*btf_trace_xfs_log_recover_item_add_cont)(void *, struct xlog *, struct xlog_recover *, struct xlog_recover_item *, int);
+
+typedef void (*btf_trace_xfs_log_recover_item_reorder_head)(void *, struct xlog *, struct xlog_recover *, struct xlog_recover_item *, int);
+
+typedef void (*btf_trace_xfs_log_recover_item_reorder_tail)(void *, struct xlog *, struct xlog_recover *, struct xlog_recover_item *, int);
+
+typedef void (*btf_trace_xfs_log_recover_item_recover)(void *, struct xlog *, struct xlog_recover *, struct xlog_recover_item *, int);
+
+typedef void (*btf_trace_xfs_log_recover_buf_not_cancel)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_cancel)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_cancel_add)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_cancel_ref_inc)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_recover)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_skip)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_inode_buf)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_reg_buf)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_buf_dquot_buf)(void *, struct xlog *, struct xfs_buf_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_inode_recover)(void *, struct xlog *, struct xfs_inode_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_inode_cancel)(void *, struct xlog *, struct xfs_inode_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_inode_skip)(void *, struct xlog *, struct xfs_inode_log_format *);
+
+typedef void (*btf_trace_xfs_log_recover_icreate_cancel)(void *, struct xlog *, struct xfs_icreate_log *);
+
+typedef void (*btf_trace_xfs_log_recover_icreate_recover)(void *, struct xlog *, struct xfs_icreate_log *);
+
+typedef void (*btf_trace_xfs_discard_extent)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_discard_toosmall)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_discard_exclude)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_discard_busy)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_btree_updkeys)(void *, struct xfs_btree_cur *, int, struct xfs_buf *);
+
+typedef void (*btf_trace_xfs_btree_overlapped_query_range)(void *, struct xfs_btree_cur *, int, struct xfs_buf *);
+
+typedef void (*btf_trace_xfs_defer_cancel)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_defer_trans_roll)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_defer_trans_abort)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_defer_finish)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_defer_finish_done)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_defer_trans_roll_error)(void *, struct xfs_trans *, int);
+
+typedef void (*btf_trace_xfs_defer_finish_error)(void *, struct xfs_trans *, int);
+
+typedef void (*btf_trace_xfs_defer_create_intent)(void *, struct xfs_mount *, struct xfs_defer_pending *);
+
+typedef void (*btf_trace_xfs_defer_cancel_list)(void *, struct xfs_mount *, struct xfs_defer_pending *);
+
+typedef void (*btf_trace_xfs_defer_pending_finish)(void *, struct xfs_mount *, struct xfs_defer_pending *);
+
+typedef void (*btf_trace_xfs_defer_pending_abort)(void *, struct xfs_mount *, struct xfs_defer_pending *);
+
+typedef void (*btf_trace_xfs_bmap_free_defer)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_bmap_free_deferred)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_agfl_free_defer)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_agfl_free_deferred)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_rmap_unmap)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, bool, const struct xfs_owner_info *);
+
+typedef void (*btf_trace_xfs_rmap_unmap_done)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, bool, const struct xfs_owner_info *);
+
+typedef void (*btf_trace_xfs_rmap_unmap_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_map)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, bool, const struct xfs_owner_info *);
+
+typedef void (*btf_trace_xfs_rmap_map_done)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, bool, const struct xfs_owner_info *);
+
+typedef void (*btf_trace_xfs_rmap_map_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_convert)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, bool, const struct xfs_owner_info *);
+
+typedef void (*btf_trace_xfs_rmap_convert_done)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, bool, const struct xfs_owner_info *);
+
+typedef void (*btf_trace_xfs_rmap_convert_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_convert_state)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_defer)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_ino_t, int, xfs_fileoff_t, xfs_filblks_t, xfs_exntst_t);
+
+typedef void (*btf_trace_xfs_rmap_deferred)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_ino_t, int, xfs_fileoff_t, xfs_filblks_t, xfs_exntst_t);
+
+typedef void (*btf_trace_xfs_rmapbt_alloc_block)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_rmapbt_free_block)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_rmap_update)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_insert)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_delete)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_insert_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_delete_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_update_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_find_left_neighbor_candidate)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_find_left_neighbor_query)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_lookup_le_range_candidate)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_lookup_le_range)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_lookup_le_range_result)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_find_right_neighbor_result)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_rmap_find_left_neighbor_result)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t, uint64_t, uint64_t, unsigned int);
+
+typedef void (*btf_trace_xfs_bmap_defer)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_ino_t, int, xfs_fileoff_t, xfs_filblks_t, xfs_exntst_t);
+
+typedef void (*btf_trace_xfs_bmap_deferred)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_ino_t, int, xfs_fileoff_t, xfs_filblks_t, xfs_exntst_t);
+
+typedef void (*btf_trace_xfs_ag_resv_init)(void *, struct xfs_perag *, enum xfs_ag_resv_type, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_ag_resv_free)(void *, struct xfs_perag *, enum xfs_ag_resv_type, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_ag_resv_alloc_extent)(void *, struct xfs_perag *, enum xfs_ag_resv_type, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_ag_resv_free_extent)(void *, struct xfs_perag *, enum xfs_ag_resv_type, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_ag_resv_critical)(void *, struct xfs_perag *, enum xfs_ag_resv_type, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_ag_resv_needed)(void *, struct xfs_perag *, enum xfs_ag_resv_type, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_ag_resv_free_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_ag_resv_init_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcountbt_alloc_block)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcountbt_free_block)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_lookup)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_lookup_t);
+
+typedef void (*btf_trace_xfs_refcount_get)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_update)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_insert)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_delete)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_insert_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_delete_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_update_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_increase)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_decrease)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_cow_increase)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_cow_decrease)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_merge_center_extents)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *, struct xfs_refcount_irec *, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_modify_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_recover_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_split_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *, xfs_agblock_t);
+
+typedef void (*btf_trace_xfs_refcount_merge_left_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_merge_right_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *, struct xfs_refcount_irec *);
+
+typedef void (*btf_trace_xfs_refcount_find_left_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *, struct xfs_refcount_irec *, xfs_agblock_t);
+
+typedef void (*btf_trace_xfs_refcount_find_right_extent)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_refcount_irec *, struct xfs_refcount_irec *, xfs_agblock_t);
+
+typedef void (*btf_trace_xfs_refcount_adjust_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_adjust_cow_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_merge_center_extents_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_modify_extent_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_split_extent_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_merge_left_extent_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_merge_right_extent_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_find_left_extent_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_find_right_extent_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_find_shared)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_find_shared_result)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_find_shared_error)(void *, struct xfs_mount *, xfs_agnumber_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_refcount_defer)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_deferred)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_refcount_finish_one_leftover)(void *, struct xfs_mount *, xfs_agnumber_t, int, xfs_agblock_t, xfs_extlen_t, xfs_agblock_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_reflink_set_inode_flag)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_reflink_unset_inode_flag)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_reflink_update_inode_size)(void *, struct xfs_inode *, xfs_fsize_t);
+
+typedef void (*btf_trace_xfs_reflink_remap_imap)(void *, struct xfs_inode *, xfs_off_t, ssize_t, int, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_reflink_remap_blocks_loop)(void *, struct xfs_inode *, xfs_fileoff_t, xfs_filblks_t, struct xfs_inode *, xfs_fileoff_t);
+
+typedef void (*btf_trace_xfs_reflink_punch_range)(void *, struct xfs_inode *, xfs_fileoff_t, xfs_extlen_t);
+
+typedef void (*btf_trace_xfs_reflink_remap)(void *, struct xfs_inode *, xfs_fileoff_t, xfs_extlen_t, xfs_fsblock_t);
+
+typedef void (*btf_trace_xfs_reflink_remap_range)(void *, struct xfs_inode *, xfs_off_t, xfs_off_t, struct xfs_inode *, xfs_off_t);
+
+typedef void (*btf_trace_xfs_reflink_remap_range_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_set_inode_flag_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_update_inode_size_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_remap_blocks_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_remap_extent_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_compare_extents)(void *, struct xfs_inode *, xfs_off_t, xfs_off_t, struct xfs_inode *, xfs_off_t);
+
+typedef void (*btf_trace_xfs_reflink_compare_extents_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_ioctl_clone)(void *, struct inode *, struct inode *);
+
+typedef void (*btf_trace_xfs_reflink_unshare)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_reflink_unshare_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_trim_around_shared)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_reflink_cow_found)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_reflink_cow_enospc)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_reflink_convert_cow)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_reflink_bounce_dio_write)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_reflink_cancel_cow_range)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_reflink_end_cow)(void *, struct xfs_inode *, xfs_off_t, ssize_t);
+
+typedef void (*btf_trace_xfs_reflink_cow_remap)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_reflink_cancel_cow_range_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_end_cow_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_reflink_cancel_cow)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_swap_extent_rmap_remap)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_swap_extent_rmap_remap_piece)(void *, struct xfs_inode *, struct xfs_bmbt_irec *);
+
+typedef void (*btf_trace_xfs_swap_extent_rmap_error)(void *, struct xfs_inode *, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_fsmap_low_key)(void *, struct xfs_mount *, u32, xfs_agnumber_t, struct xfs_rmap_irec *);
+
+typedef void (*btf_trace_xfs_fsmap_high_key)(void *, struct xfs_mount *, u32, xfs_agnumber_t, struct xfs_rmap_irec *);
+
+typedef void (*btf_trace_xfs_fsmap_mapping)(void *, struct xfs_mount *, u32, xfs_agnumber_t, struct xfs_rmap_irec *);
+
+typedef void (*btf_trace_xfs_getfsmap_low_key)(void *, struct xfs_mount *, struct xfs_fsmap *);
+
+typedef void (*btf_trace_xfs_getfsmap_high_key)(void *, struct xfs_mount *, struct xfs_fsmap *);
+
+typedef void (*btf_trace_xfs_getfsmap_mapping)(void *, struct xfs_mount *, struct xfs_fsmap *);
+
+typedef void (*btf_trace_xfs_trans_resv_calc)(void *, struct xfs_mount *, unsigned int, struct xfs_trans_res *);
+
+typedef void (*btf_trace_xfs_trans_alloc)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_cancel)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_commit)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_dup)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_free)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_roll)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_add_item)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_commit_items)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_trans_free_items)(void *, struct xfs_trans *, long unsigned int);
+
+typedef void (*btf_trace_xfs_iunlink_update_bucket)(void *, struct xfs_mount *, xfs_agnumber_t, unsigned int, xfs_agino_t, xfs_agino_t);
+
+typedef void (*btf_trace_xfs_iunlink_update_dinode)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agino_t, xfs_agino_t, xfs_agino_t);
+
+typedef void (*btf_trace_xfs_iunlink)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_iunlink_remove)(void *, struct xfs_inode *);
+
+typedef void (*btf_trace_xfs_iunlink_map_prev_fallback)(void *, struct xfs_mount *, xfs_agnumber_t);
+
+typedef void (*btf_trace_xfs_fs_mark_sick)(void *, struct xfs_mount *, unsigned int);
+
+typedef void (*btf_trace_xfs_fs_mark_healthy)(void *, struct xfs_mount *, unsigned int);
+
+typedef void (*btf_trace_xfs_fs_unfixed_corruption)(void *, struct xfs_mount *, unsigned int);
+
+typedef void (*btf_trace_xfs_rt_mark_sick)(void *, struct xfs_mount *, unsigned int);
+
+typedef void (*btf_trace_xfs_rt_mark_healthy)(void *, struct xfs_mount *, unsigned int);
+
+typedef void (*btf_trace_xfs_rt_unfixed_corruption)(void *, struct xfs_mount *, unsigned int);
+
+typedef void (*btf_trace_xfs_ag_mark_sick)(void *, struct xfs_mount *, xfs_agnumber_t, unsigned int);
+
+typedef void (*btf_trace_xfs_ag_mark_healthy)(void *, struct xfs_mount *, xfs_agnumber_t, unsigned int);
+
+typedef void (*btf_trace_xfs_ag_unfixed_corruption)(void *, struct xfs_mount *, xfs_agnumber_t, unsigned int);
+
+typedef void (*btf_trace_xfs_inode_mark_sick)(void *, struct xfs_inode *, unsigned int);
+
+typedef void (*btf_trace_xfs_inode_mark_healthy)(void *, struct xfs_inode *, unsigned int);
+
+typedef void (*btf_trace_xfs_iwalk_ag)(void *, struct xfs_mount *, xfs_agnumber_t, xfs_agino_t);
+
+typedef void (*btf_trace_xfs_iwalk_ag_rec)(void *, struct xfs_mount *, xfs_agnumber_t, struct xfs_inobt_rec_incore *);
+
+typedef void (*btf_trace_xfs_pwork_init)(void *, struct xfs_mount *, unsigned int, pid_t);
+
+typedef void (*btf_trace_kmem_alloc)(void *, ssize_t, int, long unsigned int);
+
+typedef void (*btf_trace_kmem_alloc_io)(void *, ssize_t, int, long unsigned int);
+
+typedef void (*btf_trace_kmem_alloc_large)(void *, ssize_t, int, long unsigned int);
+
+typedef void (*btf_trace_kmem_realloc)(void *, ssize_t, int, long unsigned int);
+
+typedef void (*btf_trace_kmem_zone_alloc)(void *, ssize_t, int, long unsigned int);
+
+typedef void (*btf_trace_xfs_check_new_dalign)(void *, struct xfs_mount *, int, xfs_ino_t);
+
+typedef void (*btf_trace_xfs_btree_commit_afakeroot)(void *, struct xfs_btree_cur *);
+
+typedef void (*btf_trace_xfs_btree_commit_ifakeroot)(void *, struct xfs_btree_cur *);
+
+typedef void (*btf_trace_xfs_btree_bload_level_geometry)(void *, struct xfs_btree_cur *, unsigned int, uint64_t, unsigned int, unsigned int, uint64_t, uint64_t);
+
+typedef void (*btf_trace_xfs_btree_bload_block)(void *, struct xfs_btree_cur *, unsigned int, uint64_t, uint64_t, union xfs_btree_ptr *, unsigned int);
+
+struct xfs_ag_geometry {
+ uint32_t ag_number;
+ uint32_t ag_length;
+ uint32_t ag_freeblks;
+ uint32_t ag_icount;
+ uint32_t ag_ifree;
+ uint32_t ag_sick;
+ uint32_t ag_checked;
+ uint32_t ag_flags;
+ uint64_t ag_reserved[12];
+};
+
+typedef struct xfs_sb xfs_sb_t;
+
+struct xfs_dsb {
+ __be32 sb_magicnum;
+ __be32 sb_blocksize;
+ __be64 sb_dblocks;
+ __be64 sb_rblocks;
+ __be64 sb_rextents;
+ uuid_t sb_uuid;
+ __be64 sb_logstart;
+ __be64 sb_rootino;
+ __be64 sb_rbmino;
+ __be64 sb_rsumino;
+ __be32 sb_rextsize;
+ __be32 sb_agblocks;
+ __be32 sb_agcount;
+ __be32 sb_rbmblocks;
+ __be32 sb_logblocks;
+ __be16 sb_versionnum;
+ __be16 sb_sectsize;
+ __be16 sb_inodesize;
+ __be16 sb_inopblock;
+ char sb_fname[12];
+ __u8 sb_blocklog;
+ __u8 sb_sectlog;
+ __u8 sb_inodelog;
+ __u8 sb_inopblog;
+ __u8 sb_agblklog;
+ __u8 sb_rextslog;
+ __u8 sb_inprogress;
+ __u8 sb_imax_pct;
+ __be64 sb_icount;
+ __be64 sb_ifree;
+ __be64 sb_fdblocks;
+ __be64 sb_frextents;
+ __be64 sb_uquotino;
+ __be64 sb_gquotino;
+ __be16 sb_qflags;
+ __u8 sb_flags;
+ __u8 sb_shared_vn;
+ __be32 sb_inoalignmt;
+ __be32 sb_unit;
+ __be32 sb_width;
+ __u8 sb_dirblklog;
+ __u8 sb_logsectlog;
+ __be16 sb_logsectsize;
+ __be32 sb_logsunit;
+ __be32 sb_features2;
+ __be32 sb_bad_features2;
+ __be32 sb_features_compat;
+ __be32 sb_features_ro_compat;
+ __be32 sb_features_incompat;
+ __be32 sb_features_log_incompat;
+ __le32 sb_crc;
+ __be32 sb_spino_align;
+ __be64 sb_pquotino;
+ __be64 sb_lsn;
+ uuid_t sb_meta_uuid;
+};
+
+struct xfs_agi {
+ __be32 agi_magicnum;
+ __be32 agi_versionnum;
+ __be32 agi_seqno;
+ __be32 agi_length;
+ __be32 agi_count;
+ __be32 agi_root;
+ __be32 agi_level;
+ __be32 agi_freecount;
+ __be32 agi_newino;
+ __be32 agi_dirino;
+ __be32 agi_unlinked[64];
+ uuid_t agi_uuid;
+ __be32 agi_crc;
+ __be32 agi_pad32;
+ __be64 agi_lsn;
+ __be32 agi_free_root;
+ __be32 agi_free_level;
+};
+
+struct xfs_agfl {
+ __be32 agfl_magicnum;
+ __be32 agfl_seqno;
+ uuid_t agfl_uuid;
+ __be64 agfl_lsn;
+ __be32 agfl_crc;
+} __attribute__((packed));
+
+typedef struct xfs_alloc_rec xfs_alloc_rec_t;
+
+struct aghdr_init_data {
+ xfs_agblock_t agno;
+ xfs_extlen_t agsize;
+ struct list_head buffer_list;
+ xfs_rfsblock_t nfree;
+ xfs_daddr_t daddr;
+ size_t numblks;
+ xfs_btnum_t type;
+};
+
+typedef void (*aghdr_init_work_f)(struct xfs_mount *, struct xfs_buf *, struct aghdr_init_data *);
+
+struct xfs_aghdr_grow_data {
+ xfs_daddr_t daddr;
+ size_t numblks;
+ const struct xfs_buf_ops *ops;
+ aghdr_init_work_f work;
+ xfs_btnum_t type;
+ bool need_init;
+};
+
+enum xfs_blft {
+ XFS_BLFT_UNKNOWN_BUF = 0,
+ XFS_BLFT_UDQUOT_BUF = 1,
+ XFS_BLFT_PDQUOT_BUF = 2,
+ XFS_BLFT_GDQUOT_BUF = 3,
+ XFS_BLFT_BTREE_BUF = 4,
+ XFS_BLFT_AGF_BUF = 5,
+ XFS_BLFT_AGFL_BUF = 6,
+ XFS_BLFT_AGI_BUF = 7,
+ XFS_BLFT_DINO_BUF = 8,
+ XFS_BLFT_SYMLINK_BUF = 9,
+ XFS_BLFT_DIR_BLOCK_BUF = 10,
+ XFS_BLFT_DIR_DATA_BUF = 11,
+ XFS_BLFT_DIR_FREE_BUF = 12,
+ XFS_BLFT_DIR_LEAF1_BUF = 13,
+ XFS_BLFT_DIR_LEAFN_BUF = 14,
+ XFS_BLFT_DA_NODE_BUF = 15,
+ XFS_BLFT_ATTR_LEAF_BUF = 16,
+ XFS_BLFT_ATTR_RMT_BUF = 17,
+ XFS_BLFT_SB_BUF = 18,
+ XFS_BLFT_RTBITMAP_BUF = 19,
+ XFS_BLFT_RTSUMMARY_BUF = 20,
+ XFS_BLFT_MAX_BUF = 32,
+};
+
+typedef struct xfs_mount xfs_mount_t;
+
+typedef struct xfs_perag xfs_perag_t;
+
+typedef struct xfs_btree_cur xfs_btree_cur_t;
+
+typedef struct xfs_alloc_arg xfs_alloc_arg_t;
+
+typedef int (*xfs_alloc_query_range_fn)(struct xfs_btree_cur *, struct xfs_alloc_rec_incore *, void *);
+
+typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *, xfs_agblock_t, void *);
+
+struct xfs_log_vec {
+ struct xfs_log_vec *lv_next;
+ int lv_niovecs;
+ struct xfs_log_iovec *lv_iovecp;
+ struct xfs_log_item *lv_item;
+ char *lv_buf;
+ int lv_bytes;
+ int lv_buf_len;
+ int lv_size;
+};
+
+typedef struct xfs_trans xfs_trans_t;
+
+struct xfs_extent_free_item {
+ xfs_fsblock_t xefi_startblock;
+ xfs_extlen_t xefi_blockcount;
+ struct list_head xefi_list;
+ struct xfs_owner_info xefi_oinfo;
+ bool xefi_skip_discard;
+};
+
+struct xfs_alloc_cur {
+ struct xfs_btree_cur *cnt;
+ struct xfs_btree_cur *bnolt;
+ struct xfs_btree_cur *bnogt;
+ xfs_extlen_t cur_len;
+ xfs_agblock_t rec_bno;
+ xfs_extlen_t rec_len;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_extlen_t diff;
+ unsigned int busy_gen;
+ bool busy;
+};
+
+struct xfs_alloc_query_range_info {
+ xfs_alloc_query_range_fn fn;
+ void *priv;
+};
+
+typedef unsigned int xfs_km_flags_t;
+
+typedef struct xfs_alloc_rec_incore xfs_alloc_rec_incore_t;
+
+struct xfs_attr_sf_hdr {
+ __be16 totsize;
+ __u8 count;
+ __u8 padding;
+};
+
+struct xfs_attr_sf_entry {
+ uint8_t namelen;
+ uint8_t valuelen;
+ uint8_t flags;
+ uint8_t nameval[1];
+};
+
+struct xfs_attr_shortform {
+ struct xfs_attr_sf_hdr hdr;
+ struct xfs_attr_sf_entry list[1];
+};
+
+typedef struct xfs_attr_shortform xfs_attr_shortform_t;
+
+typedef struct xfs_da_args xfs_da_args_t;
+
+struct xfs_da_state_blk {
+ struct xfs_buf *bp;
+ xfs_dablk_t blkno;
+ xfs_daddr_t disk_blkno;
+ int index;
+ xfs_dahash_t hashval;
+ int magic;
+};
+
+typedef struct xfs_da_state_blk xfs_da_state_blk_t;
+
+struct xfs_da_state_path {
+ int active;
+ xfs_da_state_blk_t blk[5];
+};
+
+typedef struct xfs_da_state_path xfs_da_state_path_t;
+
+struct xfs_da_state {
+ xfs_da_args_t *args;
+ struct xfs_mount *mp;
+ xfs_da_state_path_t path;
+ xfs_da_state_path_t altpath;
+ unsigned char inleaf;
+ unsigned char extravalid;
+ unsigned char extraafter;
+ xfs_da_state_blk_t extrablk;
+};
+
+typedef struct xfs_da_state xfs_da_state_t;
+
+struct xfs_da_blkinfo {
+ __be32 forw;
+ __be32 back;
+ __be16 magic;
+ __be16 pad;
+};
+
+typedef struct xfs_da_blkinfo xfs_da_blkinfo_t;
+
+struct xfs_da3_blkinfo {
+ struct xfs_da_blkinfo hdr;
+ __be32 crc;
+ __be64 blkno;
+ __be64 lsn;
+ uuid_t uuid;
+ __be64 owner;
+};
+
+struct xfs_da_node_hdr {
+ struct xfs_da_blkinfo info;
+ __be16 __count;
+ __be16 __level;
+};
+
+struct xfs_da_intnode {
+ struct xfs_da_node_hdr hdr;
+ struct xfs_da_node_entry __btree[0];
+};
+
+struct xfs_attr_leaf_map {
+ __be16 base;
+ __be16 size;
+};
+
+typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
+
+struct xfs_attr_leaf_hdr {
+ xfs_da_blkinfo_t info;
+ __be16 count;
+ __be16 usedbytes;
+ __be16 firstused;
+ __u8 holes;
+ __u8 pad1;
+ xfs_attr_leaf_map_t freemap[3];
+};
+
+typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
+
+struct xfs_attr_leaf_entry {
+ __be32 hashval;
+ __be16 nameidx;
+ __u8 flags;
+ __u8 pad2;
+};
+
+typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
+
+struct xfs_attr_leaf_name_local {
+ __be16 valuelen;
+ __u8 namelen;
+ __u8 nameval[1];
+};
+
+typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
+
+struct xfs_attr_leaf_name_remote {
+ __be32 valueblk;
+ __be32 valuelen;
+ __u8 namelen;
+ __u8 name[1];
+};
+
+typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
+
+struct xfs_attr_leafblock {
+ xfs_attr_leaf_hdr_t hdr;
+ xfs_attr_leaf_entry_t entries[1];
+};
+
+typedef struct xfs_attr_leafblock xfs_attr_leafblock_t;
+
+struct xfs_attr3_leaf_hdr {
+ struct xfs_da3_blkinfo info;
+ __be16 count;
+ __be16 usedbytes;
+ __be16 firstused;
+ __u8 holes;
+ __u8 pad1;
+ struct xfs_attr_leaf_map freemap[3];
+ __be32 pad2;
+};
+
+struct xfs_attr3_leafblock {
+ struct xfs_attr3_leaf_hdr hdr;
+ struct xfs_attr_leaf_entry entries[1];
+};
+
+struct xfs_da3_icnode_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t level;
+ struct xfs_da_node_entry *btree;
+};
+
+typedef struct xfs_inode xfs_inode_t;
+
+typedef struct xfs_attr_sf_hdr xfs_attr_sf_hdr_t;
+
+typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t;
+
+struct xfs_attr3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t usedbytes;
+ uint32_t firstused;
+ __u8 holes;
+ struct {
+ uint16_t base;
+ uint16_t size;
+ } freemap[3];
+};
+
+struct xfs_attr3_rmt_hdr {
+ __be32 rm_magic;
+ __be32 rm_offset;
+ __be32 rm_bytes;
+ __be32 rm_crc;
+ uuid_t rm_uuid;
+ __be64 rm_owner;
+ __be64 rm_blkno;
+ __be64 rm_lsn;
+};
+
+typedef s16 int16_t;
+
+typedef int16_t xfs_aextnum_t;
+
+typedef struct xfs_bmbt_irec xfs_bmbt_irec_t;
+
+typedef struct xfs_bmbt_key xfs_bmbt_key_t;
+
+typedef __be64 xfs_bmbt_ptr_t;
+
+struct xfs_bmalloca {
+ struct xfs_trans *tp;
+ struct xfs_inode *ip;
+ struct xfs_bmbt_irec prev;
+ struct xfs_bmbt_irec got;
+ xfs_fileoff_t offset;
+ xfs_extlen_t length;
+ xfs_fsblock_t blkno;
+ struct xfs_btree_cur *cur;
+ struct xfs_iext_cursor icur;
+ int nallocs;
+ int logflags;
+ xfs_extlen_t total;
+ xfs_extlen_t minlen;
+ xfs_extlen_t minleft;
+ bool eof;
+ bool wasdel;
+ bool aeof;
+ bool conv;
+ int datatype;
+ int flags;
+};
+
+enum xfs_bmap_intent_type {
+ XFS_BMAP_MAP = 1,
+ XFS_BMAP_UNMAP = 2,
+};
+
+struct xfs_bmap_intent {
+ struct list_head bi_list;
+ enum xfs_bmap_intent_type bi_type;
+ struct xfs_inode *bi_owner;
+ int bi_whichfork;
+ struct xfs_bmbt_irec bi_bmap;
+};
+
+struct xfs_iread_state {
+ struct xfs_iext_cursor icur;
+ xfs_extnum_t loaded;
+};
+
+struct xfs_bmdr_block {
+ __be16 bb_level;
+ __be16 bb_numrecs;
+};
+
+typedef struct xfs_bmdr_block xfs_bmdr_block_t;
+
+typedef uint64_t xfs_bmbt_rec_base_t;
+
+typedef __be64 xfs_bmdr_ptr_t;
+
+typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *, union xfs_btree_rec *, void *);
+
+typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *, int, void *);
+
+struct xfs_btree_split_args {
+ struct xfs_btree_cur *cur;
+ int level;
+ union xfs_btree_ptr *ptrp;
+ union xfs_btree_key *key;
+ struct xfs_btree_cur **curp;
+ int *stat;
+ int result;
+ bool kswapd;
+ struct completion *done;
+ struct work_struct work;
+};
+
+struct xfs_btree_block_change_owner_info {
+ uint64_t new_owner;
+ struct list_head *buffer_list;
+};
+
+typedef int (*xfs_btree_bload_get_record_fn)(struct xfs_btree_cur *, void *);
+
+typedef int (*xfs_btree_bload_claim_block_fn)(struct xfs_btree_cur *, union xfs_btree_ptr *, void *);
+
+typedef size_t (*xfs_btree_bload_iroot_size_fn)(struct xfs_btree_cur *, unsigned int, void *);
+
+struct xfs_btree_bload {
+ xfs_btree_bload_get_record_fn get_record;
+ xfs_btree_bload_claim_block_fn claim_block;
+ xfs_btree_bload_iroot_size_fn iroot_size;
+ uint64_t nr_records;
+ int leaf_slack;
+ int node_slack;
+ uint64_t nr_blocks;
+ unsigned int btree_height;
+};
+
+struct xfs_da3_node_hdr {
+ struct xfs_da3_blkinfo info;
+ __be16 __count;
+ __be16 __level;
+ __be32 __pad32;
+};
+
+typedef struct xfs_da_intnode xfs_da_intnode_t;
+
+struct xfs_da3_intnode {
+ struct xfs_da3_node_hdr hdr;
+ struct xfs_da_node_entry __btree[0];
+};
+
+struct xfs_dir2_leaf_hdr {
+ xfs_da_blkinfo_t info;
+ __be16 count;
+ __be16 stale;
+};
+
+typedef struct xfs_dir2_leaf_hdr xfs_dir2_leaf_hdr_t;
+
+struct xfs_dir2_leaf_entry {
+ __be32 hashval;
+ __be32 address;
+};
+
+typedef struct xfs_dir2_leaf_entry xfs_dir2_leaf_entry_t;
+
+struct xfs_dir2_leaf {
+ xfs_dir2_leaf_hdr_t hdr;
+ xfs_dir2_leaf_entry_t __ents[0];
+};
+
+typedef struct xfs_dir2_leaf xfs_dir2_leaf_t;
+
+struct xfs_dir3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t stale;
+ struct xfs_dir2_leaf_entry *ents;
+};
+
+struct xfs_inode_log_item {
+ struct xfs_log_item ili_item;
+ struct xfs_inode *ili_inode;
+ xfs_lsn_t ili_flush_lsn;
+ xfs_lsn_t ili_last_lsn;
+ short unsigned int ili_lock_flags;
+ short unsigned int ili_logged;
+ unsigned int ili_last_fields;
+ unsigned int ili_fields;
+ unsigned int ili_fsync_fields;
+};
+
+typedef xfs_off_t xfs_dir2_off_t;
+
+typedef uint32_t xfs_dir2_db_t;
+
+struct xfs_dir2_sf_hdr {
+ uint8_t count;
+ uint8_t i8count;
+ uint8_t parent[8];
+};
+
+typedef struct xfs_dir2_sf_hdr xfs_dir2_sf_hdr_t;
+
+typedef uint32_t xfs_dir2_dataptr_t;
+
+struct xfs_dir2_sf_entry {
+ __u8 namelen;
+ __u8 offset[2];
+ __u8 name[0];
+};
+
+typedef struct xfs_dir2_sf_entry xfs_dir2_sf_entry_t;
+
+struct xfs_dir2_data_free {
+ __be16 offset;
+ __be16 length;
+};
+
+typedef struct xfs_dir2_data_free xfs_dir2_data_free_t;
+
+struct xfs_dir2_data_hdr {
+ __be32 magic;
+ xfs_dir2_data_free_t bestfree[3];
+};
+
+typedef struct xfs_dir2_data_hdr xfs_dir2_data_hdr_t;
+
+struct xfs_dir3_blk_hdr {
+ __be32 magic;
+ __be32 crc;
+ __be64 blkno;
+ __be64 lsn;
+ uuid_t uuid;
+ __be64 owner;
+};
+
+struct xfs_dir2_data_entry {
+ __be64 inumber;
+ __u8 namelen;
+ __u8 name[0];
+};
+
+typedef struct xfs_dir2_data_entry xfs_dir2_data_entry_t;
+
+struct xfs_dir2_data_unused {
+ __be16 freetag;
+ __be16 length;
+ __be16 tag;
+};
+
+typedef struct xfs_dir2_data_unused xfs_dir2_data_unused_t;
+
+struct xfs_dir2_leaf_tail {
+ __be32 bestcount;
+};
+
+typedef struct xfs_dir2_leaf_tail xfs_dir2_leaf_tail_t;
+
+struct xfs_dir2_block_tail {
+ __be32 count;
+ __be32 stale;
+};
+
+typedef struct xfs_dir2_block_tail xfs_dir2_block_tail_t;
+
+struct xfs_dir3_data_hdr {
+ struct xfs_dir3_blk_hdr hdr;
+ xfs_dir2_data_free_t best_free[3];
+ __be32 pad;
+};
+
+typedef uint16_t xfs_dir2_data_off_t;
+
+struct xfs_dir3_leaf_hdr {
+ struct xfs_da3_blkinfo info;
+ __be16 count;
+ __be16 stale;
+ __be32 pad;
+};
+
+struct xfs_dir3_leaf {
+ struct xfs_dir3_leaf_hdr hdr;
+ struct xfs_dir2_leaf_entry __ents[0];
+};
+
+struct xfs_dir3_icfree_hdr {
+ uint32_t magic;
+ uint32_t firstdb;
+ uint32_t nvalid;
+ uint32_t nused;
+ __be16 *bests;
+};
+
+struct xfs_dir2_free_hdr {
+ __be32 magic;
+ __be32 firstdb;
+ __be32 nvalid;
+ __be32 nused;
+};
+
+typedef struct xfs_dir2_free_hdr xfs_dir2_free_hdr_t;
+
+struct xfs_dir2_free {
+ xfs_dir2_free_hdr_t hdr;
+ __be16 bests[0];
+};
+
+typedef struct xfs_dir2_free xfs_dir2_free_t;
+
+struct xfs_dir3_free_hdr {
+ struct xfs_dir3_blk_hdr hdr;
+ __be32 firstdb;
+ __be32 nvalid;
+ __be32 nused;
+ __be32 pad;
+};
+
+struct xfs_dir3_free {
+ struct xfs_dir3_free_hdr hdr;
+ __be16 bests[0];
+};
+
+typedef uint32_t xfs_dqid_t;
+
+struct xfs_dqblk {
+ struct xfs_disk_dquot dd_diskdq;
+ char dd_fill[4];
+ __be32 dd_crc;
+ __be64 dd_lsn;
+ uuid_t dd_uuid;
+};
+
+typedef uint16_t xfs_qwarncnt_t;
+
+struct xfs_def_quota {
+ time64_t btimelimit;
+ time64_t itimelimit;
+ time64_t rtbtimelimit;
+ xfs_qwarncnt_t bwarnlimit;
+ xfs_qwarncnt_t iwarnlimit;
+ xfs_qwarncnt_t rtbwarnlimit;
+ xfs_qcnt_t bhardlimit;
+ xfs_qcnt_t bsoftlimit;
+ xfs_qcnt_t ihardlimit;
+ xfs_qcnt_t isoftlimit;
+ xfs_qcnt_t rtbhardlimit;
+ xfs_qcnt_t rtbsoftlimit;
+};
+
+struct xfs_quotainfo {
+ struct xarray qi_uquota_tree;
+ struct xarray qi_gquota_tree;
+ struct xarray qi_pquota_tree;
+ struct mutex qi_tree_lock;
+ struct xfs_inode *qi_uquotaip;
+ struct xfs_inode *qi_gquotaip;
+ struct xfs_inode *qi_pquotaip;
+ struct list_lru qi_lru;
+ int qi_dquots;
+ struct mutex qi_quotaofflock;
+ xfs_filblks_t qi_dqchunklen;
+ uint qi_dqperchunk;
+ struct xfs_def_quota qi_usr_default;
+ struct xfs_def_quota qi_grp_default;
+ struct xfs_def_quota qi_prj_default;
+ struct shrinker qi_shrinker;
+};
+
+struct xfs_dqtrx {
+ struct xfs_dquot *qt_dquot;
+ uint64_t qt_blk_res;
+ int64_t qt_bcount_delta;
+ int64_t qt_delbcnt_delta;
+ uint64_t qt_rtblk_res;
+ uint64_t qt_rtblk_res_used;
+ int64_t qt_rtbcount_delta;
+ int64_t qt_delrtb_delta;
+ uint64_t qt_ino_res;
+ uint64_t qt_ino_res_used;
+ int64_t qt_icount_delta;
+};
+
+struct xfs_dquot_acct {
+ struct xfs_dqtrx dqs[6];
+};
+
+enum {
+ XFS_QM_TRANS_USR = 0,
+ XFS_QM_TRANS_GRP = 1,
+ XFS_QM_TRANS_PRJ = 2,
+ XFS_QM_TRANS_DQTYPES = 3,
+};
+
+struct xfs_timestamp {
+ __be32 t_sec;
+ __be32 t_nsec;
+};
+
+typedef struct xfs_timestamp xfs_timestamp_t;
+
+struct xfs_dinode {
+ __be16 di_magic;
+ __be16 di_mode;
+ __u8 di_version;
+ __u8 di_format;
+ __be16 di_onlink;
+ __be32 di_uid;
+ __be32 di_gid;
+ __be32 di_nlink;
+ __be16 di_projid_lo;
+ __be16 di_projid_hi;
+ __u8 di_pad[6];
+ __be16 di_flushiter;
+ xfs_timestamp_t di_atime;
+ xfs_timestamp_t di_mtime;
+ xfs_timestamp_t di_ctime;
+ __be64 di_size;
+ __be64 di_nblocks;
+ __be32 di_extsize;
+ __be32 di_nextents;
+ __be16 di_anextents;
+ __u8 di_forkoff;
+ __s8 di_aformat;
+ __be32 di_dmevmask;
+ __be16 di_dmstate;
+ __be16 di_flags;
+ __be32 di_gen;
+ __be32 di_next_unlinked;
+ __le32 di_crc;
+ __be64 di_changecount;
+ __be64 di_lsn;
+ __be64 di_flags2;
+ __be32 di_cowextsize;
+ __u8 di_pad2[12];
+ xfs_timestamp_t di_crtime;
+ __be64 di_ino;
+ uuid_t di_uuid;
+};
+
+typedef struct xfs_inobt_rec_incore xfs_inobt_rec_incore_t;
+
+struct xfs_icluster {
+ bool deleted;
+ xfs_ino_t first_ino;
+ uint64_t alloc;
+};
+
+struct xfs_ialloc_count_inodes {
+ xfs_agino_t count;
+ xfs_agino_t freecount;
+};
+
+struct xfs_iext_rec {
+ uint64_t lo;
+ uint64_t hi;
+};
+
+struct xfs_iext_leaf {
+ struct xfs_iext_rec recs[15];
+ struct xfs_iext_leaf *prev;
+ struct xfs_iext_leaf *next;
+};
+
+enum {
+ NODE_SIZE = 256,
+ KEYS_PER_NODE = 16,
+ RECS_PER_LEAF = 15,
+};
+
+struct xfs_iext_node {
+ uint64_t keys[16];
+ void *ptrs[16];
+};
+
+typedef __u32 xfs_dev_t;
+
+typedef struct xfs_dinode xfs_dinode_t;
+
+typedef uint32_t prid_t;
+
+struct xfs_ictimestamp {
+ int32_t t_sec;
+ int32_t t_nsec;
+};
+
+typedef struct xfs_ictimestamp xfs_ictimestamp_t;
+
+struct xfs_log_dinode {
+ uint16_t di_magic;
+ uint16_t di_mode;
+ int8_t di_version;
+ int8_t di_format;
+ uint8_t di_pad3[2];
+ uint32_t di_uid;
+ uint32_t di_gid;
+ uint32_t di_nlink;
+ uint16_t di_projid_lo;
+ uint16_t di_projid_hi;
+ uint8_t di_pad[6];
+ uint16_t di_flushiter;
+ xfs_ictimestamp_t di_atime;
+ xfs_ictimestamp_t di_mtime;
+ xfs_ictimestamp_t di_ctime;
+ xfs_fsize_t di_size;
+ xfs_rfsblock_t di_nblocks;
+ xfs_extlen_t di_extsize;
+ xfs_extnum_t di_nextents;
+ xfs_aextnum_t di_anextents;
+ uint8_t di_forkoff;
+ int8_t di_aformat;
+ uint32_t di_dmevmask;
+ uint16_t di_dmstate;
+ uint16_t di_flags;
+ uint32_t di_gen;
+ xfs_agino_t di_next_unlinked;
+ uint32_t di_crc;
+ uint64_t di_changecount;
+ xfs_lsn_t di_lsn;
+ uint64_t di_flags2;
+ uint32_t di_cowextsize;
+ uint8_t di_pad2[12];
+ xfs_ictimestamp_t di_crtime;
+ xfs_ino_t di_ino;
+ uuid_t di_uuid;
+};
+
+typedef int (*xfs_rmap_query_range_fn)(struct xfs_btree_cur *, struct xfs_rmap_irec *, void *);
+
+enum xfs_rmap_intent_type {
+ XFS_RMAP_MAP = 0,
+ XFS_RMAP_MAP_SHARED = 1,
+ XFS_RMAP_UNMAP = 2,
+ XFS_RMAP_UNMAP_SHARED = 3,
+ XFS_RMAP_CONVERT = 4,
+ XFS_RMAP_CONVERT_SHARED = 5,
+ XFS_RMAP_ALLOC = 6,
+ XFS_RMAP_FREE = 7,
+};
+
+struct xfs_rmap_intent {
+ struct list_head ri_list;
+ enum xfs_rmap_intent_type ri_type;
+ uint64_t ri_owner;
+ int ri_whichfork;
+ struct xfs_bmbt_irec ri_bmap;
+};
+
+struct xfs_find_left_neighbor_info {
+ struct xfs_rmap_irec high;
+ struct xfs_rmap_irec *irec;
+ int *stat;
+};
+
+struct xfs_rmap_query_range_info {
+ xfs_rmap_query_range_fn fn;
+ void *priv;
+};
+
+struct xfs_rmap_key_state {
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+};
+
+enum xfs_refcount_intent_type {
+ XFS_REFCOUNT_INCREASE = 1,
+ XFS_REFCOUNT_DECREASE = 2,
+ XFS_REFCOUNT_ALLOC_COW = 3,
+ XFS_REFCOUNT_FREE_COW = 4,
+};
+
+struct xfs_refcount_intent {
+ struct list_head ri_list;
+ enum xfs_refcount_intent_type ri_type;
+ xfs_fsblock_t ri_startblock;
+ xfs_extlen_t ri_blockcount;
+};
+
+enum xfs_refc_adjust_op {
+ XFS_REFCOUNT_ADJUST_INCREASE = 1,
+ XFS_REFCOUNT_ADJUST_DECREASE = -1,
+ XFS_REFCOUNT_ADJUST_COW_ALLOC = 0,
+ XFS_REFCOUNT_ADJUST_COW_FREE = -1,
+};
+
+struct xfs_refcount_recovery {
+ struct list_head rr_list;
+ struct xfs_refcount_irec rr_rrec;
+};
+
+struct xfs_fsop_geom {
+ __u32 blocksize;
+ __u32 rtextsize;
+ __u32 agblocks;
+ __u32 agcount;
+ __u32 logblocks;
+ __u32 sectsize;
+ __u32 inodesize;
+ __u32 imaxpct;
+ __u64 datablocks;
+ __u64 rtblocks;
+ __u64 rtextents;
+ __u64 logstart;
+ unsigned char uuid[16];
+ __u32 sunit;
+ __u32 swidth;
+ __s32 version;
+ __u32 flags;
+ __u32 logsectsize;
+ __u32 rtsectsize;
+ __u32 dirblocksize;
+ __u32 logsunit;
+ uint32_t sick;
+ uint32_t checked;
+ __u64 reserved[17];
+};
+
+typedef struct xfs_dsb xfs_dsb_t;
+
+struct xfs_dsymlink_hdr {
+ __be32 sl_magic;
+ __be32 sl_offset;
+ __be32 sl_bytes;
+ __be32 sl_crc;
+ uuid_t sl_uuid;
+ __be64 sl_owner;
+ __be64 sl_blkno;
+ __be64 sl_lsn;
+};
+
+struct xfs_ail {
+ struct xfs_mount *ail_mount;
+ struct task_struct *ail_task;
+ struct list_head ail_head;
+ xfs_lsn_t ail_target;
+ xfs_lsn_t ail_target_prev;
+ struct list_head ail_cursors;
+ spinlock_t ail_lock;
+ xfs_lsn_t ail_last_pushed_lsn;
+ int ail_log_flush;
+ struct list_head ail_buf_list;
+ wait_queue_head_t ail_empty;
+};
+
+struct xfs_writepage_ctx {
+ struct iomap_writepage_ctx ctx;
+ unsigned int data_seq;
+ unsigned int cow_seq;
+};
+
+struct xfs_attr_sf_sort {
+ uint8_t entno;
+ uint8_t namelen;
+ uint8_t valuelen;
+ uint8_t flags;
+ xfs_dahash_t hash;
+ unsigned char *name;
+};
+
+typedef struct xfs_attr_sf_sort xfs_attr_sf_sort_t;
+
+typedef uint64_t xfs_ufsize_t;
+
+struct getbmapx {
+ __s64 bmv_offset;
+ __s64 bmv_block;
+ __s64 bmv_length;
+ __s32 bmv_count;
+ __s32 bmv_entries;
+ __s32 bmv_iflags;
+ __s32 bmv_oflags;
+ __s32 bmv_unused1;
+ __s32 bmv_unused2;
+};
+
+struct xfs_bstime {
+ __kernel_long_t tv_sec;
+ __s32 tv_nsec;
+};
+
+typedef struct xfs_bstime xfs_bstime_t;
+
+struct xfs_bstat {
+ __u64 bs_ino;
+ __u16 bs_mode;
+ __u16 bs_nlink;
+ __u32 bs_uid;
+ __u32 bs_gid;
+ __u32 bs_rdev;
+ __s32 bs_blksize;
+ __s64 bs_size;
+ xfs_bstime_t bs_atime;
+ xfs_bstime_t bs_mtime;
+ xfs_bstime_t bs_ctime;
+ int64_t bs_blocks;
+ __u32 bs_xflags;
+ __s32 bs_extsize;
+ __s32 bs_extents;
+ __u32 bs_gen;
+ __u16 bs_projid_lo;
+ __u16 bs_forkoff;
+ __u16 bs_projid_hi;
+ uint16_t bs_sick;
+ uint16_t bs_checked;
+ unsigned char bs_pad[2];
+ __u32 bs_cowextsize;
+ __u32 bs_dmevmask;
+ __u16 bs_dmstate;
+ __u16 bs_aextents;
+};
+
+struct xfs_swapext {
+ int64_t sx_version;
+ int64_t sx_fdtarget;
+ int64_t sx_fdtmp;
+ xfs_off_t sx_offset;
+ xfs_off_t sx_length;
+ char sx_pad[16];
+ struct xfs_bstat sx_stat;
+};
+
+struct kgetbmap {
+ __s64 bmv_offset;
+ __s64 bmv_block;
+ __s64 bmv_length;
+ __s32 bmv_oflags;
+};
+
+struct xfs_fid64 {
+ u64 ino;
+ u32 gen;
+ u64 parent_ino;
+ u32 parent_gen;
+} __attribute__((packed));
+
+struct xfs_extent_busy {
+ struct rb_node rb_node;
+ struct list_head list;
+ xfs_agnumber_t agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t length;
+ unsigned int flags;
+};
+
+enum layout_break_reason {
+ BREAK_WRITE = 0,
+ BREAK_UNMAP = 1,
+};
+
+enum xfs_prealloc_flags {
+ XFS_PREALLOC_SET = 2,
+ XFS_PREALLOC_CLEAR = 4,
+ XFS_PREALLOC_SYNC = 8,
+ XFS_PREALLOC_INVISIBLE = 16,
+};
+
+struct xfs_eofblocks {
+ __u32 eof_flags;
+ kuid_t eof_uid;
+ kgid_t eof_gid;
+ prid_t eof_prid;
+ __u64 eof_min_file_size;
+};
+
+struct xfs_mru_cache_elem {
+ struct list_head list_node;
+ long unsigned int key;
+};
+
+struct xfs_fstrm_item {
+ struct xfs_mru_cache_elem mru;
+ xfs_agnumber_t ag;
+};
+
+enum xfs_fstrm_alloc {
+ XFS_PICK_USERDATA = 1,
+ XFS_PICK_LOWSPACE = 2,
+};
+
+struct xfs_fsmap_head {
+ uint32_t fmh_iflags;
+ uint32_t fmh_oflags;
+ unsigned int fmh_count;
+ unsigned int fmh_entries;
+ struct xfs_fsmap fmh_keys[2];
+};
+
+typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
+
+struct xfs_getfsmap_info {
+ struct xfs_fsmap_head *head;
+ xfs_fsmap_format_t formatter;
+ void *format_arg;
+ struct xfs_buf *agf_bp;
+ xfs_daddr_t next_daddr;
+ u64 missing_owner;
+ u32 dev;
+ xfs_agnumber_t agno;
+ struct xfs_rmap_irec low;
+ struct xfs_rmap_irec high;
+ bool last;
+};
+
+struct xfs_getfsmap_dev {
+ u32 dev;
+ int (*fn)(struct xfs_trans *, struct xfs_fsmap *, struct xfs_getfsmap_info *);
+};
+
+struct xfs_fsop_counts {
+ __u64 freedata;
+ __u64 freertx;
+ __u64 freeino;
+ __u64 allocino;
+};
+
+typedef struct xfs_fsop_counts xfs_fsop_counts_t;
+
+struct xfs_fsop_resblks {
+ __u64 resblks;
+ __u64 resblks_avail;
+};
+
+typedef struct xfs_fsop_resblks xfs_fsop_resblks_t;
+
+struct xfs_growfs_data {
+ __u64 newblocks;
+ __u32 imaxpct;
+};
+
+typedef struct xfs_growfs_data xfs_growfs_data_t;
+
+struct xfs_growfs_log {
+ __u32 newblocks;
+ __u32 isint;
+};
+
+typedef struct xfs_growfs_log xfs_growfs_log_t;
+
+struct xfs_bulkstat {
+ uint64_t bs_ino;
+ uint64_t bs_size;
+ uint64_t bs_blocks;
+ uint64_t bs_xflags;
+ int64_t bs_atime;
+ int64_t bs_mtime;
+ int64_t bs_ctime;
+ int64_t bs_btime;
+ uint32_t bs_gen;
+ uint32_t bs_uid;
+ uint32_t bs_gid;
+ uint32_t bs_projectid;
+ uint32_t bs_atime_nsec;
+ uint32_t bs_mtime_nsec;
+ uint32_t bs_ctime_nsec;
+ uint32_t bs_btime_nsec;
+ uint32_t bs_blksize;
+ uint32_t bs_rdev;
+ uint32_t bs_cowextsize_blks;
+ uint32_t bs_extsize_blks;
+ uint32_t bs_nlink;
+ uint32_t bs_extents;
+ uint32_t bs_aextents;
+ uint16_t bs_version;
+ uint16_t bs_forkoff;
+ uint16_t bs_sick;
+ uint16_t bs_checked;
+ uint16_t bs_mode;
+ uint16_t bs_pad2;
+ uint64_t bs_pad[7];
+};
+
+struct ioctl_sick_map {
+ unsigned int sick_mask;
+ unsigned int ioctl_mask;
+};
+
+struct dioattr {
+ __u32 d_mem;
+ __u32 d_miniosz;
+ __u32 d_maxiosz;
+};
+
+struct xfs_flock64 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len;
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4];
+};
+
+typedef struct xfs_flock64 xfs_flock64_t;
+
+struct xfs_growfs_rt {
+ __u64 newblocks;
+ __u32 extsize;
+};
+
+typedef struct xfs_growfs_rt xfs_growfs_rt_t;
+
+struct xfs_fsop_bulkreq {
+ __u64 *lastip;
+ __s32 icount;
+ void *ubuffer;
+ __s32 *ocount;
+};
+
+struct xfs_inogrp {
+ __u64 xi_startino;
+ __s32 xi_alloccount;
+ __u64 xi_allocmask;
+};
+
+struct xfs_inumbers {
+ uint64_t xi_startino;
+ uint64_t xi_allocmask;
+ uint8_t xi_alloccount;
+ uint8_t xi_version;
+ uint8_t xi_padding[6];
+};
+
+struct xfs_bulk_ireq {
+ uint64_t ino;
+ uint32_t flags;
+ uint32_t icount;
+ uint32_t ocount;
+ uint32_t agno;
+ uint64_t reserved[5];
+};
+
+struct xfs_bulkstat_req {
+ struct xfs_bulk_ireq hdr;
+ struct xfs_bulkstat bulkstat[0];
+};
+
+struct xfs_inumbers_req {
+ struct xfs_bulk_ireq hdr;
+ struct xfs_inumbers inumbers[0];
+};
+
+struct xfs_error_injection {
+ __s32 fd;
+ __s32 errtag;
+};
+
+typedef struct xfs_error_injection xfs_error_injection_t;
+
+struct xfs_fs_eofblocks {
+ __u32 eof_version;
+ __u32 eof_flags;
+ uid_t eof_uid;
+ gid_t eof_gid;
+ prid_t eof_prid;
+ __u32 pad32;
+ __u64 eof_min_file_size;
+ __u64 pad64[12];
+};
+
+struct xfs_fsop_handlereq {
+ __u32 fd;
+ void *path;
+ __u32 oflags;
+ void *ihandle;
+ __u32 ihandlen;
+ void *ohandle;
+ __u32 *ohandlen;
+};
+
+typedef struct xfs_fsop_handlereq xfs_fsop_handlereq_t;
+
+struct xfs_attrlist_cursor {
+ __u32 opaque[4];
+};
+
+struct xfs_attrlist {
+ __s32 al_count;
+ __s32 al_more;
+ __s32 al_offset[1];
+};
+
+struct xfs_attrlist_ent {
+ __u32 a_valuelen;
+ char a_name[1];
+};
+
+struct xfs_fsop_attrlist_handlereq {
+ struct xfs_fsop_handlereq hreq;
+ struct xfs_attrlist_cursor pos;
+ __u32 flags;
+ __u32 buflen;
+ void *buffer;
+};
+
+struct xfs_attr_multiop {
+ __u32 am_opcode;
+ __s32 am_error;
+ void *am_attrname;
+ void *am_attrvalue;
+ __u32 am_length;
+ __u32 am_flags;
+};
+
+typedef struct xfs_attr_multiop xfs_attr_multiop_t;
+
+struct xfs_fsop_attrmulti_handlereq {
+ struct xfs_fsop_handlereq hreq;
+ __u32 opcount;
+ struct xfs_attr_multiop *ops;
+};
+
+typedef struct xfs_fsop_attrmulti_handlereq xfs_fsop_attrmulti_handlereq_t;
+
+typedef struct {
+ __u32 val[2];
+} xfs_fsid_t;
+
+struct xfs_fid {
+ __u16 fid_len;
+ __u16 fid_pad;
+ __u32 fid_gen;
+ __u64 fid_ino;
+};
+
+typedef struct xfs_fid xfs_fid_t;
+
+struct xfs_handle {
+ union {
+ __s64 align;
+ xfs_fsid_t _ha_fsid;
+ } ha_u;
+ xfs_fid_t ha_fid;
+};
+
+typedef struct xfs_handle xfs_handle_t;
+
+typedef struct xfs_swapext xfs_swapext_t;
+
+struct xfs_scrub_metadata {
+ __u32 sm_type;
+ __u32 sm_flags;
+ __u64 sm_ino;
+ __u32 sm_gen;
+ __u32 sm_agno;
+ __u64 sm_reserved[5];
+};
+
+struct xfs_ibulk {
+ struct xfs_mount *mp;
+ void *ubuffer;
+ xfs_ino_t startino;
+ unsigned int icount;
+ unsigned int ocount;
+ unsigned int flags;
+};
+
+struct getfsmap_info___2 {
+ struct xfs_mount *mp;
+ struct fsmap_head *data;
+ unsigned int idx;
+ __u32 last_flags;
+};
+
+struct xfs_iunlink {
+ struct rhash_head iu_rhash_head;
+ xfs_agino_t iu_agino;
+ xfs_agino_t iu_next_unlinked;
+};
+
+typedef int (*bulkstat_one_fmt_pf)(struct xfs_ibulk *, const struct xfs_bulkstat *);
+
+typedef int (*inumbers_fmt_pf)(struct xfs_ibulk *, const struct xfs_inumbers *);
+
+struct xfs_bstat_chunk {
+ bulkstat_one_fmt_pf formatter;
+ struct xfs_ibulk *breq;
+ struct xfs_bulkstat *buf;
+};
+
+struct xfs_inumbers_chunk {
+ inumbers_fmt_pf formatter;
+ struct xfs_ibulk *breq;
+};
+
+typedef int (*xfs_iwalk_fn)(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, void *);
+
+typedef int (*xfs_inobt_walk_fn)(struct xfs_mount *, struct xfs_trans *, xfs_agnumber_t, const struct xfs_inobt_rec_incore *, void *);
+
+struct xfs_pwork;
+
+typedef int (*xfs_pwork_work_fn)(struct xfs_mount *, struct xfs_pwork *);
+
+struct xfs_pwork_ctl;
+
+struct xfs_pwork {
+ struct work_struct work;
+ struct xfs_pwork_ctl *pctl;
+};
+
+struct xfs_pwork_ctl {
+ struct workqueue_struct *wq;
+ struct xfs_mount *mp;
+ xfs_pwork_work_fn work_fn;
+ struct wait_queue_head poll_wait;
+ atomic_t nr_work;
+ int error;
+};
+
+struct xfs_iwalk_ag {
+ struct xfs_pwork pwork;
+ struct xfs_mount *mp;
+ struct xfs_trans *tp;
+ xfs_ino_t startino;
+ struct xfs_inobt_rec_incore *recs;
+ unsigned int sz_recs;
+ unsigned int nr_recs;
+ xfs_iwalk_fn iwalk_fn;
+ xfs_inobt_walk_fn inobt_walk_fn;
+ void *data;
+ unsigned int trim_start: 1;
+ unsigned int skip_empty: 1;
+};
+
+typedef void (*xfs_mru_cache_free_func_t)(void *, struct xfs_mru_cache_elem *);
+
+struct xfs_mru_cache___2 {
+ struct xarray store;
+ struct list_head *lists;
+ struct list_head reap_list;
+ spinlock_t lock;
+ unsigned int grp_count;
+ unsigned int grp_time;
+ unsigned int lru_grp;
+ long unsigned int time_zero;
+ xfs_mru_cache_free_func_t free_func;
+ struct delayed_work work;
+ unsigned int queued;
+ void *data;
+};
+
+struct xstats_entry {
+ char *desc;
+ int endpoint;
+};
+
+enum xfs_dax_mode {
+ XFS_DAX_INODE = 0,
+ XFS_DAX_ALWAYS = 1,
+ XFS_DAX_NEVER = 2,
+};
+
+enum {
+ Opt_logbufs = 0,
+ Opt_logbsize = 1,
+ Opt_logdev = 2,
+ Opt_rtdev = 3,
+ Opt_wsync = 4,
+ Opt_noalign = 5,
+ Opt_swalloc = 6,
+ Opt_sunit = 7,
+ Opt_swidth = 8,
+ Opt_nouuid = 9,
+ Opt_grpid___2 = 10,
+ Opt_nogrpid___2 = 11,
+ Opt_bsdgroups = 12,
+ Opt_sysvgroups = 13,
+ Opt_allocsize = 14,
+ Opt_norecovery = 15,
+ Opt_inode64 = 16,
+ Opt_inode32 = 17,
+ Opt_ikeep = 18,
+ Opt_noikeep = 19,
+ Opt_largeio = 20,
+ Opt_nolargeio = 21,
+ Opt_attr2 = 22,
+ Opt_noattr2 = 23,
+ Opt_filestreams = 24,
+ Opt_quota___2 = 25,
+ Opt_noquota___2 = 26,
+ Opt_usrquota___2 = 27,
+ Opt_grpquota___2 = 28,
+ Opt_prjquota___2 = 29,
+ Opt_uquota = 30,
+ Opt_gquota = 31,
+ Opt_pquota = 32,
+ Opt_uqnoenforce = 33,
+ Opt_gqnoenforce = 34,
+ Opt_pqnoenforce = 35,
+ Opt_qnoenforce = 36,
+ Opt_discard___2 = 37,
+ Opt_nodiscard___2 = 38,
+ Opt_dax___2 = 39,
+ Opt_dax_enum = 40,
+};
+
+struct proc_xfs_info {
+ uint64_t flag;
+ char *str;
+};
+
+struct xfs_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *, char *);
+ ssize_t (*store)(struct kobject *, const char *, size_t);
+};
+
+struct xfs_error_init {
+ char *name;
+ int max_retries;
+ int retry_timeout;
+};
+
+struct xfs_ail_cursor {
+ struct list_head list;
+ struct xfs_log_item *item;
+};
+
+struct xfs_unmount_log_format {
+ uint16_t magic;
+ uint16_t pad1;
+ uint32_t pad2;
+};
+
+struct xlog_op_header {
+ __be32 oh_tid;
+ __be32 oh_len;
+ __u8 oh_clientid;
+ __u8 oh_flags;
+ __u16 oh_res2;
+};
+
+typedef struct xlog_op_header xlog_op_header_t;
+
+typedef struct xlog_ticket xlog_ticket_t;
+
+struct xfs_map_extent {
+ uint64_t me_owner;
+ uint64_t me_startblock;
+ uint64_t me_startoff;
+ uint32_t me_len;
+ uint32_t me_flags;
+};
+
+struct xfs_bui_log_format {
+ uint16_t bui_type;
+ uint16_t bui_size;
+ uint32_t bui_nextents;
+ uint64_t bui_id;
+ struct xfs_map_extent bui_extents[0];
+};
+
+struct xfs_bud_log_format {
+ uint16_t bud_type;
+ uint16_t bud_size;
+ uint32_t __pad;
+ uint64_t bud_bui_id;
+};
+
+struct xfs_bui_log_item {
+ struct xfs_log_item bui_item;
+ atomic_t bui_refcount;
+ atomic_t bui_next_extent;
+ struct xfs_bui_log_format bui_format;
+};
+
+struct xfs_bud_log_item {
+ struct xfs_log_item bud_item;
+ struct xfs_bui_log_item *bud_buip;
+ struct xfs_bud_log_format bud_format;
+};
+
+struct xfs_buf_cancel {
+ xfs_daddr_t bc_blkno;
+ uint bc_len;
+ int bc_refcount;
+ struct list_head bc_list;
+};
+
+struct xfs_dq_logformat {
+ uint16_t qlf_type;
+ uint16_t qlf_size;
+ xfs_dqid_t qlf_id;
+ int64_t qlf_blkno;
+ int32_t qlf_len;
+ uint32_t qlf_boffset;
+};
+
+struct xfs_qoff_logformat {
+ short unsigned int qf_type;
+ short unsigned int qf_size;
+ unsigned int qf_flags;
+ char qf_pad[12];
+};
+
+typedef struct xfs_log_iovec xfs_log_iovec_t;
+
+struct xfs_extent {
+ xfs_fsblock_t ext_start;
+ xfs_extlen_t ext_len;
+};
+
+typedef struct xfs_extent xfs_extent_t;
+
+struct xfs_extent_32 {
+ uint64_t ext_start;
+ uint32_t ext_len;
+} __attribute__((packed));
+
+typedef struct xfs_extent_32 xfs_extent_32_t;
+
+struct xfs_extent_64 {
+ uint64_t ext_start;
+ uint32_t ext_len;
+ uint32_t ext_pad;
+};
+
+typedef struct xfs_extent_64 xfs_extent_64_t;
+
+struct xfs_efi_log_format {
+ uint16_t efi_type;
+ uint16_t efi_size;
+ uint32_t efi_nextents;
+ uint64_t efi_id;
+ xfs_extent_t efi_extents[1];
+};
+
+typedef struct xfs_efi_log_format xfs_efi_log_format_t;
+
+struct xfs_efi_log_format_32 {
+ uint16_t efi_type;
+ uint16_t efi_size;
+ uint32_t efi_nextents;
+ uint64_t efi_id;
+ xfs_extent_32_t efi_extents[1];
+} __attribute__((packed));
+
+typedef struct xfs_efi_log_format_32 xfs_efi_log_format_32_t;
+
+struct xfs_efi_log_format_64 {
+ uint16_t efi_type;
+ uint16_t efi_size;
+ uint32_t efi_nextents;
+ uint64_t efi_id;
+ xfs_extent_64_t efi_extents[1];
+};
+
+typedef struct xfs_efi_log_format_64 xfs_efi_log_format_64_t;
+
+struct xfs_efd_log_format {
+ uint16_t efd_type;
+ uint16_t efd_size;
+ uint32_t efd_nextents;
+ uint64_t efd_efi_id;
+ xfs_extent_t efd_extents[1];
+};
+
+typedef struct xfs_efd_log_format xfs_efd_log_format_t;
+
+struct xfs_efi_log_item {
+ struct xfs_log_item efi_item;
+ atomic_t efi_refcount;
+ atomic_t efi_next_extent;
+ xfs_efi_log_format_t efi_format;
+};
+
+struct xfs_efd_log_item {
+ struct xfs_log_item efd_item;
+ struct xfs_efi_log_item *efd_efip;
+ uint efd_next_extent;
+ xfs_efd_log_format_t efd_format;
+};
+
+struct xfs_icreate_item {
+ struct xfs_log_item ic_item;
+ struct xfs_icreate_log ic_format;
+};
+
+struct xfs_inode_log_format_32 {
+ uint16_t ilf_type;
+ uint16_t ilf_size;
+ uint32_t ilf_fields;
+ uint16_t ilf_asize;
+ uint16_t ilf_dsize;
+ uint64_t ilf_ino;
+ union {
+ uint32_t ilfu_rdev;
+ uint8_t __pad[16];
+ } ilf_u;
+ int64_t ilf_blkno;
+ int32_t ilf_len;
+ int32_t ilf_boffset;
+} __attribute__((packed));
+
+struct xfs_phys_extent {
+ uint64_t pe_startblock;
+ uint32_t pe_len;
+ uint32_t pe_flags;
+};
+
+struct xfs_cui_log_format {
+ uint16_t cui_type;
+ uint16_t cui_size;
+ uint32_t cui_nextents;
+ uint64_t cui_id;
+ struct xfs_phys_extent cui_extents[0];
+};
+
+struct xfs_cud_log_format {
+ uint16_t cud_type;
+ uint16_t cud_size;
+ uint32_t __pad;
+ uint64_t cud_cui_id;
+};
+
+struct xfs_cui_log_item {
+ struct xfs_log_item cui_item;
+ atomic_t cui_refcount;
+ atomic_t cui_next_extent;
+ struct xfs_cui_log_format cui_format;
+};
+
+struct xfs_cud_log_item {
+ struct xfs_log_item cud_item;
+ struct xfs_cui_log_item *cud_cuip;
+ struct xfs_cud_log_format cud_format;
+};
+
+struct xfs_rui_log_format {
+ uint16_t rui_type;
+ uint16_t rui_size;
+ uint32_t rui_nextents;
+ uint64_t rui_id;
+ struct xfs_map_extent rui_extents[0];
+};
+
+struct xfs_rud_log_format {
+ uint16_t rud_type;
+ uint16_t rud_size;
+ uint32_t __pad;
+ uint64_t rud_rui_id;
+};
+
+struct xfs_rui_log_item {
+ struct xfs_log_item rui_item;
+ atomic_t rui_refcount;
+ atomic_t rui_next_extent;
+ struct xfs_rui_log_format rui_format;
+};
+
+struct xfs_rud_log_item {
+ struct xfs_log_item rud_item;
+ struct xfs_rui_log_item *rud_ruip;
+ struct xfs_rud_log_format rud_format;
+};
+
+typedef struct xfs_agi xfs_agi_t;
+
+struct p9_qid {
+ u8 type;
+ u32 version;
+ u64 path;
+};
+
+struct p9_wstat {
+ u16 size;
+ u16 type;
+ u32 dev;
+ struct p9_qid qid;
+ u32 mode;
+ u32 atime;
+ u32 mtime;
+ u64 length;
+ const char *name;
+ const char *uid;
+ const char *gid;
+ const char *muid;
+ char *extension;
+ kuid_t n_uid;
+ kgid_t n_gid;
+ kuid_t n_muid;
+};
+
+struct p9_stat_dotl {
+ u64 st_result_mask;
+ struct p9_qid qid;
+ u32 st_mode;
+ kuid_t st_uid;
+ kgid_t st_gid;
+ u64 st_nlink;
+ u64 st_rdev;
+ u64 st_size;
+ u64 st_blksize;
+ u64 st_blocks;
+ u64 st_atime_sec;
+ u64 st_atime_nsec;
+ u64 st_mtime_sec;
+ u64 st_mtime_nsec;
+ u64 st_ctime_sec;
+ u64 st_ctime_nsec;
+ u64 st_btime_sec;
+ u64 st_btime_nsec;
+ u64 st_gen;
+ u64 st_data_version;
+};
+
+struct p9_rstatfs {
+ u32 type;
+ u32 bsize;
+ u64 blocks;
+ u64 bfree;
+ u64 bavail;
+ u64 files;
+ u64 ffree;
+ u64 fsid;
+ u32 namelen;
+};
+
+enum p9_trans_status {
+ Connected = 0,
+ BeginDisconnect = 1,
+ Disconnected = 2,
+ Hung = 3,
+};
+
+struct p9_trans_module;
+
+struct p9_client {
+ spinlock_t lock;
+ unsigned int msize;
+ unsigned char proto_version;
+ struct p9_trans_module *trans_mod;
+ enum p9_trans_status status;
+ void *trans;
+ struct kmem_cache *fcall_cache;
+ union {
+ struct {
+ int rfd;
+ int wfd;
+ } fd;
+ struct {
+ u16 port;
+ bool privport;
+ } tcp;
+ } trans_opts;
+ struct idr fids;
+ struct idr reqs;
+ char name[65];
+};
+
+struct p9_fid {
+ struct p9_client *clnt;
+ u32 fid;
+ int mode;
+ struct p9_qid qid;
+ u32 iounit;
+ kuid_t uid;
+ void *rdir;
+ struct hlist_node dlist;
+};
+
+enum p9_session_flags {
+ V9FS_PROTO_2000U = 1,
+ V9FS_PROTO_2000L = 2,
+ V9FS_ACCESS_SINGLE = 4,
+ V9FS_ACCESS_USER = 8,
+ V9FS_ACCESS_CLIENT = 16,
+ V9FS_POSIX_ACL = 32,
+};
+
+enum p9_cache_modes {
+ CACHE_NONE = 0,
+ CACHE_MMAP = 1,
+ CACHE_LOOSE = 2,
+ CACHE_FSCACHE = 3,
+ nr__p9_cache_modes = 4,
+};
+
+struct v9fs_session_info {
+ unsigned char flags;
+ unsigned char nodev;
+ short unsigned int debug;
+ unsigned int afid;
+ unsigned int cache;
+ char *uname;
+ char *aname;
+ unsigned int maxdata;
+ kuid_t dfltuid;
+ kgid_t dfltgid;
+ kuid_t uid;
+ struct p9_client *clnt;
+ struct list_head slist;
+ struct rw_semaphore rename_sem;
+ long int session_lock_timeout;
+};
+
+struct v9fs_inode {
+ struct p9_qid qid;
+ unsigned int cache_validity;
+ struct p9_fid *writeback_fid;
+ struct mutex v_mutex;
+ struct inode vfs_inode;
+};
+
+enum p9_open_mode_t {
+ P9_OREAD = 0,
+ P9_OWRITE = 1,
+ P9_ORDWR = 2,
+ P9_OEXEC = 3,
+ P9_OTRUNC = 16,
+ P9_OREXEC = 32,
+ P9_ORCLOSE = 64,
+ P9_OAPPEND = 128,
+ P9_OEXCL = 4096,
+};
+
+enum p9_perm_t {
+ P9_DMDIR = -2147483648,
+ P9_DMAPPEND = 1073741824,
+ P9_DMEXCL = 536870912,
+ P9_DMMOUNT = 268435456,
+ P9_DMAUTH = 134217728,
+ P9_DMTMP = 67108864,
+ P9_DMSYMLINK = 33554432,
+ P9_DMLINK = 16777216,
+ P9_DMDEVICE = 8388608,
+ P9_DMNAMEDPIPE = 2097152,
+ P9_DMSOCKET = 1048576,
+ P9_DMSETUID = 524288,
+ P9_DMSETGID = 262144,
+ P9_DMSETVTX = 65536,
+};
+
+struct p9_iattr_dotl {
+ u32 valid;
+ u32 mode;
+ kuid_t uid;
+ kgid_t gid;
+ u64 size;
+ u64 atime_sec;
+ u64 atime_nsec;
+ u64 mtime_sec;
+ u64 mtime_nsec;
+};
+
+struct dotl_openflag_map {
+ int open_flag;
+ int dotl_flag;
+};
+
+struct dotl_iattr_map {
+ int iattr_valid;
+ int p9_iattr_valid;
+};
+
+struct p9_flock {
+ u8 type;
+ u32 flags;
+ u64 start;
+ u64 length;
+ u32 proc_id;
+ char *client_id;
+};
+
+struct p9_getlock {
+ u8 type;
+ u64 start;
+ u64 length;
+ u32 proc_id;
+ char *client_id;
+};
+
+struct p9_dirent {
+ struct p9_qid qid;
+ u64 d_off;
+ unsigned char d_type;
+ char d_name[256];
+};
+
+struct p9_rdir {
+ int head;
+ int tail;
+ uint8_t buf[0];
+};
+
+struct p9_fcall {
+ u32 size;
+ u8 id;
+ u16 tag;
+ size_t offset;
+ size_t capacity;
+ struct kmem_cache *cache;
+ u8 *sdata;
+};
+
+struct p9_req_t {
+ int status;
+ int t_err;
+ struct kref refcount;
+ wait_queue_head_t wq;
+ struct p9_fcall tc;
+ struct p9_fcall rc;
+ struct list_head req_list;
+};
+
+struct p9_trans_module {
+ struct list_head list;
+ char *name;
+ int maxsize;
+ int def;
+ struct module *owner;
+ int (*create)(struct p9_client *, const char *, char *);
+ void (*close)(struct p9_client *);
+ int (*request)(struct p9_client *, struct p9_req_t *);
+ int (*cancel)(struct p9_client *, struct p9_req_t *);
+ int (*cancelled)(struct p9_client *, struct p9_req_t *);
+ int (*zc_request)(struct p9_client *, struct p9_req_t *, struct iov_iter *, struct iov_iter *, int, int, int);
+ int (*show_options)(struct seq_file *, struct p9_client *);
+};
+
+enum {
+ Opt_debug___2 = 0,
+ Opt_dfltuid = 1,
+ Opt_dfltgid = 2,
+ Opt_afid = 3,
+ Opt_uname = 4,
+ Opt_remotename = 5,
+ Opt_cache = 6,
+ Opt_cachetag = 7,
+ Opt_nodevmap = 8,
+ Opt_cache_loose = 9,
+ Opt_fscache = 10,
+ Opt_mmap = 11,
+ Opt_access = 12,
+ Opt_posixacl = 13,
+ Opt_locktimeout = 14,
+ Opt_err___4 = 15,
+};
+
+typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *);
+
+struct debugfs_fsdata {
+ const struct file_operations *real_fops;
+ refcount_t active_users;
+ struct completion active_users_drained;
+};
+
+struct debugfs_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+};
+
+enum {
+ Opt_uid___5 = 0,
+ Opt_gid___6 = 1,
+ Opt_mode___5 = 2,
+ Opt_err___5 = 3,
+};
+
+struct debugfs_fs_info {
+ struct debugfs_mount_opts mount_opts;
+};
+
+struct debugfs_blob_wrapper {
+ void *data;
+ long unsigned int size;
+};
+
+struct debugfs_reg32 {
+ char *name;
+ long unsigned int offset;
+};
+
+struct debugfs_regset32 {
+ const struct debugfs_reg32 *regs;
+ int nregs;
+ void *base;
+ struct device *dev;
+};
+
+struct array_data {
+ void *array;
+ u32 elements;
+};
+
+struct debugfs_devm_entry {
+ int (*read)(struct seq_file *, void *);
+ struct device *dev;
+};
+
+struct tracefs_dir_ops {
+ int (*mkdir)(const char *);
+ int (*rmdir)(const char *);
+};
+
+struct tracefs_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+};
+
+struct tracefs_fs_info {
+ struct tracefs_mount_opts mount_opts;
+};
+
+typedef unsigned int __kernel_mode_t;
+
+struct ipc_perm {
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ short unsigned int seq;
+};
+
+struct ipc64_perm {
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned char __pad1[0];
+ short unsigned int seq;
+ short unsigned int __pad2;
+ __kernel_ulong_t __unused1;
+ __kernel_ulong_t __unused2;
+};
+
+struct ipc_params {
+ key_t key;
+ int flg;
+ union {
+ size_t size;
+ int nsems;
+ } u;
+};
+
+struct ipc_ops {
+ int (*getnew)(struct ipc_namespace *, struct ipc_params *);
+ int (*associate)(struct kern_ipc_perm *, int);
+ int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *);
+};
+
+struct ipc_proc_iface {
+ const char *path;
+ const char *header;
+ int ids;
+ int (*show)(struct seq_file *, void *);
+};
+
+struct ipc_proc_iter {
+ struct ipc_namespace *ns;
+ struct pid_namespace *pid_ns;
+ struct ipc_proc_iface *iface;
+};
+
+struct msg_msgseg;
+
+struct msg_msg {
+ struct list_head m_list;
+ long int m_type;
+ size_t m_ts;
+ struct msg_msgseg *next;
+ void *security;
+};
+
+struct msg_msgseg {
+ struct msg_msgseg *next;
+};
+
+typedef int __kernel_ipc_pid_t;
+
+struct msgbuf {
+ __kernel_long_t mtype;
+ char mtext[1];
+};
+
+struct msg;
+
+struct msqid_ds {
+ struct ipc_perm msg_perm;
+ struct msg *msg_first;
+ struct msg *msg_last;
+ __kernel_old_time_t msg_stime;
+ __kernel_old_time_t msg_rtime;
+ __kernel_old_time_t msg_ctime;
+ long unsigned int msg_lcbytes;
+ long unsigned int msg_lqbytes;
+ short unsigned int msg_cbytes;
+ short unsigned int msg_qnum;
+ short unsigned int msg_qbytes;
+ __kernel_ipc_pid_t msg_lspid;
+ __kernel_ipc_pid_t msg_lrpid;
+};
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ long int msg_stime;
+ long int msg_rtime;
+ long int msg_ctime;
+ long unsigned int msg_cbytes;
+ long unsigned int msg_qnum;
+ long unsigned int msg_qbytes;
+ __kernel_pid_t msg_lspid;
+ __kernel_pid_t msg_lrpid;
+ long unsigned int __unused4;
+ long unsigned int __unused5;
+};
+
+struct msginfo {
+ int msgpool;
+ int msgmap;
+ int msgmax;
+ int msgmnb;
+ int msgmni;
+ int msgssz;
+ int msgtql;
+ short unsigned int msgseg;
+};
+
+struct msg_queue {
+ struct kern_ipc_perm q_perm;
+ time64_t q_stime;
+ time64_t q_rtime;
+ time64_t q_ctime;
+ long unsigned int q_cbytes;
+ long unsigned int q_qnum;
+ long unsigned int q_qbytes;
+ struct pid *q_lspid;
+ struct pid *q_lrpid;
+ struct list_head q_messages;
+ struct list_head q_receivers;
+ struct list_head q_senders;
+ long: 64;
+ long: 64;
+};
+
+struct msg_receiver {
+ struct list_head r_list;
+ struct task_struct *r_tsk;
+ int r_mode;
+ long int r_msgtype;
+ long int r_maxsize;
+ struct msg_msg *r_msg;
+};
+
+struct msg_sender {
+ struct list_head list;
+ struct task_struct *tsk;
+ size_t msgsz;
+};
+
+struct sem;
+
+struct sem_queue;
+
+struct sem_undo;
+
+struct semid_ds {
+ struct ipc_perm sem_perm;
+ __kernel_old_time_t sem_otime;
+ __kernel_old_time_t sem_ctime;
+ struct sem *sem_base;
+ struct sem_queue *sem_pending;
+ struct sem_queue **sem_pending_last;
+ struct sem_undo *undo;
+ short unsigned int sem_nsems;
+};
+
+struct sem {
+ int semval;
+ struct pid *sempid;
+ spinlock_t lock;
+ struct list_head pending_alter;
+ struct list_head pending_const;
+ time64_t sem_otime;
+ long: 64;
+};
+
+struct sembuf;
+
+struct sem_queue {
+ struct list_head list;
+ struct task_struct *sleeper;
+ struct sem_undo *undo;
+ struct pid *pid;
+ int status;
+ struct sembuf *sops;
+ struct sembuf *blocking;
+ int nsops;
+ bool alter;
+ bool dupsop;
+};
+
+struct sem_undo {
+ struct list_head list_proc;
+ struct callback_head rcu;
+ struct sem_undo_list *ulp;
+ struct list_head list_id;
+ int semid;
+ short int *semadj;
+};
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm;
+ __kernel_long_t sem_otime;
+ __kernel_ulong_t __unused1;
+ __kernel_long_t sem_ctime;
+ __kernel_ulong_t __unused2;
+ __kernel_ulong_t sem_nsems;
+ __kernel_ulong_t __unused3;
+ __kernel_ulong_t __unused4;
+};
+
+struct sembuf {
+ short unsigned int sem_num;
+ short int sem_op;
+ short int sem_flg;
+};
+
+struct seminfo {
+ int semmap;
+ int semmni;
+ int semmns;
+ int semmnu;
+ int semmsl;
+ int semopm;
+ int semume;
+ int semusz;
+ int semvmx;
+ int semaem;
+};
+
+struct sem_undo_list {
+ refcount_t refcnt;
+ spinlock_t lock;
+ struct list_head list_proc;
+};
+
+struct sem_array {
+ struct kern_ipc_perm sem_perm;
+ time64_t sem_ctime;
+ struct list_head pending_alter;
+ struct list_head pending_const;
+ struct list_head list_id;
+ int sem_nsems;
+ int complex_count;
+ unsigned int use_global_lock;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct sem sems[0];
+};
+
+struct shmid_ds {
+ struct ipc_perm shm_perm;
+ int shm_segsz;
+ __kernel_old_time_t shm_atime;
+ __kernel_old_time_t shm_dtime;
+ __kernel_old_time_t shm_ctime;
+ __kernel_ipc_pid_t shm_cpid;
+ __kernel_ipc_pid_t shm_lpid;
+ short unsigned int shm_nattch;
+ short unsigned int shm_unused;
+ void *shm_unused2;
+ void *shm_unused3;
+};
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm;
+ size_t shm_segsz;
+ long int shm_atime;
+ long int shm_dtime;
+ long int shm_ctime;
+ __kernel_pid_t shm_cpid;
+ __kernel_pid_t shm_lpid;
+ long unsigned int shm_nattch;
+ long unsigned int __unused4;
+ long unsigned int __unused5;
+};
+
+struct shminfo64 {
+ long unsigned int shmmax;
+ long unsigned int shmmin;
+ long unsigned int shmmni;
+ long unsigned int shmseg;
+ long unsigned int shmall;
+ long unsigned int __unused1;
+ long unsigned int __unused2;
+ long unsigned int __unused3;
+ long unsigned int __unused4;
+};
+
+struct shminfo {
+ int shmmax;
+ int shmmin;
+ int shmmni;
+ int shmseg;
+ int shmall;
+};
+
+struct shm_info {
+ int used_ids;
+ __kernel_ulong_t shm_tot;
+ __kernel_ulong_t shm_rss;
+ __kernel_ulong_t shm_swp;
+ __kernel_ulong_t swap_attempts;
+ __kernel_ulong_t swap_successes;
+};
+
+struct shmid_kernel {
+ struct kern_ipc_perm shm_perm;
+ struct file *shm_file;
+ long unsigned int shm_nattch;
+ long unsigned int shm_segsz;
+ time64_t shm_atim;
+ time64_t shm_dtim;
+ time64_t shm_ctim;
+ struct pid *shm_cprid;
+ struct pid *shm_lprid;
+ struct user_struct *mlock_user;
+ struct task_struct *shm_creator;
+ struct list_head shm_clist;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct shm_file_data {
+ int id;
+ struct ipc_namespace *ns;
+ struct file *file;
+ const struct vm_operations_struct *vm_ops;
+};
+
+struct mqueue_fs_context {
+ struct ipc_namespace *ipc_ns;
+};
+
+struct posix_msg_tree_node {
+ struct rb_node rb_node;
+ struct list_head msg_list;
+ int priority;
+};
+
+struct ext_wait_queue {
+ struct task_struct *task;
+ struct list_head list;
+ struct msg_msg *msg;
+ int state;
+};
+
+struct mqueue_inode_info {
+ spinlock_t lock;
+ struct inode vfs_inode;
+ wait_queue_head_t wait_q;
+ struct rb_root msg_tree;
+ struct rb_node *msg_tree_rightmost;
+ struct posix_msg_tree_node *node_cache;
+ struct mq_attr attr;
+ struct sigevent notify;
+ struct pid *notify_owner;
+ u32 notify_self_exec_id;
+ struct user_namespace *notify_user_ns;
+ struct user_struct *user;
+ struct sock *notify_sock;
+ struct sk_buff *notify_cookie;
+ struct ext_wait_queue e_wait_q[2];
+ long unsigned int qsize;
+};
+
+struct vfs_cap_data {
+ __le32 magic_etc;
+ struct {
+ __le32 permitted;
+ __le32 inheritable;
+ } data[2];
+};
+
+struct vfs_ns_cap_data {
+ __le32 magic_etc;
+ struct {
+ __le32 permitted;
+ __le32 inheritable;
+ } data[2];
+ __le32 rootid;
+};
+
+struct fs_parameter___2;
+
+struct perf_event_attr___2;
+
+union security_list_options {
+ int (*binder_set_context_mgr)(struct task_struct *);
+ int (*binder_transaction)(struct task_struct *, struct task_struct *);
+ int (*binder_transfer_binder)(struct task_struct *, struct task_struct *);
+ int (*binder_transfer_file)(struct task_struct *, struct task_struct *, struct file *);
+ int (*ptrace_access_check)(struct task_struct *, unsigned int);
+ int (*ptrace_traceme)(struct task_struct *);
+ int (*capget)(struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *);
+ int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *);
+ int (*capable)(const struct cred *, struct user_namespace *, int, unsigned int);
+ int (*quotactl)(int, int, int, struct super_block *);
+ int (*quota_on)(struct dentry *);
+ int (*syslog)(int);
+ int (*settime)(const struct timespec64 *, const struct timezone *);
+ int (*vm_enough_memory)(struct mm_struct *, long int);
+ int (*bprm_creds_for_exec)(struct linux_binprm *);
+ int (*bprm_creds_from_file)(struct linux_binprm *, struct file *);
+ int (*bprm_check_security)(struct linux_binprm *);
+ void (*bprm_committing_creds)(struct linux_binprm *);
+ void (*bprm_committed_creds)(struct linux_binprm *);
+ int (*fs_context_dup)(struct fs_context *, struct fs_context *);
+ int (*fs_context_parse_param)(struct fs_context *, struct fs_parameter___2 *);
+ int (*sb_alloc_security)(struct super_block *);
+ void (*sb_free_security)(struct super_block *);
+ void (*sb_free_mnt_opts)(void *);
+ int (*sb_eat_lsm_opts)(char *, void **);
+ int (*sb_remount)(struct super_block *, void *);
+ int (*sb_kern_mount)(struct super_block *);
+ int (*sb_show_options)(struct seq_file *, struct super_block *);
+ int (*sb_statfs)(struct dentry *);
+ int (*sb_mount)(const char *, const struct path *, const char *, long unsigned int, void *);
+ int (*sb_umount)(struct vfsmount *, int);
+ int (*sb_pivotroot)(const struct path *, const struct path *);
+ int (*sb_set_mnt_opts)(struct super_block *, void *, long unsigned int, long unsigned int *);
+ int (*sb_clone_mnt_opts)(const struct super_block *, struct super_block *, long unsigned int, long unsigned int *);
+ int (*sb_add_mnt_opt)(const char *, const char *, int, void **);
+ int (*move_mount)(const struct path *, const struct path *);
+ int (*dentry_init_security)(struct dentry *, int, const struct qstr *, void **, u32 *);
+ int (*dentry_create_files_as)(struct dentry *, int, struct qstr *, const struct cred *, struct cred *);
+ int (*path_notify)(const struct path *, u64, unsigned int);
+ int (*inode_alloc_security)(struct inode *);
+ void (*inode_free_security)(struct inode *);
+ int (*inode_init_security)(struct inode *, struct inode *, const struct qstr *, const char **, void **, size_t *);
+ int (*inode_create)(struct inode *, struct dentry *, umode_t);
+ int (*inode_link)(struct dentry *, struct inode *, struct dentry *);
+ int (*inode_unlink)(struct inode *, struct dentry *);
+ int (*inode_symlink)(struct inode *, struct dentry *, const char *);
+ int (*inode_mkdir)(struct inode *, struct dentry *, umode_t);
+ int (*inode_rmdir)(struct inode *, struct dentry *);
+ int (*inode_mknod)(struct inode *, struct dentry *, umode_t, dev_t);
+ int (*inode_rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);
+ int (*inode_readlink)(struct dentry *);
+ int (*inode_follow_link)(struct dentry *, struct inode *, bool);
+ int (*inode_permission)(struct inode *, int);
+ int (*inode_setattr)(struct dentry *, struct iattr *);
+ int (*inode_getattr)(const struct path *);
+ int (*inode_setxattr)(struct dentry *, const char *, const void *, size_t, int);
+ void (*inode_post_setxattr)(struct dentry *, const char *, const void *, size_t, int);
+ int (*inode_getxattr)(struct dentry *, const char *);
+ int (*inode_listxattr)(struct dentry *);
+ int (*inode_removexattr)(struct dentry *, const char *);
+ int (*inode_need_killpriv)(struct dentry *);
+ int (*inode_killpriv)(struct dentry *);
+ int (*inode_getsecurity)(struct inode *, const char *, void **, bool);
+ int (*inode_setsecurity)(struct inode *, const char *, const void *, size_t, int);
+ int (*inode_listsecurity)(struct inode *, char *, size_t);
+ void (*inode_getsecid)(struct inode *, u32 *);
+ int (*inode_copy_up)(struct dentry *, struct cred **);
+ int (*inode_copy_up_xattr)(const char *);
+ int (*kernfs_init_security)(struct kernfs_node *, struct kernfs_node *);
+ int (*file_permission)(struct file *, int);
+ int (*file_alloc_security)(struct file *);
+ void (*file_free_security)(struct file *);
+ int (*file_ioctl)(struct file *, unsigned int, long unsigned int);
+ int (*mmap_addr)(long unsigned int);
+ int (*mmap_file)(struct file *, long unsigned int, long unsigned int, long unsigned int);
+ int (*file_mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int);
+ int (*file_lock)(struct file *, unsigned int);
+ int (*file_fcntl)(struct file *, unsigned int, long unsigned int);
+ void (*file_set_fowner)(struct file *);
+ int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int);
+ int (*file_receive)(struct file *);
+ int (*file_open)(struct file *);
+ int (*task_alloc)(struct task_struct *, long unsigned int);
+ void (*task_free)(struct task_struct *);
+ int (*cred_alloc_blank)(struct cred *, gfp_t);
+ void (*cred_free)(struct cred *);
+ int (*cred_prepare)(struct cred *, const struct cred *, gfp_t);
+ void (*cred_transfer)(struct cred *, const struct cred *);
+ void (*cred_getsecid)(const struct cred *, u32 *);
+ int (*kernel_act_as)(struct cred *, u32);
+ int (*kernel_create_files_as)(struct cred *, struct inode *);
+ int (*kernel_module_request)(char *);
+ int (*kernel_load_data)(enum kernel_load_data_id);
+ int (*kernel_read_file)(struct file *, enum kernel_read_file_id);
+ int (*kernel_post_read_file)(struct file *, char *, loff_t, enum kernel_read_file_id);
+ int (*task_fix_setuid)(struct cred *, const struct cred *, int);
+ int (*task_fix_setgid)(struct cred *, const struct cred *, int);
+ int (*task_setpgid)(struct task_struct *, pid_t);
+ int (*task_getpgid)(struct task_struct *);
+ int (*task_getsid)(struct task_struct *);
+ void (*task_getsecid)(struct task_struct *, u32 *);
+ int (*task_setnice)(struct task_struct *, int);
+ int (*task_setioprio)(struct task_struct *, int);
+ int (*task_getioprio)(struct task_struct *);
+ int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int);
+ int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *);
+ int (*task_setscheduler)(struct task_struct *);
+ int (*task_getscheduler)(struct task_struct *);
+ int (*task_movememory)(struct task_struct *);
+ int (*task_kill)(struct task_struct *, struct kernel_siginfo *, int, const struct cred *);
+ int (*task_prctl)(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
+ void (*task_to_inode)(struct task_struct *, struct inode *);
+ int (*ipc_permission)(struct kern_ipc_perm *, short int);
+ void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *);
+ int (*msg_msg_alloc_security)(struct msg_msg *);
+ void (*msg_msg_free_security)(struct msg_msg *);
+ int (*msg_queue_alloc_security)(struct kern_ipc_perm *);
+ void (*msg_queue_free_security)(struct kern_ipc_perm *);
+ int (*msg_queue_associate)(struct kern_ipc_perm *, int);
+ int (*msg_queue_msgctl)(struct kern_ipc_perm *, int);
+ int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int);
+ int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long int, int);
+ int (*shm_alloc_security)(struct kern_ipc_perm *);
+ void (*shm_free_security)(struct kern_ipc_perm *);
+ int (*shm_associate)(struct kern_ipc_perm *, int);
+ int (*shm_shmctl)(struct kern_ipc_perm *, int);
+ int (*shm_shmat)(struct kern_ipc_perm *, char *, int);
+ int (*sem_alloc_security)(struct kern_ipc_perm *);
+ void (*sem_free_security)(struct kern_ipc_perm *);
+ int (*sem_associate)(struct kern_ipc_perm *, int);
+ int (*sem_semctl)(struct kern_ipc_perm *, int);
+ int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int);
+ int (*netlink_send)(struct sock *, struct sk_buff *);
+ void (*d_instantiate)(struct dentry *, struct inode *);
+ int (*getprocattr)(struct task_struct *, char *, char **);
+ int (*setprocattr)(const char *, void *, size_t);
+ int (*ismaclabel)(const char *);
+ int (*secid_to_secctx)(u32, char **, u32 *);
+ int (*secctx_to_secid)(const char *, u32, u32 *);
+ void (*release_secctx)(char *, u32);
+ void (*inode_invalidate_secctx)(struct inode *);
+ int (*inode_notifysecctx)(struct inode *, void *, u32);
+ int (*inode_setsecctx)(struct dentry *, void *, u32);
+ int (*inode_getsecctx)(struct inode *, void **, u32 *);
+ int (*audit_rule_init)(u32, u32, char *, void **);
+ int (*audit_rule_known)(struct audit_krule *);
+ int (*audit_rule_match)(u32, u32, u32, void *);
+ void (*audit_rule_free)(void *);
+ int (*bpf)(int, union bpf_attr *, unsigned int);
+ int (*bpf_map)(struct bpf_map *, fmode_t);
+ int (*bpf_prog)(struct bpf_prog *);
+ int (*bpf_map_alloc_security)(struct bpf_map *);
+ void (*bpf_map_free_security)(struct bpf_map *);
+ int (*bpf_prog_alloc_security)(struct bpf_prog_aux *);
+ void (*bpf_prog_free_security)(struct bpf_prog_aux *);
+ int (*locked_down)(enum lockdown_reason);
+ int (*perf_event_open)(struct perf_event_attr___2 *, int);
+ int (*perf_event_alloc)(struct perf_event *);
+ void (*perf_event_free)(struct perf_event *);
+ int (*perf_event_read)(struct perf_event *);
+ int (*perf_event_write)(struct perf_event *);
+};
+
+struct security_hook_list {
+ struct hlist_node list;
+ struct hlist_head *head;
+ union security_list_options hook;
+ char *lsm;
+};
+
+enum lsm_event {
+ LSM_POLICY_CHANGE = 0,
+};
+
+typedef int (*initxattrs)(struct inode *, const struct xattr *, void *);
+
+typedef __u16 __sum16;
+
+struct sockaddr_un {
+ __kernel_sa_family_t sun_family;
+ char sun_path[108];
+};
+
+struct unix_address {
+ refcount_t refcnt;
+ int len;
+ unsigned int hash;
+ struct sockaddr_un name[0];
+};
+
+struct scm_stat {
+ atomic_t nr_fds;
+};
+
+struct unix_sock {
+ struct sock sk;
+ struct unix_address *addr;
+ struct path path;
+ struct mutex iolock;
+ struct mutex bindlock;
+ struct sock *peer;
+ struct list_head link;
+ atomic_long_t inflight;
+ spinlock_t lock;
+ long unsigned int gc_flags;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct socket_wq peer_wq;
+ wait_queue_entry_t peer_wake;
+ struct scm_stat scm_stat;
+ long: 32;
+ long: 64;
+ long: 64;
+};
+
+struct in6_pktinfo {
+ struct in6_addr ipi6_addr;
+ int ipi6_ifindex;
+};
+
+struct ipv6_rt_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 type;
+ __u8 segments_left;
+};
+
+struct ipv6_opt_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+};
+
+struct ipv6hdr {
+ __u8 priority: 4;
+ __u8 version: 4;
+ __u8 flow_lbl[3];
+ __be16 payload_len;
+ __u8 nexthdr;
+ __u8 hop_limit;
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+};
+
+struct ip_options {
+ __be32 faddr;
+ __be32 nexthop;
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_strictroute: 1;
+ unsigned char srr_is_hit: 1;
+ unsigned char is_changed: 1;
+ unsigned char rr_needaddr: 1;
+ unsigned char ts_needtime: 1;
+ unsigned char ts_needaddr: 1;
+ unsigned char router_alert;
+ unsigned char cipso;
+ unsigned char __pad2;
+ unsigned char __data[0];
+};
+
+struct ip_options_rcu {
+ struct callback_head rcu;
+ struct ip_options opt;
+};
+
+struct ipv6_txoptions {
+ refcount_t refcnt;
+ int tot_len;
+ __u16 opt_flen;
+ __u16 opt_nflen;
+ struct ipv6_opt_hdr *hopopt;
+ struct ipv6_opt_hdr *dst0opt;
+ struct ipv6_rt_hdr *srcrt;
+ struct ipv6_opt_hdr *dst1opt;
+ struct callback_head rcu;
+};
+
+struct inet_cork {
+ unsigned int flags;
+ __be32 addr;
+ struct ip_options *opt;
+ unsigned int fragsize;
+ int length;
+ struct dst_entry *dst;
+ u8 tx_flags;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
+ __u16 gso_size;
+ u64 transmit_time;
+ u32 mark;
+};
+
+struct inet_cork_full {
+ struct inet_cork base;
+ struct flowi fl;
+};
+
+struct ipv6_pinfo;
+
+struct ip_mc_socklist;
+
+struct inet_sock {
+ struct sock sk;
+ struct ipv6_pinfo *pinet6;
+ __be32 inet_saddr;
+ __s16 uc_ttl;
+ __u16 cmsg_flags;
+ __be16 inet_sport;
+ __u16 inet_id;
+ struct ip_options_rcu *inet_opt;
+ int rx_dst_ifindex;
+ __u8 tos;
+ __u8 min_ttl;
+ __u8 mc_ttl;
+ __u8 pmtudisc;
+ __u8 recverr: 1;
+ __u8 is_icsk: 1;
+ __u8 freebind: 1;
+ __u8 hdrincl: 1;
+ __u8 mc_loop: 1;
+ __u8 transparent: 1;
+ __u8 mc_all: 1;
+ __u8 nodefrag: 1;
+ __u8 bind_address_no_port: 1;
+ __u8 defer_connect: 1;
+ __u8 rcv_tos;
+ __u8 convert_csum;
+ int uc_index;
+ int mc_index;
+ __be32 mc_addr;
+ struct ip_mc_socklist *mc_list;
+ struct inet_cork_full cork;
+};
+
+struct inet6_cork {
+ struct ipv6_txoptions *opt;
+ u8 hop_limit;
+ u8 tclass;
+};
+
+struct ipv6_mc_socklist;
+
+struct ipv6_ac_socklist;
+
+struct ipv6_fl_socklist;
+
+struct ipv6_pinfo {
+ struct in6_addr saddr;
+ struct in6_pktinfo sticky_pktinfo;
+ const struct in6_addr *daddr_cache;
+ __be32 flow_label;
+ __u32 frag_size;
+ __u16 __unused_1: 7;
+ __s16 hop_limit: 9;
+ __u16 mc_loop: 1;
+ __u16 __unused_2: 6;
+ __s16 mcast_hops: 9;
+ int ucast_oif;
+ int mcast_oif;
+ union {
+ struct {
+ __u16 srcrt: 1;
+ __u16 osrcrt: 1;
+ __u16 rxinfo: 1;
+ __u16 rxoinfo: 1;
+ __u16 rxhlim: 1;
+ __u16 rxohlim: 1;
+ __u16 hopopts: 1;
+ __u16 ohopopts: 1;
+ __u16 dstopts: 1;
+ __u16 odstopts: 1;
+ __u16 rxflow: 1;
+ __u16 rxtclass: 1;
+ __u16 rxpmtu: 1;
+ __u16 rxorigdstaddr: 1;
+ __u16 recvfragsize: 1;
+ } bits;
+ __u16 all;
+ } rxopt;
+ __u16 recverr: 1;
+ __u16 sndflow: 1;
+ __u16 repflow: 1;
+ __u16 pmtudisc: 3;
+ __u16 padding: 1;
+ __u16 srcprefs: 3;
+ __u16 dontfrag: 1;
+ __u16 autoflowlabel: 1;
+ __u16 autoflowlabel_set: 1;
+ __u16 mc_all: 1;
+ __u16 rtalert_isolate: 1;
+ __u8 min_hopcount;
+ __u8 tclass;
+ __be32 rcv_flowinfo;
+ __u32 dst_cookie;
+ __u32 rx_dst_cookie;
+ struct ipv6_mc_socklist *ipv6_mc_list;
+ struct ipv6_ac_socklist *ipv6_ac_list;
+ struct ipv6_fl_socklist *ipv6_fl_list;
+ struct ipv6_txoptions *opt;
+ struct sk_buff *pktoptions;
+ struct sk_buff *rxpmtu;
+ struct inet6_cork cork;
+};
+
+struct tcphdr {
+ __be16 source;
+ __be16 dest;
+ __be32 seq;
+ __be32 ack_seq;
+ __u16 res1: 4;
+ __u16 doff: 4;
+ __u16 fin: 1;
+ __u16 syn: 1;
+ __u16 rst: 1;
+ __u16 psh: 1;
+ __u16 ack: 1;
+ __u16 urg: 1;
+ __u16 ece: 1;
+ __u16 cwr: 1;
+ __be16 window;
+ __sum16 check;
+ __be16 urg_ptr;
+};
+
+struct udphdr {
+ __be16 source;
+ __be16 dest;
+ __be16 len;
+ __sum16 check;
+};
+
+struct ip6_sf_socklist;
+
+struct ipv6_mc_socklist {
+ struct in6_addr addr;
+ int ifindex;
+ unsigned int sfmode;
+ struct ipv6_mc_socklist *next;
+ rwlock_t sflock;
+ struct ip6_sf_socklist *sflist;
+ struct callback_head rcu;
+};
+
+struct ipv6_ac_socklist {
+ struct in6_addr acl_addr;
+ int acl_ifindex;
+ struct ipv6_ac_socklist *acl_next;
+};
+
+struct ip6_flowlabel;
+
+struct ipv6_fl_socklist {
+ struct ipv6_fl_socklist *next;
+ struct ip6_flowlabel *fl;
+ struct callback_head rcu;
+};
+
+struct iphdr {
+ __u8 ihl: 4;
+ __u8 version: 4;
+ __u8 tos;
+ __be16 tot_len;
+ __be16 id;
+ __be16 frag_off;
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+ __be32 saddr;
+ __be32 daddr;
+};
+
+struct ip6_hdr {
+	union {
+		struct ip6_hdrctl {
+			uint32_t ip6_un1_flow; /* 4 bits version, 8 bits TC, 20 bits flow-ID */
+			uint16_t ip6_un1_plen; /* payload length */
+			uint8_t ip6_un1_nxt; /* next header */
+			uint8_t ip6_un1_hlim; /* hop limit */
+		} ip6_un1;
+		uint8_t ip6_un2_vfc; /* 4 bits version, top 4 bits tclass */
+	} ip6_ctlun;
+	struct in6_addr ip6_src; /* source address */
+	struct in6_addr ip6_dst; /* destination address */
+};
+ struct in6_addr ip6_src; /* source address */
+ struct in6_addr ip6_dst; /* destination address */
+};
+
+#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen
+
+struct ip6_sf_socklist {
+ unsigned int sl_max;
+ unsigned int sl_count;
+ struct in6_addr sl_addr[0];
+};
+
+struct ip6_flowlabel {
+ struct ip6_flowlabel *next;
+ __be32 label;
+ atomic_t users;
+ struct in6_addr dst;
+ struct ipv6_txoptions *opt;
+ long unsigned int linger;
+ struct callback_head rcu;
+ u8 share;
+ union {
+ struct pid *pid;
+ kuid_t uid;
+ } owner;
+ long unsigned int lastuse;
+ long unsigned int expires;
+ struct net *fl_net;
+};
+
+struct dccp_hdr {
+ __be16 dccph_sport;
+ __be16 dccph_dport;
+ __u8 dccph_doff;
+ __u8 dccph_cscov: 4;
+ __u8 dccph_ccval: 4;
+ __sum16 dccph_checksum;
+ __u8 dccph_x: 1;
+ __u8 dccph_type: 4;
+ __u8 dccph_reserved: 3;
+ __u8 dccph_seq2;
+ __be16 dccph_seq;
+};
+
+struct sctphdr {
+ __be16 source;
+ __be16 dest;
+ __be32 vtag;
+ __le32 checksum;
+};
+
+union ib_gid {
+ u8 raw[16];
+ struct {
+ __be64 subnet_prefix;
+ __be64 interface_id;
+ } global;
+};
+
+struct lsm_network_audit {
+ int netif;
+ struct sock *sk;
+ u16 family;
+ __be16 dport;
+ __be16 sport;
+ union {
+ struct {
+ __be32 daddr;
+ __be32 saddr;
+ } v4;
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ } v6;
+ } fam;
+};
+
+struct lsm_ioctlop_audit {
+ struct path path;
+ u16 cmd;
+};
+
+struct lsm_ibpkey_audit {
+ u64 subnet_prefix;
+ u16 pkey;
+};
+
+struct lsm_ibendport_audit {
+ char dev_name[64];
+ u8 port;
+};
+
+struct common_audit_data {
+ char type;
+ union {
+ struct path path;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct lsm_network_audit *net;
+ int cap;
+ int ipc_id;
+ struct task_struct *tsk;
+ char *kmod_name;
+ struct lsm_ioctlop_audit *op;
+ struct file *file;
+ struct lsm_ibpkey_audit *ibpkey;
+ struct lsm_ibendport_audit *ibendport;
+ int reason;
+ } u;
+ union { };
+};
+
+enum devcg_behavior {
+ DEVCG_DEFAULT_NONE = 0,
+ DEVCG_DEFAULT_ALLOW = 1,
+ DEVCG_DEFAULT_DENY = 2,
+};
+
+struct dev_exception_item {
+ u32 major;
+ u32 minor;
+ short int type;
+ short int access;
+ struct list_head list;
+ struct callback_head rcu;
+};
+
+struct dev_cgroup {
+ struct cgroup_subsys_state css;
+ struct list_head exceptions;
+ enum devcg_behavior behavior;
+};
+
+struct crypto_async_request;
+
+typedef void (*crypto_completion_t)(struct crypto_async_request *, int);
+
+struct crypto_async_request {
+ struct list_head list;
+ crypto_completion_t complete;
+ void *data;
+ struct crypto_tfm *tfm;
+ u32 flags;
+};
+
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
+
+struct crypto_template;
+
+struct crypto_spawn;
+
+struct crypto_instance {
+ struct crypto_alg alg;
+ struct crypto_template *tmpl;
+ union {
+ struct hlist_node list;
+ struct crypto_spawn *spawns;
+ };
+ void *__ctx[0];
+};
+
+struct crypto_spawn {
+ struct list_head list;
+ struct crypto_alg *alg;
+ union {
+ struct crypto_instance *inst;
+ struct crypto_spawn *next;
+ };
+ const struct crypto_type *frontend;
+ u32 mask;
+ bool dead;
+ bool registered;
+};
+
+struct rtattr;
+
+struct crypto_template {
+ struct list_head list;
+ struct hlist_head instances;
+ struct module *module;
+ int (*create)(struct crypto_template *, struct rtattr **);
+ char name[128];
+};
+
+enum {
+ CRYPTO_MSG_ALG_REQUEST = 0,
+ CRYPTO_MSG_ALG_REGISTER = 1,
+ CRYPTO_MSG_ALG_LOADED = 2,
+};
+
+struct crypto_larval {
+ struct crypto_alg alg;
+ struct crypto_alg *adult;
+ struct completion completion;
+ u32 mask;
+};
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_comp {
+ struct crypto_tfm base;
+};
+
+enum {
+ CRYPTOA_UNSPEC = 0,
+ CRYPTOA_ALG = 1,
+ CRYPTOA_TYPE = 2,
+ CRYPTOA_U32 = 3,
+ __CRYPTOA_MAX = 4,
+};
+
+struct crypto_attr_alg {
+ char name[128];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
+struct crypto_attr_u32 {
+ u32 num;
+};
+
+struct rtattr {
+ short unsigned int rta_len;
+ short unsigned int rta_type;
+};
+
+struct crypto_queue {
+ struct list_head list;
+ struct list_head *backlog;
+ unsigned int qlen;
+ unsigned int max_qlen;
+};
+
+struct scatter_walk {
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
+struct aead_request {
+ struct crypto_async_request base;
+ unsigned int assoclen;
+ unsigned int cryptlen;
+ u8 *iv;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ void *__ctx[0];
+};
+
+struct crypto_aead;
+
+struct aead_alg {
+ int (*setkey)(struct crypto_aead *, const u8 *, unsigned int);
+ int (*setauthsize)(struct crypto_aead *, unsigned int);
+ int (*encrypt)(struct aead_request *);
+ int (*decrypt)(struct aead_request *);
+ int (*init)(struct crypto_aead *);
+ void (*exit)(struct crypto_aead *);
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+ unsigned int chunksize;
+ struct crypto_alg base;
+};
+
+struct crypto_aead {
+ unsigned int authsize;
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+struct aead_instance {
+ void (*free)(struct aead_instance *);
+ union {
+ struct {
+ char head[64];
+ struct crypto_instance base;
+ } s;
+ struct aead_alg alg;
+ };
+};
+
+struct crypto_aead_spawn {
+ struct crypto_spawn base;
+};
+
+enum crypto_attr_type_t {
+ CRYPTOCFGA_UNSPEC = 0,
+ CRYPTOCFGA_PRIORITY_VAL = 1,
+ CRYPTOCFGA_REPORT_LARVAL = 2,
+ CRYPTOCFGA_REPORT_HASH = 3,
+ CRYPTOCFGA_REPORT_BLKCIPHER = 4,
+ CRYPTOCFGA_REPORT_AEAD = 5,
+ CRYPTOCFGA_REPORT_COMPRESS = 6,
+ CRYPTOCFGA_REPORT_RNG = 7,
+ CRYPTOCFGA_REPORT_CIPHER = 8,
+ CRYPTOCFGA_REPORT_AKCIPHER = 9,
+ CRYPTOCFGA_REPORT_KPP = 10,
+ CRYPTOCFGA_REPORT_ACOMP = 11,
+ CRYPTOCFGA_STAT_LARVAL = 12,
+ CRYPTOCFGA_STAT_HASH = 13,
+ CRYPTOCFGA_STAT_BLKCIPHER = 14,
+ CRYPTOCFGA_STAT_AEAD = 15,
+ CRYPTOCFGA_STAT_COMPRESS = 16,
+ CRYPTOCFGA_STAT_RNG = 17,
+ CRYPTOCFGA_STAT_CIPHER = 18,
+ CRYPTOCFGA_STAT_AKCIPHER = 19,
+ CRYPTOCFGA_STAT_KPP = 20,
+ CRYPTOCFGA_STAT_ACOMP = 21,
+ __CRYPTOCFGA_MAX = 22,
+};
+
+struct crypto_report_aead {
+ char type[64];
+ char geniv[64];
+ unsigned int blocksize;
+ unsigned int maxauthsize;
+ unsigned int ivsize;
+};
+
+struct crypto_sync_skcipher;
+
+struct aead_geniv_ctx {
+ spinlock_t lock;
+ struct crypto_aead *child;
+ struct crypto_sync_skcipher *sknull;
+ u8 salt[0];
+};
+
+struct crypto_rng;
+
+struct rng_alg {
+ int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int);
+ int (*seed)(struct crypto_rng *, const u8 *, unsigned int);
+ void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int);
+ unsigned int seedsize;
+ struct crypto_alg base;
+};
+
+struct crypto_rng {
+ struct crypto_tfm base;
+};
+
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+struct skcipher_request {
+ unsigned int cryptlen;
+ u8 *iv;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ struct crypto_async_request base;
+ void *__ctx[0];
+};
+
+struct crypto_skcipher {
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+struct crypto_sync_skcipher___2 {
+ struct crypto_skcipher base;
+};
+
+struct skcipher_alg {
+ int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int);
+ int (*encrypt)(struct skcipher_request *);
+ int (*decrypt)(struct skcipher_request *);
+ int (*init)(struct crypto_skcipher *);
+ void (*exit)(struct crypto_skcipher *);
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+ unsigned int chunksize;
+ unsigned int walksize;
+ struct crypto_alg base;
+};
+
+struct skcipher_instance {
+ void (*free)(struct skcipher_instance *);
+ union {
+ struct {
+ char head[64];
+ struct crypto_instance base;
+ } s;
+ struct skcipher_alg alg;
+ };
+};
+
+struct crypto_skcipher_spawn {
+ struct crypto_spawn base;
+};
+
+struct skcipher_walk {
+ union {
+ struct {
+ struct page *page;
+ long unsigned int offset;
+ } phys;
+ struct {
+ u8 *page;
+ void *addr;
+ } virt;
+ } src;
+ union {
+ struct {
+ struct page *page;
+ long unsigned int offset;
+ } phys;
+ struct {
+ u8 *page;
+ void *addr;
+ } virt;
+ } dst;
+ struct scatter_walk in;
+ unsigned int nbytes;
+ struct scatter_walk out;
+ unsigned int total;
+ struct list_head buffers;
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+ unsigned int ivsize;
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
+
+struct skcipher_ctx_simple {
+ struct crypto_cipher *cipher;
+};
+
+struct crypto_report_blkcipher {
+ char type[64];
+ char geniv[64];
+ unsigned int blocksize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+};
+
+enum {
+ SKCIPHER_WALK_PHYS = 1,
+ SKCIPHER_WALK_SLOW = 2,
+ SKCIPHER_WALK_COPY = 4,
+ SKCIPHER_WALK_DIFF = 8,
+ SKCIPHER_WALK_SLEEP = 16,
+};
+
+struct skcipher_walk_buffer {
+ struct list_head entry;
+ struct scatter_walk dst;
+ unsigned int len;
+ u8 *data;
+ u8 buffer[0];
+};
+
+struct hash_alg_common {
+ unsigned int digestsize;
+ unsigned int statesize;
+ struct crypto_alg base;
+};
+
+struct ahash_request {
+ struct crypto_async_request base;
+ unsigned int nbytes;
+ struct scatterlist *src;
+ u8 *result;
+ void *priv;
+ void *__ctx[0];
+};
+
+struct crypto_ahash;
+
+struct ahash_alg {
+ int (*init)(struct ahash_request *);
+ int (*update)(struct ahash_request *);
+ int (*final)(struct ahash_request *);
+ int (*finup)(struct ahash_request *);
+ int (*digest)(struct ahash_request *);
+ int (*export)(struct ahash_request *, void *);
+ int (*import)(struct ahash_request *, const void *);
+ int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int);
+ struct hash_alg_common halg;
+};
+
+struct crypto_ahash {
+ int (*init)(struct ahash_request *);
+ int (*update)(struct ahash_request *);
+ int (*final)(struct ahash_request *);
+ int (*finup)(struct ahash_request *);
+ int (*digest)(struct ahash_request *);
+ int (*export)(struct ahash_request *, void *);
+ int (*import)(struct ahash_request *, const void *);
+ int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int);
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+struct shash_alg {
+ int (*init)(struct shash_desc *);
+ int (*update)(struct shash_desc *, const u8 *, unsigned int);
+ int (*final)(struct shash_desc *, u8 *);
+ int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *);
+ int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *);
+ int (*export)(struct shash_desc *, void *);
+ int (*import)(struct shash_desc *, const void *);
+ int (*setkey)(struct crypto_shash *, const u8 *, unsigned int);
+ int (*init_tfm)(struct crypto_shash *);
+ void (*exit_tfm)(struct crypto_shash *);
+ unsigned int descsize;
+ int: 32;
+ unsigned int digestsize;
+ unsigned int statesize;
+ struct crypto_alg base;
+};
+
+struct crypto_hash_walk {
+ char *data;
+ unsigned int offset;
+ unsigned int alignmask;
+ struct page *pg;
+ unsigned int entrylen;
+ unsigned int total;
+ struct scatterlist *sg;
+ unsigned int flags;
+};
+
+struct ahash_instance {
+ void (*free)(struct ahash_instance *);
+ union {
+ struct {
+ char head[72];
+ struct crypto_instance base;
+ } s;
+ struct ahash_alg alg;
+ };
+};
+
+struct crypto_ahash_spawn {
+ struct crypto_spawn base;
+};
+
+struct crypto_report_hash {
+ char type[64];
+ unsigned int blocksize;
+ unsigned int digestsize;
+};
+
+struct ahash_request_priv {
+ crypto_completion_t complete;
+ void *data;
+ u8 *result;
+ u32 flags;
+ void *ubuf[0];
+};
+
+struct shash_instance {
+ void (*free)(struct shash_instance *);
+ union {
+ struct {
+ char head[96];
+ struct crypto_instance base;
+ } s;
+ struct shash_alg alg;
+ };
+};
+
+struct crypto_shash_spawn {
+ struct crypto_spawn base;
+};
+
+struct crypto_report_akcipher {
+ char type[64];
+};
+
+struct akcipher_request {
+ struct crypto_async_request base;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ unsigned int src_len;
+ unsigned int dst_len;
+ void *__ctx[0];
+};
+
+struct crypto_akcipher {
+ struct crypto_tfm base;
+};
+
+struct akcipher_alg {
+ int (*sign)(struct akcipher_request *);
+ int (*verify)(struct akcipher_request *);
+ int (*encrypt)(struct akcipher_request *);
+ int (*decrypt)(struct akcipher_request *);
+ int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int);
+ int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int);
+ unsigned int (*max_size)(struct crypto_akcipher *);
+ int (*init)(struct crypto_akcipher *);
+ void (*exit)(struct crypto_akcipher *);
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+struct akcipher_instance {
+ void (*free)(struct akcipher_instance *);
+ union {
+ struct {
+ char head[80];
+ struct crypto_instance base;
+ } s;
+ struct akcipher_alg alg;
+ };
+};
+
+struct crypto_akcipher_spawn {
+ struct crypto_spawn base;
+};
+
+struct crypto_report_kpp {
+ char type[64];
+};
+
+struct kpp_request {
+ struct crypto_async_request base;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ unsigned int src_len;
+ unsigned int dst_len;
+ void *__ctx[0];
+};
+
+struct crypto_kpp {
+ struct crypto_tfm base;
+};
+
+struct kpp_alg {
+ int (*set_secret)(struct crypto_kpp *, const void *, unsigned int);
+ int (*generate_public_key)(struct kpp_request *);
+ int (*compute_shared_secret)(struct kpp_request *);
+ unsigned int (*max_size)(struct crypto_kpp *);
+ int (*init)(struct crypto_kpp *);
+ void (*exit)(struct crypto_kpp *);
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+struct crypto_report_acomp {
+ char type[64];
+};
+
+struct acomp_req {
+ struct crypto_async_request base;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ unsigned int slen;
+ unsigned int dlen;
+ u32 flags;
+ void *__ctx[0];
+};
+
+struct crypto_acomp {
+ int (*compress)(struct acomp_req *);
+ int (*decompress)(struct acomp_req *);
+ void (*dst_free)(struct scatterlist *);
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+struct acomp_alg {
+ int (*compress)(struct acomp_req *);
+ int (*decompress)(struct acomp_req *);
+ void (*dst_free)(struct scatterlist *);
+ int (*init)(struct crypto_acomp *);
+ void (*exit)(struct crypto_acomp *);
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+struct crypto_report_comp {
+ char type[64];
+};
+
+struct crypto_scomp {
+ struct crypto_tfm base;
+};
+
+struct scomp_alg {
+ void * (*alloc_ctx)(struct crypto_scomp *);
+ void (*free_ctx)(struct crypto_scomp *, void *);
+ int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *);
+ int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *);
+ struct crypto_alg base;
+};
+
+struct scomp_scratch {
+ spinlock_t lock;
+ void *src;
+ void *dst;
+};
+
+struct cryptomgr_param {
+ struct rtattr *tb[34];
+ struct {
+ struct rtattr attr;
+ struct crypto_attr_type data;
+ } type;
+ union {
+ struct rtattr attr;
+ struct {
+ struct rtattr attr;
+ struct crypto_attr_alg data;
+ } alg;
+ struct {
+ struct rtattr attr;
+ struct crypto_attr_u32 data;
+ } nu32;
+ } attrs[32];
+ char template[128];
+ struct crypto_larval *larval;
+ u32 otype;
+ u32 omask;
+};
+
+struct crypto_test_param {
+ char driver[128];
+ char alg[128];
+ u32 type;
+};
+
+struct md5_state {
+ u32 hash[4];
+ u32 block[16];
+ u64 byte_count;
+};
+
+struct sha256_state {
+ u32 state[8];
+ u64 count;
+ u8 buf[64];
+};
+
+typedef struct {
+ u64 a;
+ u64 b;
+} u128;
+
+typedef struct {
+ __be64 a;
+ __be64 b;
+} be128;
+
+typedef struct {
+ __le64 b;
+ __le64 a;
+} le128;
+
+struct gf128mul_4k {
+ be128 t[256];
+};
+
+struct gf128mul_64k {
+ struct gf128mul_4k *t[16];
+};
+
+struct crypto_rfc3686_ctx {
+ struct crypto_skcipher *child;
+ u8 nonce[4];
+};
+
+struct crypto_rfc3686_req_ctx {
+ u8 iv[16];
+ struct skcipher_request subreq;
+};
+
+struct gcm_instance_ctx {
+ struct crypto_skcipher_spawn ctr;
+ struct crypto_ahash_spawn ghash;
+};
+
+struct crypto_gcm_ctx {
+ struct crypto_skcipher *ctr;
+ struct crypto_ahash *ghash;
+};
+
+struct crypto_rfc4106_ctx {
+ struct crypto_aead *child;
+ u8 nonce[4];
+};
+
+struct crypto_rfc4106_req_ctx {
+ struct scatterlist src[3];
+ struct scatterlist dst[3];
+ struct aead_request subreq;
+};
+
+struct crypto_rfc4543_instance_ctx {
+ struct crypto_aead_spawn aead;
+};
+
+struct crypto_rfc4543_ctx {
+ struct crypto_aead *child;
+ struct crypto_sync_skcipher___2 *null;
+ u8 nonce[4];
+};
+
+struct crypto_rfc4543_req_ctx {
+ struct aead_request subreq;
+};
+
+struct crypto_gcm_ghash_ctx {
+ unsigned int cryptlen;
+ struct scatterlist *src;
+ int (*complete)(struct aead_request *, u32);
+};
+
+struct crypto_gcm_req_priv_ctx {
+ u8 iv[16];
+ u8 auth_tag[16];
+ u8 iauth_tag[16];
+ struct scatterlist src[3];
+ struct scatterlist dst[3];
+ struct scatterlist sg;
+ struct crypto_gcm_ghash_ctx ghash_ctx;
+ union {
+ struct ahash_request ahreq;
+ struct skcipher_request skreq;
+ } u;
+};
+
+struct crypto_aes_ctx {
+ u32 key_enc[60];
+ u32 key_dec[60];
+ u32 key_length;
+};
+
+typedef unsigned char Byte;
+
+typedef long unsigned int uLong;
+
+struct internal_state;
+
+struct z_stream_s {
+ const Byte *next_in;
+ uLong avail_in;
+ uLong total_in;
+ Byte *next_out;
+ uLong avail_out;
+ uLong total_out;
+ char *msg;
+ struct internal_state *state;
+ void *workspace;
+ int data_type;
+ uLong adler;
+ uLong reserved;
+};
+
+struct internal_state {
+ int dummy;
+};
+
+struct deflate_ctx {
+ struct z_stream_s comp_stream;
+ struct z_stream_s decomp_stream;
+};
+
+struct chksum_ctx {
+ u32 key;
+};
+
+struct chksum_desc_ctx {
+ u32 crc;
+};
+
+struct crypto_report_rng {
+ char type[64];
+ unsigned int seedsize;
+};
+
+struct prng_context {
+ spinlock_t prng_lock;
+ unsigned char rand_data[16];
+ unsigned char last_rand_data[16];
+ unsigned char DT[16];
+ unsigned char I[16];
+ unsigned char V[16];
+ u32 rand_data_valid;
+ struct crypto_cipher *tfm;
+ u32 flags;
+};
+
+struct ghash_ctx {
+ struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[16];
+ u32 bytes;
+};
+
+struct sockaddr_alg {
+ __u16 salg_family;
+ __u8 salg_type[14];
+ __u32 salg_feat;
+ __u32 salg_mask;
+ __u8 salg_name[64];
+};
+
+struct af_alg_iv {
+ __u32 ivlen;
+ __u8 iv[0];
+};
+
+struct cmsghdr {
+ __kernel_size_t cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct net_proto_family {
+ int family;
+ int (*create)(struct net *, struct socket *, int, int);
+ struct module *owner;
+};
+
+enum {
+ SOCK_WAKE_IO = 0,
+ SOCK_WAKE_WAITD = 1,
+ SOCK_WAKE_SPACE = 2,
+ SOCK_WAKE_URG = 3,
+};
+
+struct af_alg_type;
+
+struct alg_sock {
+ struct sock sk;
+ struct sock *parent;
+ unsigned int refcnt;
+ unsigned int nokey_refcnt;
+ const struct af_alg_type *type;
+ void *private;
+};
+
+struct af_alg_type {
+ void * (*bind)(const char *, u32, u32);
+ void (*release)(void *);
+ int (*setkey)(void *, const u8 *, unsigned int);
+ int (*accept)(void *, struct sock *);
+ int (*accept_nokey)(void *, struct sock *);
+ int (*setauthsize)(void *, unsigned int);
+ struct proto_ops *ops;
+ struct proto_ops *ops_nokey;
+ struct module *owner;
+ char name[14];
+};
+
+struct af_alg_control {
+ struct af_alg_iv *iv;
+ int op;
+ unsigned int aead_assoclen;
+};
+
+struct af_alg_sgl {
+ struct scatterlist sg[17];
+ struct page *pages[16];
+ unsigned int npages;
+};
+
+struct af_alg_tsgl {
+ struct list_head list;
+ unsigned int cur;
+ struct scatterlist sg[0];
+};
+
+struct af_alg_rsgl {
+ struct af_alg_sgl sgl;
+ struct list_head list;
+ size_t sg_num_bytes;
+};
+
+struct af_alg_async_req {
+ struct kiocb *iocb;
+ struct sock *sk;
+ struct af_alg_rsgl first_rsgl;
+ struct af_alg_rsgl *last_rsgl;
+ struct list_head rsgl_list;
+ struct scatterlist *tsgl;
+ unsigned int tsgl_entries;
+ unsigned int outlen;
+ unsigned int areqlen;
+ union {
+ struct aead_request aead_req;
+ struct skcipher_request skcipher_req;
+ } cra_u;
+};
+
+struct af_alg_ctx {
+ struct list_head tsgl_list;
+ void *iv;
+ size_t aead_assoclen;
+ struct crypto_wait wait;
+ size_t used;
+ atomic_t rcvused;
+ bool more;
+ bool merge;
+ bool enc;
+ unsigned int len;
+};
+
+struct alg_type_list {
+ const struct af_alg_type *type;
+ struct list_head list;
+};
+
+struct hash_ctx {
+ struct af_alg_sgl sgl;
+ u8 *result;
+ struct crypto_wait wait;
+ unsigned int len;
+ bool more;
+ struct ahash_request req;
+};
+
+struct biovec_slab {
+ int nr_vecs;
+ char *name;
+ struct kmem_cache *slab;
+};
+
+enum rq_qos_id {
+ RQ_QOS_WBT = 0,
+ RQ_QOS_LATENCY = 1,
+ RQ_QOS_COST = 2,
+};
+
+struct rq_qos_ops;
+
+struct rq_qos {
+ struct rq_qos_ops *ops;
+ struct request_queue *q;
+ enum rq_qos_id id;
+ struct rq_qos *next;
+ struct dentry *debugfs_dir;
+};
+
+enum {
+ sysctl_hung_task_timeout_secs = 0,
+};
+
+enum hctx_type {
+ HCTX_TYPE_DEFAULT = 0,
+ HCTX_TYPE_READ = 1,
+ HCTX_TYPE_POLL = 2,
+ HCTX_MAX_TYPES = 3,
+};
+
+struct rq_qos_ops {
+ void (*throttle)(struct rq_qos *, struct bio *);
+ void (*track)(struct rq_qos *, struct request *, struct bio *);
+ void (*merge)(struct rq_qos *, struct request *, struct bio *);
+ void (*issue)(struct rq_qos *, struct request *);
+ void (*requeue)(struct rq_qos *, struct request *);
+ void (*done)(struct rq_qos *, struct request *);
+ void (*done_bio)(struct rq_qos *, struct bio *);
+ void (*cleanup)(struct rq_qos *, struct bio *);
+ void (*queue_depth_changed)(struct rq_qos *);
+ void (*exit)(struct rq_qos *);
+ const struct blk_mq_debugfs_attr *debugfs_attrs;
+};
+
+struct bio_slab {
+ struct kmem_cache *slab;
+ unsigned int slab_ref;
+ unsigned int slab_size;
+ char name[8];
+};
+
+enum {
+ BLK_MQ_F_SHOULD_MERGE = 1,
+ BLK_MQ_F_TAG_SHARED = 2,
+ BLK_MQ_F_STACKING = 4,
+ BLK_MQ_F_BLOCKING = 32,
+ BLK_MQ_F_NO_SCHED = 64,
+ BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+ BLK_MQ_F_ALLOC_POLICY_BITS = 1,
+ BLK_MQ_S_STOPPED = 0,
+ BLK_MQ_S_TAG_ACTIVE = 1,
+ BLK_MQ_S_SCHED_RESTART = 2,
+ BLK_MQ_S_INACTIVE = 3,
+ BLK_MQ_MAX_DEPTH = 10240,
+ BLK_MQ_CPU_WORK_BATCH = 8,
+};
+
+enum {
+ WBT_RWQ_BG = 0,
+ WBT_RWQ_KSWAPD = 1,
+ WBT_RWQ_DISCARD = 2,
+ WBT_NUM_RWQ = 3,
+};
+
+struct blk_plug_cb;
+
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
+
+struct blk_plug_cb {
+ struct list_head list;
+ blk_plug_cb_fn callback;
+ void *data;
+};
+
+enum {
+ BLK_MQ_REQ_NOWAIT = 1,
+ BLK_MQ_REQ_RESERVED = 2,
+ BLK_MQ_REQ_INTERNAL = 4,
+ BLK_MQ_REQ_PREEMPT = 8,
+};
+
+struct trace_event_raw_block_buffer {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ size_t size;
+ char __data[0];
+};
+
+struct trace_event_raw_block_rq_requeue {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ char rwbs[8];
+ u32 __data_loc_cmd;
+ char __data[0];
+};
+
+struct trace_event_raw_block_rq_complete {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ int error;
+ char rwbs[8];
+ u32 __data_loc_cmd;
+ char __data[0];
+};
+
+struct trace_event_raw_block_rq {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ unsigned int bytes;
+ char rwbs[8];
+ char comm[16];
+ u32 __data_loc_cmd;
+ char __data[0];
+};
+
+struct trace_event_raw_block_bio_bounce {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ char rwbs[8];
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_bio_complete {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ int error;
+ char rwbs[8];
+ char __data[0];
+};
+
+struct trace_event_raw_block_bio_merge {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ char rwbs[8];
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_bio_queue {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ char rwbs[8];
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_get_rq {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ char rwbs[8];
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_plug {
+ struct trace_entry ent;
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_unplug {
+ struct trace_entry ent;
+ int nr_rq;
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_split {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ sector_t new_sector;
+ char rwbs[8];
+ char comm[16];
+ char __data[0];
+};
+
+struct trace_event_raw_block_bio_remap {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ dev_t old_dev;
+ sector_t old_sector;
+ char rwbs[8];
+ char __data[0];
+};
+
+struct trace_event_raw_block_rq_remap {
+ struct trace_entry ent;
+ dev_t dev;
+ sector_t sector;
+ unsigned int nr_sector;
+ dev_t old_dev;
+ sector_t old_sector;
+ unsigned int nr_bios;
+ char rwbs[8];
+ char __data[0];
+};
+
+struct trace_event_data_offsets_block_buffer {};
+
+struct trace_event_data_offsets_block_rq_requeue {
+ u32 cmd;
+};
+
+struct trace_event_data_offsets_block_rq_complete {
+ u32 cmd;
+};
+
+struct trace_event_data_offsets_block_rq {
+ u32 cmd;
+};
+
+struct trace_event_data_offsets_block_bio_bounce {};
+
+struct trace_event_data_offsets_block_bio_complete {};
+
+struct trace_event_data_offsets_block_bio_merge {};
+
+struct trace_event_data_offsets_block_bio_queue {};
+
+struct trace_event_data_offsets_block_get_rq {};
+
+struct trace_event_data_offsets_block_plug {};
+
+struct trace_event_data_offsets_block_unplug {};
+
+struct trace_event_data_offsets_block_split {};
+
+struct trace_event_data_offsets_block_bio_remap {};
+
+struct trace_event_data_offsets_block_rq_remap {};
+
+typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *);
+
+typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *);
+
+typedef void (*btf_trace_block_rq_requeue)(void *, struct request_queue *, struct request *);
+
+typedef void (*btf_trace_block_rq_complete)(void *, struct request *, int, unsigned int);
+
+typedef void (*btf_trace_block_rq_insert)(void *, struct request_queue *, struct request *);
+
+typedef void (*btf_trace_block_rq_issue)(void *, struct request_queue *, struct request *);
+
+typedef void (*btf_trace_block_bio_bounce)(void *, struct request_queue *, struct bio *);
+
+typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *);
+
+typedef void (*btf_trace_block_bio_backmerge)(void *, struct request_queue *, struct request *, struct bio *);
+
+typedef void (*btf_trace_block_bio_frontmerge)(void *, struct request_queue *, struct request *, struct bio *);
+
+typedef void (*btf_trace_block_bio_queue)(void *, struct request_queue *, struct bio *);
+
+typedef void (*btf_trace_block_getrq)(void *, struct request_queue *, struct bio *, int);
+
+typedef void (*btf_trace_block_sleeprq)(void *, struct request_queue *, struct bio *, int);
+
+typedef void (*btf_trace_block_plug)(void *, struct request_queue *);
+
+typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool);
+
+typedef void (*btf_trace_block_split)(void *, struct request_queue *, struct bio *, unsigned int);
+
+typedef void (*btf_trace_block_bio_remap)(void *, struct request_queue *, struct bio *, dev_t, sector_t);
+
+typedef void (*btf_trace_block_rq_remap)(void *, struct request_queue *, struct request *, dev_t, sector_t);
+
+struct queue_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct request_queue *, char *);
+ ssize_t (*store)(struct request_queue *, const char *, size_t);
+};
+
+enum {
+ BLK_MQ_NO_TAG = -1,
+ BLK_MQ_TAG_MIN = 1,
+ BLK_MQ_TAG_MAX = -2,
+};
+
+enum {
+ REQ_FSEQ_PREFLUSH = 1,
+ REQ_FSEQ_DATA = 2,
+ REQ_FSEQ_POSTFLUSH = 4,
+ REQ_FSEQ_DONE = 8,
+ REQ_FSEQ_ACTIONS = 7,
+ FLUSH_PENDING_TIMEOUT = 1250,
+};
+
+enum blk_default_limits {
+ BLK_MAX_SEGMENTS = 128,
+ BLK_SAFE_MAX_SECTORS = 255,
+ BLK_DEF_MAX_SECTORS = 2560,
+ BLK_MAX_SEGMENT_SIZE = 65536,
+ BLK_SEG_BOUNDARY_MASK = -1,
+};
+
+enum {
+ ICQ_EXITED = 4,
+ ICQ_DESTROYED = 8,
+};
+
+struct rq_map_data {
+ struct page **pages;
+ int page_order;
+ int nr_entries;
+ long unsigned int offset;
+ int null_mapped;
+ int from_user;
+};
+
+struct bio_map_data {
+ int is_our_pages;
+ struct iov_iter iter;
+ struct iovec iov[0];
+};
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
+
+enum {
+ BLK_MQ_UNIQUE_TAG_BITS = 16,
+ BLK_MQ_UNIQUE_TAG_MASK = 65535,
+};
+
+struct mq_inflight {
+ struct hd_struct *part;
+ unsigned int inflight[2];
+};
+
+struct flush_busy_ctx_data {
+ struct blk_mq_hw_ctx *hctx;
+ struct list_head *list;
+};
+
+struct dispatch_rq_data {
+ struct blk_mq_hw_ctx *hctx;
+ struct request *rq;
+};
+
+struct rq_iter_data {
+ struct blk_mq_hw_ctx *hctx;
+ bool has_rq;
+};
+
+struct blk_mq_qe_pair {
+ struct list_head node;
+ struct request_queue *q;
+ struct elevator_type *type;
+};
+
+struct sbq_wait {
+ struct sbitmap_queue *sbq;
+ struct wait_queue_entry wait;
+};
+
+typedef bool busy_iter_fn(struct blk_mq_hw_ctx *, struct request *, void *, bool);
+
+typedef bool busy_tag_iter_fn(struct request *, void *, bool);
+
+struct bt_iter_data {
+ struct blk_mq_hw_ctx *hctx;
+ busy_iter_fn *fn;
+ void *data;
+ bool reserved;
+};
+
+struct bt_tags_iter_data {
+ struct blk_mq_tags *tags;
+ busy_tag_iter_fn *fn;
+ void *data;
+ unsigned int flags;
+};
+
+struct blk_queue_stats {
+ struct list_head callbacks;
+ spinlock_t lock;
+ bool enable_accounting;
+};
+
+struct blk_mq_ctx_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_mq_ctx *, char *);
+ ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
+};
+
+struct blk_mq_hw_ctx_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
+ ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
+};
+
+struct hd_geometry {
+ unsigned char heads;
+ unsigned char sectors;
+ short unsigned int cylinders;
+ long unsigned int start;
+};
+
+struct blkpg_ioctl_arg {
+ int op;
+ int flags;
+ int datalen;
+ void *data;
+};
+
+struct blkpg_partition {
+ long long int start;
+ long long int length;
+ int pno;
+ char devname[64];
+ char volname[64];
+};
+
+struct pr_reservation {
+ __u64 key;
+ __u32 type;
+ __u32 flags;
+};
+
+struct pr_registration {
+ __u64 old_key;
+ __u64 new_key;
+ __u32 flags;
+ __u32 __pad;
+};
+
+struct pr_preempt {
+ __u64 old_key;
+ __u64 new_key;
+ __u32 type;
+ __u32 flags;
+};
+
+struct pr_clear {
+ __u64 key;
+ __u32 flags;
+ __u32 __pad;
+};
+
+struct klist_node;
+
+struct klist {
+ spinlock_t k_lock;
+ struct list_head k_list;
+ void (*get)(struct klist_node *);
+ void (*put)(struct klist_node *);
+};
+
+struct klist_node {
+ void *n_klist;
+ struct list_head n_node;
+ struct kref n_ref;
+};
+
+struct klist_iter {
+ struct klist *i_klist;
+ struct klist_node *i_cur;
+};
+
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
+enum {
+ DISK_EVENT_FLAG_POLL = 1,
+ DISK_EVENT_FLAG_UEVENT = 2,
+};
+
+struct disk_events {
+ struct list_head node;
+ struct gendisk *disk;
+ spinlock_t lock;
+ struct mutex block_mutex;
+ int block;
+ unsigned int pending;
+ unsigned int clearing;
+ long int poll_msecs;
+ struct delayed_work dwork;
+};
+
+struct badblocks {
+ struct device *dev;
+ int count;
+ int unacked_exist;
+ int shift;
+ u64 *page;
+ int changed;
+ seqlock_t lock;
+ sector_t sector;
+ sector_t size;
+};
+
+struct disk_part_iter {
+ struct gendisk *disk;
+ struct hd_struct *part;
+ int idx;
+ unsigned int flags;
+};
+
+struct blk_major_name {
+ struct blk_major_name *next;
+ int major;
+ char name[16];
+};
+
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP = 2,
+ IOPRIO_WHO_USER = 3,
+};
+
+struct parsed_partitions {
+ struct block_device *bdev;
+ char name[32];
+ struct {
+ sector_t from;
+ sector_t size;
+ int flags;
+ bool has_info;
+ struct partition_meta_info info;
+ } *parts;
+ int next;
+ int limit;
+ bool access_beyond_eod;
+ char *pp_buf;
+};
+
+typedef struct {
+ struct page *v;
+} Sector;
+
+struct fat_boot_sector {
+ __u8 ignored[3];
+ __u8 system_id[8];
+ __u8 sector_size[2];
+ __u8 sec_per_clus;
+ __le16 reserved;
+ __u8 fats;
+ __u8 dir_entries[2];
+ __u8 sectors[2];
+ __u8 media;
+ __le16 fat_length;
+ __le16 secs_track;
+ __le16 heads;
+ __le32 hidden;
+ __le32 total_sect;
+ union {
+ struct {
+ __u8 drive_number;
+ __u8 state;
+ __u8 signature;
+ __u8 vol_id[4];
+ __u8 vol_label[11];
+ __u8 fs_type[8];
+ } fat16;
+ struct {
+ __le32 length;
+ __le16 flags;
+ __u8 version[2];
+ __le32 root_cluster;
+ __le16 info_sector;
+ __le16 backup_boot;
+ __le16 reserved2[6];
+ __u8 drive_number;
+ __u8 state;
+ __u8 signature;
+ __u8 vol_id[4];
+ __u8 vol_label[11];
+ __u8 fs_type[8];
+ } fat32;
+ };
+};
+
+struct msdos_partition {
+ u8 boot_ind;
+ u8 head;
+ u8 sector;
+ u8 cyl;
+ u8 sys_ind;
+ u8 end_head;
+ u8 end_sector;
+ u8 end_cyl;
+ __le32 start_sect;
+ __le32 nr_sects;
+};
+
+enum msdos_sys_ind {
+ DOS_EXTENDED_PARTITION = 5,
+ LINUX_EXTENDED_PARTITION = 133,
+ WIN98_EXTENDED_PARTITION = 15,
+ LINUX_DATA_PARTITION = 131,
+ LINUX_LVM_PARTITION = 142,
+ LINUX_RAID_PARTITION = 253,
+ SOLARIS_X86_PARTITION = 130,
+ NEW_SOLARIS_X86_PARTITION = 191,
+ DM6_AUX1PARTITION = 81,
+ DM6_AUX3PARTITION = 83,
+ DM6_PARTITION = 84,
+ EZD_PARTITION = 85,
+ FREEBSD_PARTITION = 165,
+ OPENBSD_PARTITION = 166,
+ NETBSD_PARTITION = 169,
+ BSDI_PARTITION = 183,
+ MINIX_PARTITION = 129,
+ UNIXWARE_PARTITION = 99,
+};
+
+struct _gpt_header {
+ __le64 signature;
+ __le32 revision;
+ __le32 header_size;
+ __le32 header_crc32;
+ __le32 reserved1;
+ __le64 my_lba;
+ __le64 alternate_lba;
+ __le64 first_usable_lba;
+ __le64 last_usable_lba;
+ efi_guid_t disk_guid;
+ __le64 partition_entry_lba;
+ __le32 num_partition_entries;
+ __le32 sizeof_partition_entry;
+ __le32 partition_entry_array_crc32;
+} __attribute__((packed));
+
+typedef struct _gpt_header gpt_header;
+
+struct _gpt_entry_attributes {
+ u64 required_to_function: 1;
+ u64 reserved: 47;
+ u64 type_guid_specific: 16;
+};
+
+typedef struct _gpt_entry_attributes gpt_entry_attributes;
+
+struct _gpt_entry {
+ efi_guid_t partition_type_guid;
+ efi_guid_t unique_partition_guid;
+ __le64 starting_lba;
+ __le64 ending_lba;
+ gpt_entry_attributes attributes;
+ __le16 partition_name[36];
+};
+
+typedef struct _gpt_entry gpt_entry;
+
+struct _gpt_mbr_record {
+ u8 boot_indicator;
+ u8 start_head;
+ u8 start_sector;
+ u8 start_track;
+ u8 os_type;
+ u8 end_head;
+ u8 end_sector;
+ u8 end_track;
+ __le32 starting_lba;
+ __le32 size_in_lba;
+};
+
+typedef struct _gpt_mbr_record gpt_mbr_record;
+
+struct _legacy_mbr {
+ u8 boot_code[440];
+ __le32 unique_mbr_signature;
+ __le16 unknown;
+ gpt_mbr_record partition_record[4];
+ __le16 signature;
+} __attribute__((packed));
+
+typedef struct _legacy_mbr legacy_mbr;
+
+struct rq_wait {
+ wait_queue_head_t wait;
+ atomic_t inflight;
+};
+
+struct rq_depth {
+ unsigned int max_depth;
+ int scale_step;
+ bool scaled_max;
+ unsigned int queue_depth;
+ unsigned int default_depth;
+};
+
+typedef bool acquire_inflight_cb_t(struct rq_wait *, void *);
+
+typedef void cleanup_cb_t(struct rq_wait *, void *);
+
+struct rq_qos_wait_data {
+ struct wait_queue_entry wq;
+ struct task_struct *task;
+ struct rq_wait *rqw;
+ acquire_inflight_cb_t *cb;
+ void *private_data;
+ bool got_token;
+};
+
+struct request_sense;
+
+struct cdrom_generic_command {
+ unsigned char cmd[12];
+ unsigned char *buffer;
+ unsigned int buflen;
+ int stat;
+ struct request_sense *sense;
+ unsigned char data_direction;
+ int quiet;
+ int timeout;
+ void *reserved[1];
+};
+
+struct request_sense {
+ __u8 error_code: 7;
+ __u8 valid: 1;
+ __u8 segment_number;
+ __u8 sense_key: 4;
+ __u8 reserved2: 1;
+ __u8 ili: 1;
+ __u8 reserved1: 2;
+ __u8 information[4];
+ __u8 add_sense_len;
+ __u8 command_info[4];
+ __u8 asc;
+ __u8 ascq;
+ __u8 fruc;
+ __u8 sks[3];
+ __u8 asb[46];
+};
+
+struct scsi_ioctl_command {
+ unsigned int inlen;
+ unsigned int outlen;
+ unsigned char data[0];
+};
+
+enum scsi_device_event {
+ SDEV_EVT_MEDIA_CHANGE = 1,
+ SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2,
+ SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3,
+ SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4,
+ SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5,
+ SDEV_EVT_LUN_CHANGE_REPORTED = 6,
+ SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7,
+ SDEV_EVT_POWER_ON_RESET_OCCURRED = 8,
+ SDEV_EVT_FIRST = 1,
+ SDEV_EVT_LAST = 8,
+ SDEV_EVT_MAXBITS = 9,
+};
+
+struct scsi_request {
+ unsigned char __cmd[16];
+ unsigned char *cmd;
+ short unsigned int cmd_len;
+ int result;
+ unsigned int sense_len;
+ unsigned int resid_len;
+ int retries;
+ void *sense;
+};
+
+struct sg_io_hdr {
+ int interface_id;
+ int dxfer_direction;
+ unsigned char cmd_len;
+ unsigned char mx_sb_len;
+ short unsigned int iovec_count;
+ unsigned int dxfer_len;
+ void *dxferp;
+ unsigned char *cmdp;
+ void *sbp;
+ unsigned int timeout;
+ unsigned int flags;
+ int pack_id;
+ void *usr_ptr;
+ unsigned char status;
+ unsigned char masked_status;
+ unsigned char msg_status;
+ unsigned char sb_len_wr;
+ short unsigned int host_status;
+ short unsigned int driver_status;
+ int resid;
+ unsigned int duration;
+ unsigned int info;
+};
+
+struct blk_cmd_filter {
+ long unsigned int read_ok[4];
+ long unsigned int write_ok[4];
+};
+
+enum {
+ OMAX_SB_LEN = 16,
+};
+
+struct bsg_device {
+ struct request_queue *queue;
+ spinlock_t lock;
+ struct hlist_node dev_list;
+ refcount_t ref_count;
+ char name[20];
+ int max_queue;
+};
+
+typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t);
+
+typedef void blkcg_pol_init_cpd_fn(struct blkcg_policy_data *);
+
+typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *);
+
+typedef void blkcg_pol_bind_cpd_fn(struct blkcg_policy_data *);
+
+typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(gfp_t, struct request_queue *, struct blkcg *);
+
+typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *);
+
+typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *);
+
+typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *);
+
+typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *);
+
+typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *);
+
+typedef size_t blkcg_pol_stat_pd_fn(struct blkg_policy_data *, char *, size_t);
+
+struct blkcg_policy {
+ int plid;
+ struct cftype *dfl_cftypes;
+ struct cftype *legacy_cftypes;
+ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
+ blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_free_cpd_fn *cpd_free_fn;
+ blkcg_pol_bind_cpd_fn *cpd_bind_fn;
+ blkcg_pol_alloc_pd_fn *pd_alloc_fn;
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_online_pd_fn *pd_online_fn;
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
+ blkcg_pol_free_pd_fn *pd_free_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+ blkcg_pol_stat_pd_fn *pd_stat_fn;
+};
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ char *body;
+};
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ = 0,
+ BLKG_RWSTAT_WRITE = 1,
+ BLKG_RWSTAT_SYNC = 2,
+ BLKG_RWSTAT_ASYNC = 3,
+ BLKG_RWSTAT_DISCARD = 4,
+ BLKG_RWSTAT_NR = 5,
+ BLKG_RWSTAT_TOTAL = 5,
+};
+
+struct blkg_rwstat {
+ struct percpu_counter cpu_cnt[5];
+ atomic64_t aux_cnt[5];
+};
+
+struct blkg_rwstat_sample {
+ u64 cnt[5];
+};
+
+struct throtl_service_queue {
+ struct throtl_service_queue *parent_sq;
+ struct list_head queued[2];
+ unsigned int nr_queued[2];
+ struct rb_root_cached pending_tree;
+ unsigned int nr_pending;
+ long unsigned int first_pending_disptime;
+ struct timer_list pending_timer;
+};
+
+struct latency_bucket {
+ long unsigned int total_latency;
+ int samples;
+};
+
+struct avg_latency_bucket {
+ long unsigned int latency;
+ bool valid;
+};
+
+struct throtl_data {
+ struct throtl_service_queue service_queue;
+ struct request_queue *queue;
+ unsigned int nr_queued[2];
+ unsigned int throtl_slice;
+ struct work_struct dispatch_work;
+ unsigned int limit_index;
+ bool limit_valid[2];
+ long unsigned int low_upgrade_time;
+ long unsigned int low_downgrade_time;
+ unsigned int scale;
+ struct latency_bucket tmp_buckets[18];
+ struct avg_latency_bucket avg_buckets[18];
+ struct latency_bucket *latency_buckets[2];
+ long unsigned int last_calculate_time;
+ long unsigned int filtered_latency;
+ bool track_bio_latency;
+};
+
+struct throtl_grp;
+
+struct throtl_qnode {
+ struct list_head node;
+ struct bio_list bios;
+ struct throtl_grp *tg;
+};
+
+struct throtl_grp {
+ struct blkg_policy_data pd;
+ struct rb_node rb_node;
+ struct throtl_data *td;
+ struct throtl_service_queue service_queue;
+ struct throtl_qnode qnode_on_self[2];
+ struct throtl_qnode qnode_on_parent[2];
+ long unsigned int disptime;
+ unsigned int flags;
+ bool has_rules[2];
+ uint64_t bps[4];
+ uint64_t bps_conf[4];
+ unsigned int iops[4];
+ unsigned int iops_conf[4];
+ uint64_t bytes_disp[2];
+ unsigned int io_disp[2];
+ long unsigned int last_low_overflow_time[2];
+ uint64_t last_bytes_disp[2];
+ unsigned int last_io_disp[2];
+ long unsigned int last_check_time;
+ long unsigned int latency_target;
+ long unsigned int latency_target_conf;
+ long unsigned int slice_start[2];
+ long unsigned int slice_end[2];
+ long unsigned int last_finish_time;
+ long unsigned int checked_last_finish_time;
+ long unsigned int avg_idletime;
+ long unsigned int idletime_threshold;
+ long unsigned int idletime_threshold_conf;
+ unsigned int bio_cnt;
+ unsigned int bad_bio_cnt;
+ long unsigned int bio_cnt_reset_time;
+ struct blkg_rwstat stat_bytes;
+ struct blkg_rwstat stat_ios;
+};
+
+enum tg_state_flags {
+ THROTL_TG_PENDING = 1,
+ THROTL_TG_WAS_EMPTY = 2,
+};
+
+enum {
+ LIMIT_LOW = 0,
+ LIMIT_MAX = 1,
+ LIMIT_CNT = 2,
+};
+
+struct virtio_device_id {
+ __u32 device;
+ __u32 vendor;
+};
+
+struct virtio_device;
+
+struct virtqueue {
+ struct list_head list;
+ void (*callback)(struct virtqueue *);
+ const char *name;
+ struct virtio_device *vdev;
+ unsigned int index;
+ unsigned int num_free;
+ void *priv;
+};
+
+struct vringh_config_ops;
+
+struct virtio_config_ops;
+
+struct virtio_device {
+ int index;
+ bool failed;
+ bool config_enabled;
+ bool config_change_pending;
+ spinlock_t config_lock;
+ struct device dev;
+ struct virtio_device_id id;
+ const struct virtio_config_ops *config;
+ const struct vringh_config_ops *vringh_config;
+ struct list_head vqs;
+ u64 features;
+ void *priv;
+};
+
+typedef void vq_callback_t(struct virtqueue *);
+
+struct virtio_config_ops {
+ void (*get)(struct virtio_device *, unsigned int, void *, unsigned int);
+ void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int);
+ u32 (*generation)(struct virtio_device *);
+ u8 (*get_status)(struct virtio_device *);
+ void (*set_status)(struct virtio_device *, u8);
+ void (*reset)(struct virtio_device *);
+ int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, vq_callback_t **, const char * const *, const bool *, struct irq_affinity *);
+ void (*del_vqs)(struct virtio_device *);
+ u64 (*get_features)(struct virtio_device *);
+ int (*finalize_features)(struct virtio_device *);
+ const char * (*bus_name)(struct virtio_device *);
+ int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *);
+ const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int);
+};
+
+struct show_busy_params {
+ struct seq_file *m;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+typedef void (*swap_func_t)(void *, void *, int);
+
+typedef int (*cmp_r_func_t)(const void *, const void *, const void *);
+
+typedef __kernel_long_t __kernel_ptrdiff_t;
+
+typedef __kernel_ptrdiff_t ptrdiff_t;
+
+struct region {
+ unsigned int start;
+ unsigned int off;
+ unsigned int group_len;
+ unsigned int end;
+};
+
+enum {
+ REG_OP_ISFREE = 0,
+ REG_OP_ALLOC = 1,
+ REG_OP_RELEASE = 2,
+};
+
+typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t);
+
+typedef void sg_free_fn(struct scatterlist *, unsigned int);
+
+struct sg_page_iter {
+ struct scatterlist *sg;
+ unsigned int sg_pgoffset;
+ unsigned int __nents;
+ int __pg_advance;
+};
+
+struct sg_dma_page_iter {
+ struct sg_page_iter base;
+};
+
+struct sg_mapping_iter {
+ struct page *page;
+ void *addr;
+ size_t length;
+ size_t consumed;
+ struct sg_page_iter piter;
+ unsigned int __offset;
+ unsigned int __remaining;
+ unsigned int __flags;
+};
+
+typedef int (*cmp_func)(void *, const struct list_head *, const struct list_head *);
+
+struct __kfifo {
+ unsigned int in;
+ unsigned int out;
+ unsigned int mask;
+ unsigned int esize;
+ void *data;
+};
+
+struct rhltable {
+ struct rhashtable ht;
+};
+
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
+};
+
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
+ unsigned int slot;
+ unsigned int skip;
+ bool end_of_table;
+};
+
+union nested_table {
+ union nested_table *table;
+ struct rhash_lock_head *bucket;
+};
+
+struct once_work {
+ struct work_struct work;
+ struct static_key_true *key;
+};
+
+struct genradix_iter {
+ size_t offset;
+ size_t pos;
+};
+
+struct genradix_node {
+ union {
+ struct genradix_node *children[512];
+ u8 data[4096];
+ };
+};
+
+struct reciprocal_value {
+ u32 m;
+ u8 sh1;
+ u8 sh2;
+};
+
+struct reciprocal_value_adv {
+ u32 m;
+ u8 sh;
+ u8 exp;
+ bool is_wide_m;
+};
+
+enum devm_ioremap_type {
+ DEVM_IOREMAP = 0,
+ DEVM_IOREMAP_UC = 1,
+ DEVM_IOREMAP_WC = 2,
+};
+
+struct pcim_iomap_devres {
+ void *table[6];
+};
+
+typedef struct z_stream_s z_stream;
+
+typedef z_stream *z_streamp;
+
+typedef struct {
+ unsigned char op;
+ unsigned char bits;
+ short unsigned int val;
+} code;
+
+typedef enum {
+ HEAD = 0,
+ FLAGS = 1,
+ TIME = 2,
+ OS = 3,
+ EXLEN = 4,
+ EXTRA = 5,
+ NAME = 6,
+ COMMENT = 7,
+ HCRC = 8,
+ DICTID = 9,
+ DICT = 10,
+ TYPE = 11,
+ TYPEDO = 12,
+ STORED = 13,
+ COPY = 14,
+ TABLE = 15,
+ LENLENS = 16,
+ CODELENS = 17,
+ LEN = 18,
+ LENEXT = 19,
+ DIST = 20,
+ DISTEXT = 21,
+ MATCH = 22,
+ LIT = 23,
+ CHECK = 24,
+ LENGTH = 25,
+ DONE = 26,
+ BAD = 27,
+ MEM = 28,
+ SYNC = 29,
+} inflate_mode;
+
+struct inflate_state {
+ inflate_mode mode;
+ int last;
+ int wrap;
+ int havedict;
+ int flags;
+ unsigned int dmax;
+ long unsigned int check;
+ long unsigned int total;
+ unsigned int wbits;
+ unsigned int wsize;
+ unsigned int whave;
+ unsigned int write;
+ unsigned char *window;
+ long unsigned int hold;
+ unsigned int bits;
+ unsigned int length;
+ unsigned int offset;
+ unsigned int extra;
+ const code *lencode;
+ const code *distcode;
+ unsigned int lenbits;
+ unsigned int distbits;
+ unsigned int ncode;
+ unsigned int nlen;
+ unsigned int ndist;
+ unsigned int have;
+ code *next;
+ short unsigned int lens[320];
+ short unsigned int work[288];
+ code codes[2048];
+};
+
+union uu {
+ short unsigned int us;
+ unsigned char b[2];
+};
+
+typedef unsigned int uInt;
+
+struct inflate_workspace {
+ struct inflate_state inflate_state;
+ unsigned char working_window[32768];
+};
+
+typedef enum {
+ CODES = 0,
+ LENS = 1,
+ DISTS = 2,
+} codetype;
+
+typedef unsigned char uch;
+
+typedef short unsigned int ush;
+
+typedef long unsigned int ulg;
+
+struct ct_data_s {
+ union {
+ ush freq;
+ ush code;
+ } fc;
+ union {
+ ush dad;
+ ush len;
+ } dl;
+};
+
+typedef struct ct_data_s ct_data;
+
+struct static_tree_desc_s {
+ const ct_data *static_tree;
+ const int *extra_bits;
+ int extra_base;
+ int elems;
+ int max_length;
+};
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+struct tree_desc_s {
+ ct_data *dyn_tree;
+ int max_code;
+ static_tree_desc *stat_desc;
+};
+
+typedef ush Pos;
+
+typedef unsigned int IPos;
+
+struct deflate_state {
+ z_streamp strm;
+ int status;
+ Byte *pending_buf;
+ ulg pending_buf_size;
+ Byte *pending_out;
+ int pending;
+ int noheader;
+ Byte data_type;
+ Byte method;
+ int last_flush;
+ uInt w_size;
+ uInt w_bits;
+ uInt w_mask;
+ Byte *window;
+ ulg window_size;
+ Pos *prev;
+ Pos *head;
+ uInt ins_h;
+ uInt hash_size;
+ uInt hash_bits;
+ uInt hash_mask;
+ uInt hash_shift;
+ long int block_start;
+ uInt match_length;
+ IPos prev_match;
+ int match_available;
+ uInt strstart;
+ uInt match_start;
+ uInt lookahead;
+ uInt prev_length;
+ uInt max_chain_length;
+ uInt max_lazy_match;
+ int level;
+ int strategy;
+ uInt good_match;
+ int nice_match;
+ struct ct_data_s dyn_ltree[573];
+ struct ct_data_s dyn_dtree[61];
+ struct ct_data_s bl_tree[39];
+ struct tree_desc_s l_desc;
+ struct tree_desc_s d_desc;
+ struct tree_desc_s bl_desc;
+ ush bl_count[16];
+ int heap[573];
+ int heap_len;
+ int heap_max;
+ uch depth[573];
+ uch *l_buf;
+ uInt lit_bufsize;
+ uInt last_lit;
+ ush *d_buf;
+ ulg opt_len;
+ ulg static_len;
+ ulg compressed_len;
+ uInt matches;
+ int last_eob_len;
+ ush bi_buf;
+ int bi_valid;
+};
+
+typedef struct deflate_state deflate_state;
+
+typedef enum {
+ need_more = 0,
+ block_done = 1,
+ finish_started = 2,
+ finish_done = 3,
+} block_state;
+
+typedef block_state (*compress_func)(deflate_state *, int);
+
+struct deflate_workspace {
+ deflate_state deflate_memory;
+ Byte *window_memory;
+ Pos *prev_memory;
+ Pos *head_memory;
+ char *overlay_memory;
+};
+
+typedef struct deflate_workspace deflate_workspace;
+
+struct config_s {
+ ush good_length;
+ ush max_lazy;
+ ush nice_length;
+ ush max_chain;
+ compress_func func;
+};
+
+typedef struct config_s config;
+
+typedef struct tree_desc_s tree_desc;
+
+enum xz_mode {
+ XZ_SINGLE = 0,
+ XZ_PREALLOC = 1,
+ XZ_DYNALLOC = 2,
+};
+
+enum xz_ret {
+ XZ_OK = 0,
+ XZ_STREAM_END = 1,
+ XZ_UNSUPPORTED_CHECK = 2,
+ XZ_MEM_ERROR = 3,
+ XZ_MEMLIMIT_ERROR = 4,
+ XZ_FORMAT_ERROR = 5,
+ XZ_OPTIONS_ERROR = 6,
+ XZ_DATA_ERROR = 7,
+ XZ_BUF_ERROR = 8,
+};
+
+struct xz_buf {
+ const uint8_t *in;
+ size_t in_pos;
+ size_t in_size;
+ uint8_t *out;
+ size_t out_pos;
+ size_t out_size;
+};
+
+typedef uint64_t vli_type;
+
+enum xz_check {
+ XZ_CHECK_NONE = 0,
+ XZ_CHECK_CRC32 = 1,
+ XZ_CHECK_CRC64 = 4,
+ XZ_CHECK_SHA256 = 10,
+};
+
+struct xz_dec_hash {
+ vli_type unpadded;
+ vli_type uncompressed;
+ uint32_t crc32;
+};
+
+struct xz_dec_lzma2;
+
+struct xz_dec_bcj;
+
+struct xz_dec {
+ enum {
+ SEQ_STREAM_HEADER = 0,
+ SEQ_BLOCK_START = 1,
+ SEQ_BLOCK_HEADER = 2,
+ SEQ_BLOCK_UNCOMPRESS = 3,
+ SEQ_BLOCK_PADDING = 4,
+ SEQ_BLOCK_CHECK = 5,
+ SEQ_INDEX = 6,
+ SEQ_INDEX_PADDING = 7,
+ SEQ_INDEX_CRC32 = 8,
+ SEQ_STREAM_FOOTER = 9,
+ } sequence;
+ uint32_t pos;
+ vli_type vli;
+ size_t in_start;
+ size_t out_start;
+ uint32_t crc32;
+ enum xz_check check_type;
+ enum xz_mode mode;
+ bool allow_buf_error;
+ struct {
+ vli_type compressed;
+ vli_type uncompressed;
+ uint32_t size;
+ } block_header;
+ struct {
+ vli_type compressed;
+ vli_type uncompressed;
+ vli_type count;
+ struct xz_dec_hash hash;
+ } block;
+ struct {
+ enum {
+ SEQ_INDEX_COUNT = 0,
+ SEQ_INDEX_UNPADDED = 1,
+ SEQ_INDEX_UNCOMPRESSED = 2,
+ } sequence;
+ vli_type size;
+ vli_type count;
+ struct xz_dec_hash hash;
+ } index;
+ struct {
+ size_t pos;
+ size_t size;
+ uint8_t buf[1024];
+ } temp;
+ struct xz_dec_lzma2 *lzma2;
+ struct xz_dec_bcj *bcj;
+ bool bcj_active;
+};
+
+enum lzma_state {
+ STATE_LIT_LIT = 0,
+ STATE_MATCH_LIT_LIT = 1,
+ STATE_REP_LIT_LIT = 2,
+ STATE_SHORTREP_LIT_LIT = 3,
+ STATE_MATCH_LIT = 4,
+ STATE_REP_LIT = 5,
+ STATE_SHORTREP_LIT = 6,
+ STATE_LIT_MATCH = 7,
+ STATE_LIT_LONGREP = 8,
+ STATE_LIT_SHORTREP = 9,
+ STATE_NONLIT_MATCH = 10,
+ STATE_NONLIT_REP = 11,
+};
+
+struct dictionary {
+ uint8_t *buf;
+ size_t start;
+ size_t pos;
+ size_t full;
+ size_t limit;
+ size_t end;
+ uint32_t size;
+ uint32_t size_max;
+ uint32_t allocated;
+ enum xz_mode mode;
+};
+
+struct rc_dec {
+ uint32_t range;
+ uint32_t code;
+ uint32_t init_bytes_left;
+ const uint8_t *in;
+ size_t in_pos;
+ size_t in_limit;
+};
+
+struct lzma_len_dec {
+ uint16_t choice;
+ uint16_t choice2;
+ uint16_t low[128];
+ uint16_t mid[128];
+ uint16_t high[256];
+};
+
+struct lzma_dec {
+ uint32_t rep0;
+ uint32_t rep1;
+ uint32_t rep2;
+ uint32_t rep3;
+ enum lzma_state state;
+ uint32_t len;
+ uint32_t lc;
+ uint32_t literal_pos_mask;
+ uint32_t pos_mask;
+ uint16_t is_match[192];
+ uint16_t is_rep[12];
+ uint16_t is_rep0[12];
+ uint16_t is_rep1[12];
+ uint16_t is_rep2[12];
+ uint16_t is_rep0_long[192];
+ uint16_t dist_slot[256];
+ uint16_t dist_special[114];
+ uint16_t dist_align[16];
+ struct lzma_len_dec match_len_dec;
+ struct lzma_len_dec rep_len_dec;
+ uint16_t literal[12288];
+};
+
+enum lzma2_seq {
+ SEQ_CONTROL = 0,
+ SEQ_UNCOMPRESSED_1 = 1,
+ SEQ_UNCOMPRESSED_2 = 2,
+ SEQ_COMPRESSED_0 = 3,
+ SEQ_COMPRESSED_1 = 4,
+ SEQ_PROPERTIES = 5,
+ SEQ_LZMA_PREPARE = 6,
+ SEQ_LZMA_RUN = 7,
+ SEQ_COPY = 8,
+};
+
+struct lzma2_dec {
+ enum lzma2_seq sequence;
+ enum lzma2_seq next_sequence;
+ uint32_t uncompressed;
+ uint32_t compressed;
+ bool need_dict_reset;
+ bool need_props;
+};
+
+struct xz_dec_lzma2___2 {
+ struct rc_dec rc;
+ struct dictionary dict;
+ struct lzma2_dec lzma2;
+ struct lzma_dec lzma;
+ struct {
+ uint32_t size;
+ uint8_t buf[63];
+ } temp;
+};
+
+struct xz_dec_bcj___2 {
+ enum {
+ BCJ_X86 = 4,
+ BCJ_POWERPC = 5,
+ BCJ_IA64 = 6,
+ BCJ_ARM = 7,
+ BCJ_ARMTHUMB = 8,
+ BCJ_SPARC = 9,
+ } type;
+ enum xz_ret ret;
+ bool single_call;
+ uint32_t pos;
+ uint32_t x86_prev_mask;
+ uint8_t *out;
+ size_t out_pos;
+ size_t out_size;
+ struct {
+ size_t filtered;
+ size_t size;
+ uint8_t buf[16];
+ } temp;
+};
+
+struct ts_state {
+ unsigned int offset;
+ char cb[40];
+};
+
+struct ts_config;
+
+struct ts_ops {
+ const char *name;
+ struct ts_config * (*init)(const void *, unsigned int, gfp_t, int);
+ unsigned int (*find)(struct ts_config *, struct ts_state *);
+ void (*destroy)(struct ts_config *);
+ void * (*get_pattern)(struct ts_config *);
+ unsigned int (*get_pattern_len)(struct ts_config *);
+ struct module *owner;
+ struct list_head list;
+};
+
+struct ts_config {
+ struct ts_ops *ops;
+ int flags;
+ unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *);
+ void (*finish)(struct ts_config *, struct ts_state *);
+};
+
+struct ts_linear_state {
+ unsigned int len;
+ const void *data;
+};
+
+struct ts_kmp {
+ u8 *pattern;
+ unsigned int pattern_len;
+ unsigned int prefix_tbl[0];
+};
+
+struct ts_bm {
+ u8 *pattern;
+ unsigned int patlen;
+ unsigned int bad_shift[256];
+ unsigned int good_shift[0];
+};
+
+enum {
+ TS_FSM_SPECIFIC = 0,
+ TS_FSM_WILDCARD = 1,
+ TS_FSM_DIGIT = 2,
+ TS_FSM_XDIGIT = 3,
+ TS_FSM_PRINT = 4,
+ TS_FSM_ALPHA = 5,
+ TS_FSM_ALNUM = 6,
+ TS_FSM_ASCII = 7,
+ TS_FSM_CNTRL = 8,
+ TS_FSM_GRAPH = 9,
+ TS_FSM_LOWER = 10,
+ TS_FSM_UPPER = 11,
+ TS_FSM_PUNCT = 12,
+ TS_FSM_SPACE = 13,
+ __TS_FSM_TYPE_MAX = 14,
+};
+
+enum {
+ TS_FSM_SINGLE = 0,
+ TS_FSM_PERHAPS = 1,
+ TS_FSM_ANY = 2,
+ TS_FSM_MULTI = 3,
+ TS_FSM_HEAD_IGNORE = 4,
+ __TS_FSM_RECUR_MAX = 5,
+};
+
+struct ts_fsm_token {
+ __u16 type;
+ __u8 recur;
+ __u8 value;
+};
+
+struct ts_fsm {
+ unsigned int ntokens;
+ struct ts_fsm_token tokens[0];
+};
+
+typedef s32 pao_T_____6;
+
+struct ei_entry {
+ struct list_head list;
+ long unsigned int start_addr;
+ long unsigned int end_addr;
+ int etype;
+ void *priv;
+};
+
+struct nla_bitfield32 {
+ __u32 value;
+ __u32 selector;
+};
+
+enum {
+ NLA_UNSPEC = 0,
+ NLA_U8 = 1,
+ NLA_U16 = 2,
+ NLA_U32 = 3,
+ NLA_U64 = 4,
+ NLA_STRING = 5,
+ NLA_FLAG = 6,
+ NLA_MSECS = 7,
+ NLA_NESTED = 8,
+ NLA_NESTED_ARRAY = 9,
+ NLA_NUL_STRING = 10,
+ NLA_BINARY = 11,
+ NLA_S8 = 12,
+ NLA_S16 = 13,
+ NLA_S32 = 14,
+ NLA_S64 = 15,
+ NLA_BITFIELD32 = 16,
+ NLA_REJECT = 17,
+ NLA_EXACT_LEN = 18,
+ NLA_MIN_LEN = 19,
+ __NLA_TYPE_MAX = 20,
+};
+
+enum nla_policy_validation {
+ NLA_VALIDATE_NONE = 0,
+ NLA_VALIDATE_RANGE = 1,
+ NLA_VALIDATE_MIN = 2,
+ NLA_VALIDATE_MAX = 3,
+ NLA_VALIDATE_RANGE_PTR = 4,
+ NLA_VALIDATE_FUNCTION = 5,
+ NLA_VALIDATE_WARN_TOO_LONG = 6,
+};
+
+enum netlink_validation {
+ NL_VALIDATE_LIBERAL = 0,
+ NL_VALIDATE_TRAILING = 1,
+ NL_VALIDATE_MAXTYPE = 2,
+ NL_VALIDATE_UNSPEC = 4,
+ NL_VALIDATE_STRICT_ATTRS = 8,
+ NL_VALIDATE_NESTED = 16,
+};
+
+struct cpu_rmap {
+ struct kref refcount;
+ u16 size;
+ u16 used;
+ void **obj;
+ struct {
+ u16 index;
+ u16 dist;
+ } near[0];
+};
+
+struct irq_glue {
+ struct irq_affinity_notify notify;
+ struct cpu_rmap *rmap;
+ u16 index;
+};
+
+struct sg_pool {
+ size_t size;
+ char *name;
+ struct kmem_cache *slab;
+ mempool_t *pool;
+};
+
+enum asn1_method {
+ ASN1_PRIM = 0,
+ ASN1_CONS = 1,
+};
+
+enum asn1_tag {
+ ASN1_EOC = 0,
+ ASN1_BOOL = 1,
+ ASN1_INT = 2,
+ ASN1_BTS = 3,
+ ASN1_OTS = 4,
+ ASN1_NULL = 5,
+ ASN1_OID = 6,
+ ASN1_ODE = 7,
+ ASN1_EXT = 8,
+ ASN1_REAL = 9,
+ ASN1_ENUM = 10,
+ ASN1_EPDV = 11,
+ ASN1_UTF8STR = 12,
+ ASN1_RELOID = 13,
+ ASN1_SEQ = 16,
+ ASN1_SET = 17,
+ ASN1_NUMSTR = 18,
+ ASN1_PRNSTR = 19,
+ ASN1_TEXSTR = 20,
+ ASN1_VIDSTR = 21,
+ ASN1_IA5STR = 22,
+ ASN1_UNITIM = 23,
+ ASN1_GENTIM = 24,
+ ASN1_GRASTR = 25,
+ ASN1_VISSTR = 26,
+ ASN1_GENSTR = 27,
+ ASN1_UNISTR = 28,
+ ASN1_CHRSTR = 29,
+ ASN1_BMPSTR = 30,
+ ASN1_LONG_TAG = 31,
+};
+
+typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t);
+
+struct asn1_decoder {
+ const unsigned char *machine;
+ size_t machlen;
+ const asn1_action_t *actions;
+};
+
+enum asn1_opcode {
+ ASN1_OP_MATCH = 0,
+ ASN1_OP_MATCH_OR_SKIP = 1,
+ ASN1_OP_MATCH_ACT = 2,
+ ASN1_OP_MATCH_ACT_OR_SKIP = 3,
+ ASN1_OP_MATCH_JUMP = 4,
+ ASN1_OP_MATCH_JUMP_OR_SKIP = 5,
+ ASN1_OP_MATCH_ANY = 8,
+ ASN1_OP_MATCH_ANY_OR_SKIP = 9,
+ ASN1_OP_MATCH_ANY_ACT = 10,
+ ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11,
+ ASN1_OP_COND_MATCH_OR_SKIP = 17,
+ ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19,
+ ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21,
+ ASN1_OP_COND_MATCH_ANY = 24,
+ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25,
+ ASN1_OP_COND_MATCH_ANY_ACT = 26,
+ ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27,
+ ASN1_OP_COND_FAIL = 28,
+ ASN1_OP_COMPLETE = 29,
+ ASN1_OP_ACT = 30,
+ ASN1_OP_MAYBE_ACT = 31,
+ ASN1_OP_END_SEQ = 32,
+ ASN1_OP_END_SET = 33,
+ ASN1_OP_END_SEQ_OF = 34,
+ ASN1_OP_END_SET_OF = 35,
+ ASN1_OP_END_SEQ_ACT = 36,
+ ASN1_OP_END_SET_ACT = 37,
+ ASN1_OP_END_SEQ_OF_ACT = 38,
+ ASN1_OP_END_SET_OF_ACT = 39,
+ ASN1_OP_RETURN = 40,
+ ASN1_OP__NR = 41,
+};
+
+struct font_desc {
+ int idx;
+ const char *name;
+ int width;
+ int height;
+ const void *data;
+ int pref;
+};
+
+typedef u16 ucs2_char_t;
+
+struct msr {
+ union {
+ struct {
+ u32 l;
+ u32 h;
+ };
+ u64 q;
+ };
+};
+
+struct msr_info {
+ u32 msr_no;
+ struct msr reg;
+ struct msr *msrs;
+ int err;
+};
+
+struct msr_regs_info {
+ u32 *regs;
+ int err;
+};
+
+struct msr_info_completion {
+ struct msr_info msr;
+ struct completion done;
+};
+
+struct trace_event_raw_msr_trace_class {
+ struct trace_entry ent;
+ unsigned int msr;
+ u64 val;
+ int failed;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_msr_trace_class {};
+
+typedef void (*btf_trace_read_msr)(void *, unsigned int, u64, int);
+
+typedef void (*btf_trace_write_msr)(void *, unsigned int, u64, int);
+
+typedef void (*btf_trace_rdpmc)(void *, unsigned int, u64, int);
+
+struct compress_format {
+ unsigned char magic[2];
+ const char *name;
+ decompress_fn decompressor;
+};
+
+struct cpio_data {
+ void *data;
+ size_t size;
+ char name[18];
+};
+
+enum cpio_fields {
+ C_MAGIC = 0,
+ C_INO = 1,
+ C_MODE = 2,
+ C_UID = 3,
+ C_GID = 4,
+ C_NLINK = 5,
+ C_MTIME = 6,
+ C_FILESIZE = 7,
+ C_MAJ = 8,
+ C_MIN = 9,
+ C_RMAJ = 10,
+ C_RMIN = 11,
+ C_NAMESIZE = 12,
+ C_CHKSUM = 13,
+ C_NFIELDS = 14,
+};
+
+struct fprop_local_single {
+ long unsigned int events;
+ unsigned int period;
+ raw_spinlock_t lock;
+};
+
+struct ida_bitmap {
+ long unsigned int bitmap[16];
+};
+
+struct klist_waiter {
+ struct list_head list;
+ struct klist_node *node;
+ struct task_struct *process;
+ int woken;
+};
+
+struct uevent_sock {
+ struct list_head list;
+ struct sock *sk;
+};
+
+enum {
+ LOGIC_PIO_INDIRECT = 0,
+ LOGIC_PIO_CPU_MMIO = 1,
+};
+
+struct logic_pio_host_ops;
+
+struct logic_pio_hwaddr {
+ struct list_head list;
+ struct fwnode_handle *fwnode;
+ resource_size_t hw_start;
+ resource_size_t io_start;
+ resource_size_t size;
+ long unsigned int flags;
+ void *hostdata;
+ const struct logic_pio_host_ops *ops;
+};
+
+struct logic_pio_host_ops {
+ u32 (*in)(void *, long unsigned int, size_t);
+ void (*out)(void *, long unsigned int, u32, size_t);
+ u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int);
+ void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int);
+};
+
+typedef struct {
+ long unsigned int key[2];
+} hsiphash_key_t;
+
+struct clk_hw;
+
+struct clk_rate_request {
+ long unsigned int rate;
+ long unsigned int min_rate;
+ long unsigned int max_rate;
+ long unsigned int best_parent_rate;
+ struct clk_hw *best_parent_hw;
+};
+
+struct clk_core;
+
+struct clk_init_data;
+
+struct clk_hw {
+ struct clk_core *core;
+ struct clk *clk;
+ const struct clk_init_data *init;
+};
+
+struct clk_duty {
+ unsigned int num;
+ unsigned int den;
+};
+
+struct clk_ops {
+ int (*prepare)(struct clk_hw *);
+ void (*unprepare)(struct clk_hw *);
+ int (*is_prepared)(struct clk_hw *);
+ void (*unprepare_unused)(struct clk_hw *);
+ int (*enable)(struct clk_hw *);
+ void (*disable)(struct clk_hw *);
+ int (*is_enabled)(struct clk_hw *);
+ void (*disable_unused)(struct clk_hw *);
+ int (*save_context)(struct clk_hw *);
+ void (*restore_context)(struct clk_hw *);
+ long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int);
+ long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *);
+ int (*determine_rate)(struct clk_hw *, struct clk_rate_request *);
+ int (*set_parent)(struct clk_hw *, u8);
+ u8 (*get_parent)(struct clk_hw *);
+ int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int);
+ int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8);
+ long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int);
+ int (*get_phase)(struct clk_hw *);
+ int (*set_phase)(struct clk_hw *, int);
+ int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *);
+ int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *);
+ int (*init)(struct clk_hw *);
+ void (*terminate)(struct clk_hw *);
+ void (*debug_init)(struct clk_hw *, struct dentry *);
+};
+
+struct clk_parent_data {
+ const struct clk_hw *hw;
+ const char *fw_name;
+ const char *name;
+ int index;
+};
+
+struct clk_init_data {
+ const char *name;
+ const struct clk_ops *ops;
+ const char * const *parent_names;
+ const struct clk_parent_data *parent_data;
+ const struct clk_hw **parent_hws;
+ u8 num_parents;
+ long unsigned int flags;
+};
+
+struct sockaddr_in {
+ __kernel_sa_family_t sin_family;
+ __be16 sin_port;
+ struct in_addr sin_addr;
+ unsigned char __pad[8];
+};
+
+struct sockaddr_in6 {
+ short unsigned int sin6_family;
+ __be16 sin6_port;
+ __be32 sin6_flowinfo;
+ struct in6_addr sin6_addr;
+ __u32 sin6_scope_id;
+};
+
+struct random_ready_callback {
+ struct list_head list;
+ void (*func)(struct random_ready_callback *);
+ struct module *owner;
+};
+
+enum format_type {
+ FORMAT_TYPE_NONE = 0,
+ FORMAT_TYPE_WIDTH = 1,
+ FORMAT_TYPE_PRECISION = 2,
+ FORMAT_TYPE_CHAR = 3,
+ FORMAT_TYPE_STR = 4,
+ FORMAT_TYPE_PTR = 5,
+ FORMAT_TYPE_PERCENT_CHAR = 6,
+ FORMAT_TYPE_INVALID = 7,
+ FORMAT_TYPE_LONG_LONG = 8,
+ FORMAT_TYPE_ULONG = 9,
+ FORMAT_TYPE_LONG = 10,
+ FORMAT_TYPE_UBYTE = 11,
+ FORMAT_TYPE_BYTE = 12,
+ FORMAT_TYPE_USHORT = 13,
+ FORMAT_TYPE_SHORT = 14,
+ FORMAT_TYPE_UINT = 15,
+ FORMAT_TYPE_INT = 16,
+ FORMAT_TYPE_SIZE_T = 17,
+ FORMAT_TYPE_PTRDIFF = 18,
+};
+
+struct printf_spec {
+ unsigned int type: 8;
+ int field_width: 24;
+ unsigned int flags: 8;
+ unsigned int base: 8;
+ int precision: 16;
+};
+
+struct minmax_sample {
+ u32 t;
+ u32 v;
+};
+
+struct minmax {
+ struct minmax_sample s[3];
+};
+
+struct xa_limit {
+ u32 max;
+ u32 min;
+};
+
+enum {
+ st_wordstart = 0,
+ st_wordcmp = 1,
+ st_wordskip = 2,
+ st_bufcpy = 3,
+};
+
+enum {
+ st_wordstart___2 = 0,
+ st_wordcmp___2 = 1,
+ st_wordskip___2 = 2,
+};
+
+struct in6_addr___2;
+
+enum reg_type {
+ REG_TYPE_RM = 0,
+ REG_TYPE_INDEX = 1,
+ REG_TYPE_BASE = 2,
+};
+
+struct acpi_device;
+
+struct pci_sysdata {
+ int domain;
+ int node;
+ struct acpi_device *companion;
+ void *iommu;
+ void *fwnode;
+};
+
+struct pci_bus_resource {
+ struct list_head list;
+ struct resource *res;
+ unsigned int flags;
+};
+
+typedef u64 pci_bus_addr_t;
+
+struct pci_bus_region {
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
+};
+
+enum pci_fixup_pass {
+ pci_fixup_early = 0,
+ pci_fixup_header = 1,
+ pci_fixup_final = 2,
+ pci_fixup_enable = 3,
+ pci_fixup_resume = 4,
+ pci_fixup_suspend = 5,
+ pci_fixup_resume_early = 6,
+ pci_fixup_suspend_late = 7,
+};
+
+struct hotplug_slot_ops;
+
+struct hotplug_slot {
+ const struct hotplug_slot_ops *ops;
+ struct list_head slot_list;
+ struct pci_slot *pci_slot;
+ struct module *owner;
+ const char *mod_name;
+};
+
+enum pci_dev_flags {
+ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1,
+ PCI_DEV_FLAGS_NO_D3 = 2,
+ PCI_DEV_FLAGS_ASSIGNED = 4,
+ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8,
+ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32,
+ PCI_DEV_FLAGS_NO_BUS_RESET = 64,
+ PCI_DEV_FLAGS_NO_PM_RESET = 128,
+ PCI_DEV_FLAGS_VPD_REF_F0 = 256,
+ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512,
+ PCI_DEV_FLAGS_NO_FLR_RESET = 1024,
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048,
+};
+
+enum pci_bus_flags {
+ PCI_BUS_FLAGS_NO_MSI = 1,
+ PCI_BUS_FLAGS_NO_MMRBC = 2,
+ PCI_BUS_FLAGS_NO_AERSID = 4,
+ PCI_BUS_FLAGS_NO_EXTCFG = 8,
+};
+
+enum pci_bus_speed {
+ PCI_SPEED_33MHz = 0,
+ PCI_SPEED_66MHz = 1,
+ PCI_SPEED_66MHz_PCIX = 2,
+ PCI_SPEED_100MHz_PCIX = 3,
+ PCI_SPEED_133MHz_PCIX = 4,
+ PCI_SPEED_66MHz_PCIX_ECC = 5,
+ PCI_SPEED_100MHz_PCIX_ECC = 6,
+ PCI_SPEED_133MHz_PCIX_ECC = 7,
+ PCI_SPEED_66MHz_PCIX_266 = 9,
+ PCI_SPEED_100MHz_PCIX_266 = 10,
+ PCI_SPEED_133MHz_PCIX_266 = 11,
+ AGP_UNKNOWN = 12,
+ AGP_1X = 13,
+ AGP_2X = 14,
+ AGP_4X = 15,
+ AGP_8X = 16,
+ PCI_SPEED_66MHz_PCIX_533 = 17,
+ PCI_SPEED_100MHz_PCIX_533 = 18,
+ PCI_SPEED_133MHz_PCIX_533 = 19,
+ PCIE_SPEED_2_5GT = 20,
+ PCIE_SPEED_5_0GT = 21,
+ PCIE_SPEED_8_0GT = 22,
+ PCIE_SPEED_16_0GT = 23,
+ PCIE_SPEED_32_0GT = 24,
+ PCI_SPEED_UNKNOWN = 255,
+};
+
+struct pci_host_bridge {
+ struct device dev;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+ void *sysdata;
+ int busnr;
+ struct list_head windows;
+ struct list_head dma_ranges;
+ u8 (*swizzle_irq)(struct pci_dev *, u8 *);
+ int (*map_irq)(const struct pci_dev *, u8, u8);
+ void (*release_fn)(struct pci_host_bridge *);
+ void *release_data;
+ struct msi_controller *msi;
+ unsigned int ignore_reset_delay: 1;
+ unsigned int no_ext_tags: 1;
+ unsigned int native_aer: 1;
+ unsigned int native_pcie_hotplug: 1;
+ unsigned int native_shpc_hotplug: 1;
+ unsigned int native_pme: 1;
+ unsigned int native_ltr: 1;
+ unsigned int native_dpc: 1;
+ unsigned int preserve_config: 1;
+ unsigned int size_windows: 1;
+ resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t);
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long unsigned int private[0];
+};
+
+enum {
+ PCI_REASSIGN_ALL_RSRC = 1,
+ PCI_REASSIGN_ALL_BUS = 2,
+ PCI_PROBE_ONLY = 4,
+ PCI_CAN_SKIP_ISA_ALIGN = 8,
+ PCI_ENABLE_PROC_DOMAINS = 16,
+ PCI_COMPAT_DOMAIN_0 = 32,
+ PCI_SCAN_ALL_PCIE_DEVS = 64,
+};
+
+struct acpi_device_status {
+ u32 present: 1;
+ u32 enabled: 1;
+ u32 show_in_ui: 1;
+ u32 functional: 1;
+ u32 battery_present: 1;
+ u32 reserved: 27;
+};
+
+struct acpi_device_flags {
+ u32 dynamic_status: 1;
+ u32 removable: 1;
+ u32 ejectable: 1;
+ u32 power_manageable: 1;
+ u32 match_driver: 1;
+ u32 initialized: 1;
+ u32 visited: 1;
+ u32 hotplug_notify: 1;
+ u32 is_dock_station: 1;
+ u32 of_compatible_ok: 1;
+ u32 coherent_dma: 1;
+ u32 cca_seen: 1;
+ u32 enumeration_by_parent: 1;
+ u32 reserved: 19;
+};
+
+typedef char acpi_bus_id[8];
+
+struct acpi_pnp_type {
+ u32 hardware_id: 1;
+ u32 bus_address: 1;
+ u32 platform_id: 1;
+ u32 reserved: 29;
+};
+
+typedef u64 acpi_bus_address;
+
+typedef char acpi_device_name[40];
+
+typedef char acpi_device_class[20];
+
+union acpi_object;
+
+struct acpi_device_pnp {
+ acpi_bus_id bus_id;
+ struct acpi_pnp_type type;
+ acpi_bus_address bus_address;
+ char *unique_id;
+ struct list_head ids;
+ acpi_device_name device_name;
+ acpi_device_class device_class;
+ union acpi_object *str_obj;
+};
+
+struct acpi_device_power_flags {
+ u32 explicit_get: 1;
+ u32 power_resources: 1;
+ u32 inrush_current: 1;
+ u32 power_removed: 1;
+ u32 ignore_parent: 1;
+ u32 dsw_present: 1;
+ u32 reserved: 26;
+};
+
+struct acpi_device_power_state {
+ struct {
+ u8 valid: 1;
+ u8 explicit_set: 1;
+ u8 reserved: 6;
+ } flags;
+ int power;
+ int latency;
+ struct list_head resources;
+};
+
+struct acpi_device_power {
+ int state;
+ struct acpi_device_power_flags flags;
+ struct acpi_device_power_state states[5];
+};
+
+struct acpi_device_wakeup_flags {
+ u8 valid: 1;
+ u8 notifier_present: 1;
+};
+
+struct acpi_device_wakeup_context {
+ void (*func)(struct acpi_device_wakeup_context *);
+ struct device *dev;
+};
+
+struct acpi_device_wakeup {
+ acpi_handle gpe_device;
+ u64 gpe_number;
+ u64 sleep_state;
+ struct list_head resources;
+ struct acpi_device_wakeup_flags flags;
+ struct acpi_device_wakeup_context context;
+ struct wakeup_source *ws;
+ int prepare_count;
+ int enable_count;
+};
+
+struct acpi_device_perf_flags {
+ u8 reserved: 8;
+};
+
+struct acpi_device_perf_state;
+
+struct acpi_device_perf {
+ int state;
+ struct acpi_device_perf_flags flags;
+ int state_count;
+ struct acpi_device_perf_state *states;
+};
+
+struct acpi_device_dir {
+ struct proc_dir_entry *entry;
+};
+
+struct acpi_device_data {
+ const union acpi_object *pointer;
+ struct list_head properties;
+ const union acpi_object *of_compatible;
+ struct list_head subnodes;
+};
+
+struct acpi_scan_handler;
+
+struct acpi_hotplug_context;
+
+struct acpi_driver;
+
+struct acpi_gpio_mapping;
+
+struct acpi_device {
+ int device_type;
+ acpi_handle handle;
+ struct fwnode_handle fwnode;
+ struct acpi_device *parent;
+ struct list_head children;
+ struct list_head node;
+ struct list_head wakeup_list;
+ struct list_head del_list;
+ struct acpi_device_status status;
+ struct acpi_device_flags flags;
+ struct acpi_device_pnp pnp;
+ struct acpi_device_power power;
+ struct acpi_device_wakeup wakeup;
+ struct acpi_device_perf performance;
+ struct acpi_device_dir dir;
+ struct acpi_device_data data;
+ struct acpi_scan_handler *handler;
+ struct acpi_hotplug_context *hp;
+ struct acpi_driver *driver;
+ const struct acpi_gpio_mapping *driver_gpios;
+ void *driver_data;
+ struct device dev;
+ unsigned int physical_node_count;
+ unsigned int dep_unmet;
+ struct list_head physical_node_list;
+ struct mutex physical_node_lock;
+ void (*remove)(struct acpi_device *);
+};
+
+struct hotplug_slot_ops {
+ int (*enable_slot)(struct hotplug_slot *);
+ int (*disable_slot)(struct hotplug_slot *);
+ int (*set_attention_status)(struct hotplug_slot *, u8);
+ int (*hardware_test)(struct hotplug_slot *, u32);
+ int (*get_power_status)(struct hotplug_slot *, u8 *);
+ int (*get_attention_status)(struct hotplug_slot *, u8 *);
+ int (*get_latch_status)(struct hotplug_slot *, u8 *);
+ int (*get_adapter_status)(struct hotplug_slot *, u8 *);
+ int (*reset_slot)(struct hotplug_slot *, int);
+};
+
+typedef u64 acpi_io_address;
+
+typedef u32 acpi_object_type;
+
+union acpi_object {
+ acpi_object_type type;
+ struct {
+ acpi_object_type type;
+ u64 value;
+ } integer;
+ struct {
+ acpi_object_type type;
+ u32 length;
+ char *pointer;
+ } string;
+ struct {
+ acpi_object_type type;
+ u32 length;
+ u8 *pointer;
+ } buffer;
+ struct {
+ acpi_object_type type;
+ u32 count;
+ union acpi_object *elements;
+ } package;
+ struct {
+ acpi_object_type type;
+ acpi_object_type actual_type;
+ acpi_handle handle;
+ } reference;
+ struct {
+ acpi_object_type type;
+ u32 proc_id;
+ acpi_io_address pblk_address;
+ u32 pblk_length;
+ } processor;
+ struct {
+ acpi_object_type type;
+ u32 system_level;
+ u32 resource_order;
+ } power_resource;
+};
+
+struct acpi_hotplug_profile {
+ struct kobject kobj;
+ int (*scan_dependent)(struct acpi_device *);
+ void (*notify_online)(struct acpi_device *);
+ bool enabled: 1;
+ bool demand_offline: 1;
+};
+
+struct acpi_scan_handler {
+ const struct acpi_device_id *ids;
+ struct list_head list_node;
+ bool (*match)(const char *, const struct acpi_device_id **);
+ int (*attach)(struct acpi_device *, const struct acpi_device_id *);
+ void (*detach)(struct acpi_device *);
+ void (*bind)(struct device *);
+ void (*unbind)(struct device *);
+ struct acpi_hotplug_profile hotplug;
+};
+
+struct acpi_hotplug_context {
+ struct acpi_device *self;
+ int (*notify)(struct acpi_device *, u32);
+ void (*uevent)(struct acpi_device *, u32);
+ void (*fixup)(struct acpi_device *);
+};
+
+typedef int (*acpi_op_add)(struct acpi_device *);
+
+typedef int (*acpi_op_remove)(struct acpi_device *);
+
+typedef void (*acpi_op_notify)(struct acpi_device *, u32);
+
+struct acpi_device_ops {
+ acpi_op_add add;
+ acpi_op_remove remove;
+ acpi_op_notify notify;
+};
+
+struct acpi_driver {
+ char name[80];
+ char class[80];
+ const struct acpi_device_id *ids;
+ unsigned int flags;
+ struct acpi_device_ops ops;
+ struct device_driver drv;
+ struct module *owner;
+};
+
+struct acpi_device_perf_state {
+ struct {
+ u8 valid: 1;
+ u8 reserved: 7;
+ } flags;
+ u8 power;
+ u8 performance;
+ int latency;
+};
+
+struct acpi_gpio_params;
+
+struct acpi_gpio_mapping {
+ const char *name;
+ const struct acpi_gpio_params *data;
+ unsigned int size;
+ unsigned int quirks;
+};
+
+enum pci_bar_type {
+ pci_bar_unknown = 0,
+ pci_bar_io = 1,
+ pci_bar_mem32 = 2,
+ pci_bar_mem64 = 3,
+};
+
+struct pci_domain_busn_res {
+ struct list_head list;
+ struct resource res;
+ int domain_nr;
+};
+
+enum rpm_status {
+ RPM_ACTIVE = 0,
+ RPM_RESUMING = 1,
+ RPM_SUSPENDED = 2,
+ RPM_SUSPENDING = 3,
+};
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct bus_type *, char *);
+ ssize_t (*store)(struct bus_type *, const char *, size_t);
+};
+
+enum pcie_reset_state {
+ pcie_deassert_reset = 1,
+ pcie_warm_reset = 2,
+ pcie_hot_reset = 3,
+};
+
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0,
+ PCIE_LNK_X1 = 1,
+ PCIE_LNK_X2 = 2,
+ PCIE_LNK_X4 = 4,
+ PCIE_LNK_X8 = 8,
+ PCIE_LNK_X12 = 12,
+ PCIE_LNK_X16 = 16,
+ PCIE_LNK_X32 = 32,
+ PCIE_LNK_WIDTH_UNKNOWN = 255,
+};
+
+struct pci_cap_saved_data {
+ u16 cap_nr;
+ bool cap_extended;
+ unsigned int size;
+ u32 data[0];
+};
+
+struct pci_cap_saved_state {
+ struct hlist_node next;
+ struct pci_cap_saved_data cap;
+};
+
+typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32);
+
+struct pci_platform_pm_ops {
+ bool (*bridge_d3)(struct pci_dev *);
+ bool (*is_manageable)(struct pci_dev *);
+ int (*set_state)(struct pci_dev *, pci_power_t);
+ pci_power_t (*get_state)(struct pci_dev *);
+ void (*refresh_state)(struct pci_dev *);
+ pci_power_t (*choose_state)(struct pci_dev *);
+ int (*set_wakeup)(struct pci_dev *, bool);
+ bool (*need_resume)(struct pci_dev *);
+};
+
+struct pci_pme_device {
+ struct list_head list;
+ struct pci_dev *dev;
+};
+
+struct pci_saved_state {
+ u32 config_space[16];
+ struct pci_cap_saved_data cap[0];
+};
+
+struct pci_devres {
+ unsigned int enabled: 1;
+ unsigned int pinned: 1;
+ unsigned int orig_intx: 1;
+ unsigned int restore_intx: 1;
+ unsigned int mwi: 1;
+ u32 region_mask;
+};
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *, char *);
+ ssize_t (*store)(struct device_driver *, const char *, size_t);
+};
+
+enum pci_ers_result {
+ PCI_ERS_RESULT_NONE = 1,
+ PCI_ERS_RESULT_CAN_RECOVER = 2,
+ PCI_ERS_RESULT_NEED_RESET = 3,
+ PCI_ERS_RESULT_DISCONNECT = 4,
+ PCI_ERS_RESULT_RECOVERED = 5,
+ PCI_ERS_RESULT_NO_AER_DRIVER = 6,
+};
+
+struct pcie_device {
+ int irq;
+ struct pci_dev *port;
+ u32 service;
+ void *priv_data;
+ struct device device;
+};
+
+struct pcie_port_service_driver {
+ const char *name;
+ int (*probe)(struct pcie_device *);
+ void (*remove)(struct pcie_device *);
+ int (*suspend)(struct pcie_device *);
+ int (*resume_noirq)(struct pcie_device *);
+ int (*resume)(struct pcie_device *);
+ int (*runtime_suspend)(struct pcie_device *);
+ int (*runtime_resume)(struct pcie_device *);
+ void (*error_resume)(struct pci_dev *);
+ int port_type;
+ u32 service;
+ struct device_driver driver;
+};
+
+struct pci_dynid {
+ struct list_head node;
+ struct pci_device_id id;
+};
+
+struct drv_dev_and_id {
+ struct pci_driver *drv;
+ struct pci_dev *dev;
+ const struct pci_device_id *id;
+};
+
+enum pci_mmap_state {
+ pci_mmap_io = 0,
+ pci_mmap_mem = 1,
+};
+
+enum pci_mmap_api {
+ PCI_MMAP_SYSFS = 0,
+ PCI_MMAP_PROCFS = 1,
+};
+
+enum pci_lost_interrupt_reason {
+ PCI_LOST_IRQ_NO_INFORMATION = 0,
+ PCI_LOST_IRQ_DISABLE_MSI = 1,
+ PCI_LOST_IRQ_DISABLE_MSIX = 2,
+ PCI_LOST_IRQ_DISABLE_ACPI = 3,
+};
+
+struct pci_vpd_ops;
+
+struct pci_vpd {
+ const struct pci_vpd_ops *ops;
+ struct bin_attribute *attr;
+ struct mutex lock;
+ unsigned int len;
+ u16 flag;
+ u8 cap;
+ unsigned int busy: 1;
+ unsigned int valid: 1;
+};
+
+struct pci_vpd_ops {
+ ssize_t (*read)(struct pci_dev *, loff_t, size_t, void *);
+ ssize_t (*write)(struct pci_dev *, loff_t, size_t, const void *);
+ int (*set_size)(struct pci_dev *, size_t);
+};
+
+struct pci_dev_resource {
+ struct list_head list;
+ struct resource *res;
+ struct pci_dev *dev;
+ resource_size_t start;
+ resource_size_t end;
+ resource_size_t add_size;
+ resource_size_t min_align;
+ long unsigned int flags;
+};
+
+enum release_type {
+ leaf_only = 0,
+ whole_subtree = 1,
+};
+
+enum enable_type {
+ undefined = -1,
+ user_disabled = 0,
+ auto_disabled = 1,
+ user_enabled = 2,
+ auto_enabled = 3,
+};
+
+struct portdrv_service_data {
+ struct pcie_port_service_driver *drv;
+ struct device *dev;
+ u32 service;
+};
+
+struct aspm_latency {
+ u32 l0s;
+ u32 l1;
+};
+
+struct pcie_link_state {
+ struct pci_dev *pdev;
+ struct pci_dev *downstream;
+ struct pcie_link_state *root;
+ struct pcie_link_state *parent;
+ struct list_head sibling;
+ u32 aspm_support: 7;
+ u32 aspm_enabled: 7;
+ u32 aspm_capable: 7;
+ u32 aspm_default: 7;
+ char: 4;
+ u32 aspm_disable: 7;
+ u32 clkpm_capable: 1;
+ u32 clkpm_enabled: 1;
+ u32 clkpm_default: 1;
+ u32 clkpm_disable: 1;
+ struct aspm_latency latency_up;
+ struct aspm_latency latency_dw;
+ struct aspm_latency acceptable[8];
+ struct {
+ u32 up_cap_ptr;
+ u32 dw_cap_ptr;
+ u32 ctl1;
+ u32 ctl2;
+ } l1ss;
+};
+
+struct aspm_register_info {
+ u32 support: 2;
+ u32 enabled: 2;
+ u32 latency_encoding_l0s;
+ u32 latency_encoding_l1;
+ u32 l1ss_cap_ptr;
+ u32 l1ss_cap;
+ u32 l1ss_ctl1;
+ u32 l1ss_ctl2;
+};
+
+struct pci_filp_private {
+ enum pci_mmap_state mmap_state;
+ int write_combine;
+};
+
+struct pci_slot_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct pci_slot *, char *);
+ ssize_t (*store)(struct pci_slot *, const char *, size_t);
+};
+
+typedef u64 acpi_size;
+
+struct acpi_buffer {
+ acpi_size length;
+ void *pointer;
+};
+
+struct acpi_bus_type {
+ struct list_head list;
+ const char *name;
+ bool (*match)(struct device *);
+ struct acpi_device * (*find_companion)(struct device *);
+ void (*setup)(struct device *);
+ void (*cleanup)(struct device *);
+};
+
+struct acpi_pci_root {
+ struct acpi_device *device;
+ struct pci_bus *bus;
+ u16 segment;
+ struct resource secondary;
+ u32 osc_support_set;
+ u32 osc_control_set;
+ phys_addr_t mcfg_addr;
+};
+
+enum pm_qos_flags_status {
+ PM_QOS_FLAGS_UNDEFINED = -1,
+ PM_QOS_FLAGS_NONE = 0,
+ PM_QOS_FLAGS_SOME = 1,
+ PM_QOS_FLAGS_ALL = 2,
+};
+
+struct hpx_type0 {
+ u32 revision;
+ u8 cache_line_size;
+ u8 latency_timer;
+ u8 enable_serr;
+ u8 enable_perr;
+};
+
+struct hpx_type1 {
+ u32 revision;
+ u8 max_mem_read;
+ u8 avg_max_split;
+ u16 tot_max_split;
+};
+
+struct hpx_type2 {
+ u32 revision;
+ u32 unc_err_mask_and;
+ u32 unc_err_mask_or;
+ u32 unc_err_sever_and;
+ u32 unc_err_sever_or;
+ u32 cor_err_mask_and;
+ u32 cor_err_mask_or;
+ u32 adv_err_cap_and;
+ u32 adv_err_cap_or;
+ u16 pci_exp_devctl_and;
+ u16 pci_exp_devctl_or;
+ u16 pci_exp_lnkctl_and;
+ u16 pci_exp_lnkctl_or;
+ u32 sec_unc_err_sever_and;
+ u32 sec_unc_err_sever_or;
+ u32 sec_unc_err_mask_and;
+ u32 sec_unc_err_mask_or;
+};
+
+struct hpx_type3 {
+ u16 device_type;
+ u16 function_type;
+ u16 config_space_location;
+ u16 pci_exp_cap_id;
+ u16 pci_exp_cap_ver;
+ u16 pci_exp_vendor_id;
+ u16 dvsec_id;
+ u16 dvsec_rev;
+ u16 match_offset;
+ u32 match_mask_and;
+ u32 match_value;
+ u16 reg_offset;
+ u32 reg_mask_and;
+ u32 reg_mask_or;
+};
+
+enum hpx_type3_dev_type {
+ HPX_TYPE_ENDPOINT = 1,
+ HPX_TYPE_LEG_END = 2,
+ HPX_TYPE_RC_END = 4,
+ HPX_TYPE_RC_EC = 8,
+ HPX_TYPE_ROOT_PORT = 16,
+ HPX_TYPE_UPSTREAM = 32,
+ HPX_TYPE_DOWNSTREAM = 64,
+ HPX_TYPE_PCI_BRIDGE = 128,
+ HPX_TYPE_PCIE_BRIDGE = 256,
+};
+
+enum hpx_type3_fn_type {
+ HPX_FN_NORMAL = 1,
+ HPX_FN_SRIOV_PHYS = 2,
+ HPX_FN_SRIOV_VIRT = 4,
+};
+
+enum hpx_type3_cfg_loc {
+ HPX_CFG_PCICFG = 0,
+ HPX_CFG_PCIE_CAP = 1,
+ HPX_CFG_PCIE_CAP_EXT = 2,
+ HPX_CFG_VEND_CAP = 3,
+ HPX_CFG_DVSEC = 4,
+ HPX_CFG_MAX = 5,
+};
+
+enum pci_irq_reroute_variant {
+ INTEL_IRQ_REROUTE_VARIANT = 1,
+ MAX_IRQ_REROUTE_VARIANTS = 3,
+};
+
+struct pci_fixup {
+ u16 vendor;
+ u16 device;
+ u32 class;
+ unsigned int class_shift;
+ int hook_offset;
+};
+
+enum {
+ NVME_REG_CAP = 0,
+ NVME_REG_VS = 8,
+ NVME_REG_INTMS = 12,
+ NVME_REG_INTMC = 16,
+ NVME_REG_CC = 20,
+ NVME_REG_CSTS = 28,
+ NVME_REG_NSSR = 32,
+ NVME_REG_AQA = 36,
+ NVME_REG_ASQ = 40,
+ NVME_REG_ACQ = 48,
+ NVME_REG_CMBLOC = 56,
+ NVME_REG_CMBSZ = 60,
+ NVME_REG_BPINFO = 64,
+ NVME_REG_BPRSEL = 68,
+ NVME_REG_BPMBL = 72,
+ NVME_REG_PMRCAP = 3584,
+ NVME_REG_PMRCTL = 3588,
+ NVME_REG_PMRSTS = 3592,
+ NVME_REG_PMREBS = 3596,
+ NVME_REG_PMRSWTP = 3600,
+ NVME_REG_DBS = 4096,
+};
+
+enum {
+ NVME_CC_ENABLE = 1,
+ NVME_CC_CSS_NVM = 0,
+ NVME_CC_EN_SHIFT = 0,
+ NVME_CC_CSS_SHIFT = 4,
+ NVME_CC_MPS_SHIFT = 7,
+ NVME_CC_AMS_SHIFT = 11,
+ NVME_CC_SHN_SHIFT = 14,
+ NVME_CC_IOSQES_SHIFT = 16,
+ NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_AMS_RR = 0,
+ NVME_CC_AMS_WRRU = 2048,
+ NVME_CC_AMS_VS = 14336,
+ NVME_CC_SHN_NONE = 0,
+ NVME_CC_SHN_NORMAL = 16384,
+ NVME_CC_SHN_ABRUPT = 32768,
+ NVME_CC_SHN_MASK = 49152,
+ NVME_CC_IOSQES = 393216,
+ NVME_CC_IOCQES = 4194304,
+ NVME_CSTS_RDY = 1,
+ NVME_CSTS_CFS = 2,
+ NVME_CSTS_NSSRO = 16,
+ NVME_CSTS_PP = 32,
+ NVME_CSTS_SHST_NORMAL = 0,
+ NVME_CSTS_SHST_OCCUR = 4,
+ NVME_CSTS_SHST_CMPLT = 8,
+ NVME_CSTS_SHST_MASK = 12,
+};
+
+enum {
+ NVME_AEN_BIT_NS_ATTR = 8,
+ NVME_AEN_BIT_FW_ACT = 9,
+ NVME_AEN_BIT_ANA_CHANGE = 11,
+ NVME_AEN_BIT_DISC_CHANGE = 31,
+};
+
+enum {
+ SWITCHTEC_GAS_MRPC_OFFSET = 0,
+ SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096,
+ SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144,
+ SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192,
+ SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704,
+ SWITCHTEC_GAS_PART_CFG_OFFSET = 16384,
+ SWITCHTEC_GAS_NTB_OFFSET = 65536,
+ SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568,
+};
+
+enum {
+ SWITCHTEC_NTB_REG_INFO_OFFSET = 0,
+ SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384,
+ SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600,
+};
+
+struct nt_partition_info {
+ u32 xlink_enabled;
+ u32 target_part_low;
+ u32 target_part_high;
+ u32 reserved;
+};
+
+struct ntb_info_regs {
+ u8 partition_count;
+ u8 partition_id;
+ u16 reserved1;
+ u64 ep_map;
+ u16 requester_id;
+ u16 reserved2;
+ u32 reserved3[4];
+ struct nt_partition_info ntp_info[48];
+} __attribute__((packed));
+
+struct ntb_ctrl_regs {
+ u32 partition_status;
+ u32 partition_op;
+ u32 partition_ctrl;
+ u32 bar_setup;
+ u32 bar_error;
+ u16 lut_table_entries;
+ u16 lut_table_offset;
+ u32 lut_error;
+ u16 req_id_table_size;
+ u16 req_id_table_offset;
+ u32 req_id_error;
+ u32 reserved1[7];
+ struct {
+ u32 ctl;
+ u32 win_size;
+ u64 xlate_addr;
+ } bar_entry[6];
+ struct {
+ u32 win_size;
+ u32 reserved[3];
+ } bar_ext_entry[6];
+ u32 reserved2[192];
+ u32 req_id_table[512];
+ u32 reserved3[256];
+ u64 lut_entry[512];
+};
+
+struct pci_dev_reset_methods {
+ u16 vendor;
+ u16 device;
+ int (*reset)(struct pci_dev *, int);
+};
+
+struct pci_dev_acs_enabled {
+ u16 vendor;
+ u16 device;
+ int (*acs_enabled)(struct pci_dev *, u16);
+};
+
+struct pci_dev_acs_ops {
+ u16 vendor;
+ u16 device;
+ int (*enable_acs)(struct pci_dev *);
+ int (*disable_acs_redir)(struct pci_dev *);
+};
+
+struct controller {
+ struct pcie_device *pcie;
+ u32 slot_cap;
+ unsigned int inband_presence_disabled: 1;
+ u16 slot_ctrl;
+ struct mutex ctrl_lock;
+ long unsigned int cmd_started;
+ unsigned int cmd_busy: 1;
+ wait_queue_head_t queue;
+ atomic_t pending_events;
+ unsigned int notification_enabled: 1;
+ unsigned int power_fault_detected;
+ struct task_struct *poll_thread;
+ u8 state;
+ struct mutex state_lock;
+ struct delayed_work button_work;
+ struct hotplug_slot hotplug_slot;
+ struct rw_semaphore reset_lock;
+ unsigned int ist_running;
+ int request_result;
+ wait_queue_head_t requester;
+};
+
+struct controller___2;
+
+struct hpc_ops;
+
+struct slot {
+ u8 bus;
+ u8 device;
+ u16 status;
+ u32 number;
+ u8 is_a_board;
+ u8 state;
+ u8 attention_save;
+ u8 presence_save;
+ u8 latch_save;
+ u8 pwr_save;
+ struct controller___2 *ctrl;
+ const struct hpc_ops *hpc_ops;
+ struct hotplug_slot hotplug_slot;
+ struct list_head slot_list;
+ struct delayed_work work;
+ struct mutex lock;
+ struct workqueue_struct *wq;
+ u8 hp_slot;
+};
+
+struct controller___2 {
+ struct mutex crit_sect;
+ struct mutex cmd_lock;
+ int num_slots;
+ int slot_num_inc;
+ struct pci_dev *pci_dev;
+ struct list_head slot_list;
+ const struct hpc_ops *hpc_ops;
+ wait_queue_head_t queue;
+ u8 slot_device_offset;
+ u32 pcix_misc2_reg;
+ u32 first_slot;
+ u32 cap_offset;
+ long unsigned int mmio_base;
+ long unsigned int mmio_size;
+ void *creg;
+ struct timer_list poll_timer;
+};
+
+struct hpc_ops {
+ int (*power_on_slot)(struct slot *);
+ int (*slot_enable)(struct slot *);
+ int (*slot_disable)(struct slot *);
+ int (*set_bus_speed_mode)(struct slot *, enum pci_bus_speed);
+ int (*get_power_status)(struct slot *, u8 *);
+ int (*get_attention_status)(struct slot *, u8 *);
+ int (*set_attention_status)(struct slot *, u8);
+ int (*get_latch_status)(struct slot *, u8 *);
+ int (*get_adapter_status)(struct slot *, u8 *);
+ int (*get_adapter_speed)(struct slot *, enum pci_bus_speed *);
+ int (*get_mode1_ECC_cap)(struct slot *, u8 *);
+ int (*get_prog_int)(struct slot *, u8 *);
+ int (*query_power_fault)(struct slot *);
+ void (*green_led_on)(struct slot *);
+ void (*green_led_off)(struct slot *);
+ void (*green_led_blink)(struct slot *);
+ void (*release_ctlr)(struct controller___2 *);
+ int (*check_cmd_status)(struct controller___2 *);
+};
+
+struct event_info {
+ u32 event_type;
+ struct slot *p_slot;
+ struct work_struct work;
+};
+
+struct pushbutton_work_info {
+ struct slot *p_slot;
+ struct work_struct work;
+};
+
+enum ctrl_offsets {
+ BASE_OFFSET = 0,
+ SLOT_AVAIL1 = 4,
+ SLOT_AVAIL2 = 8,
+ SLOT_CONFIG = 12,
+ SEC_BUS_CONFIG = 16,
+ MSI_CTRL = 18,
+ PROG_INTERFACE = 19,
+ CMD = 20,
+ CMD_STATUS = 22,
+ INTR_LOC = 24,
+ SERR_LOC = 28,
+ SERR_INTR_ENABLE = 32,
+ SLOT1 = 36,
+};
+
+struct acpiphp_slot;
+
+struct slot___2 {
+ struct hotplug_slot hotplug_slot;
+ struct acpiphp_slot *acpi_slot;
+ unsigned int sun;
+};
+
+struct acpiphp_slot {
+ struct list_head node;
+ struct pci_bus *bus;
+ struct list_head funcs;
+ struct slot___2 *slot;
+ u8 device;
+ u32 flags;
+};
+
+struct acpiphp_attention_info {
+ int (*set_attn)(struct hotplug_slot *, u8);
+ int (*get_attn)(struct hotplug_slot *, u8 *);
+ struct module *owner;
+};
+
+struct acpiphp_context;
+
+struct acpiphp_bridge {
+ struct list_head list;
+ struct list_head slots;
+ struct kref ref;
+ struct acpiphp_context *context;
+ int nr_slots;
+ struct pci_bus *pci_bus;
+ struct pci_dev *pci_dev;
+ bool is_going_away;
+};
+
+struct acpiphp_func {
+ struct acpiphp_bridge *parent;
+ struct acpiphp_slot *slot;
+ struct list_head sibling;
+ u8 function;
+ u32 flags;
+};
+
+struct acpiphp_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_func func;
+ struct acpiphp_bridge *bridge;
+ unsigned int refcount;
+};
+
+struct acpiphp_root_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_bridge *root_bridge;
+};
+
+struct msix_entry {
+ u32 vector;
+ u16 entry;
+};
+
+enum dmi_device_type {
+ DMI_DEV_TYPE_ANY = 0,
+ DMI_DEV_TYPE_OTHER = 1,
+ DMI_DEV_TYPE_UNKNOWN = 2,
+ DMI_DEV_TYPE_VIDEO = 3,
+ DMI_DEV_TYPE_SCSI = 4,
+ DMI_DEV_TYPE_ETHERNET = 5,
+ DMI_DEV_TYPE_TOKENRING = 6,
+ DMI_DEV_TYPE_SOUND = 7,
+ DMI_DEV_TYPE_PATA = 8,
+ DMI_DEV_TYPE_SATA = 9,
+ DMI_DEV_TYPE_SAS = 10,
+ DMI_DEV_TYPE_IPMI = -1,
+ DMI_DEV_TYPE_OEM_STRING = -2,
+ DMI_DEV_TYPE_DEV_ONBOARD = -3,
+ DMI_DEV_TYPE_DEV_SLOT = -4,
+};
+
+struct dmi_device {
+ struct list_head list;
+ int type;
+ const char *name;
+ void *device_data;
+};
+
+struct dmi_dev_onboard {
+ struct dmi_device dev;
+ int instance;
+ int segment;
+ int bus;
+ int devfn;
+};
+
+enum smbios_attr_enum {
+ SMBIOS_ATTR_NONE = 0,
+ SMBIOS_ATTR_LABEL_SHOW = 1,
+ SMBIOS_ATTR_INSTANCE_SHOW = 2,
+};
+
+enum acpi_attr_enum {
+ ACPI_ATTR_LABEL_SHOW = 0,
+ ACPI_ATTR_INDEX_SHOW = 1,
+};
+
+struct vgastate {
+ void *vgabase;
+ long unsigned int membase;
+ __u32 memsize;
+ __u32 flags;
+ __u32 depth;
+ __u32 num_attr;
+ __u32 num_crtc;
+ __u32 num_gfx;
+ __u32 num_seq;
+ void *vidstate;
+};
+
+typedef u16 acpi_owner_id;
+
+union acpi_name_union {
+ u32 integer;
+ char ascii[4];
+};
+
+struct acpi_table_desc {
+ acpi_physical_address address;
+ struct acpi_table_header *pointer;
+ u32 length;
+ union acpi_name_union signature;
+ acpi_owner_id owner_id;
+ u8 flags;
+ u16 validation_count;
+};
+
+struct acpi_madt_io_sapic {
+ struct acpi_subtable_header header;
+ u8 id;
+ u8 reserved;
+ u32 global_irq_base;
+ u64 address;
+};
+
+struct acpi_madt_interrupt_source {
+ struct acpi_subtable_header header;
+ u16 inti_flags;
+ u8 type;
+ u8 id;
+ u8 eid;
+ u8 io_sapic_vector;
+ u32 global_irq;
+ u32 flags;
+};
+
+struct acpi_madt_generic_interrupt {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 cpu_interface_number;
+ u32 uid;
+ u32 flags;
+ u32 parking_version;
+ u32 performance_interrupt;
+ u64 parked_address;
+ u64 base_address;
+ u64 gicv_base_address;
+ u64 gich_base_address;
+ u32 vgic_interrupt;
+ u64 gicr_base_address;
+ u64 arm_mpidr;
+ u8 efficiency_class;
+ u8 reserved2[1];
+ u16 spe_interrupt;
+} __attribute__((packed));
+
+struct acpi_madt_generic_distributor {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 gic_id;
+ u64 base_address;
+ u32 global_irq_base;
+ u8 version;
+ u8 reserved2[3];
+};
+
+typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *);
+
+struct transaction;
+
+struct acpi_ec {
+ acpi_handle handle;
+ int gpe;
+ int irq;
+ long unsigned int command_addr;
+ long unsigned int data_addr;
+ bool global_lock;
+ long unsigned int flags;
+ long unsigned int reference_count;
+ struct mutex mutex;
+ wait_queue_head_t wait;
+ struct list_head list;
+ struct transaction *curr;
+ spinlock_t lock;
+ struct work_struct work;
+ long unsigned int timestamp;
+ long unsigned int nr_pending_queries;
+ bool busy_polling;
+ unsigned int polling_guard;
+};
+
+enum acpi_subtable_type {
+ ACPI_SUBTABLE_COMMON = 0,
+ ACPI_SUBTABLE_HMAT = 1,
+};
+
+struct acpi_subtable_entry {
+ union acpi_subtable_headers *hdr;
+ enum acpi_subtable_type type;
+};
+
+enum acpi_predicate {
+ all_versions = 0,
+ less_than_or_equal = 1,
+ equal = 2,
+ greater_than_or_equal = 3,
+};
+
+struct acpi_platform_list {
+ char oem_id[7];
+ char oem_table_id[9];
+ u32 oem_revision;
+ char *table;
+ enum acpi_predicate pred;
+ char *reason;
+ u32 data;
+};
+
+typedef char *acpi_string;
+
+struct acpi_osi_entry {
+ char string[64];
+ bool enable;
+};
+
+struct acpi_osi_config {
+ u8 default_disabling;
+ unsigned int linux_enable: 1;
+ unsigned int linux_dmi: 1;
+ unsigned int linux_cmdline: 1;
+ unsigned int darwin_enable: 1;
+ unsigned int darwin_dmi: 1;
+ unsigned int darwin_cmdline: 1;
+};
+
+typedef u32 acpi_name;
+
+struct acpi_predefined_names {
+ const char *name;
+ u8 type;
+ char *val;
+};
+
+typedef u32 (*acpi_osd_handler)(void *);
+
+typedef void (*acpi_osd_exec_callback)(void *);
+
+typedef u32 (*acpi_sci_handler)(void *);
+
+typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *);
+
+typedef u32 (*acpi_event_handler)(void *);
+
+typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *);
+
+typedef void (*acpi_notify_handler)(acpi_handle, u32, void *);
+
+typedef void (*acpi_object_handler)(acpi_handle, void *);
+
+typedef acpi_status (*acpi_init_handler)(acpi_handle, u32);
+
+typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *);
+
+typedef acpi_status (*acpi_table_handler)(u32, void *, void *);
+
+typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, void *, void *);
+
+typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **);
+
+typedef u32 (*acpi_interface_handler)(acpi_string, u32);
+
+struct acpi_pci_id {
+ u16 segment;
+ u16 bus;
+ u16 device;
+ u16 function;
+};
+
+struct acpi_mem_space_context {
+ u32 length;
+ acpi_physical_address address;
+ acpi_physical_address mapped_physical_address;
+ u8 *mapped_logical_address;
+ acpi_size mapped_length;
+};
+
+struct acpi_table_facs {
+ char signature[4];
+ u32 length;
+ u32 hardware_signature;
+ u32 firmware_waking_vector;
+ u32 global_lock;
+ u32 flags;
+ u64 xfirmware_waking_vector;
+ u8 version;
+ u8 reserved[3];
+ u32 ospm_flags;
+ u8 reserved1[24];
+};
+
+typedef enum {
+ OSL_GLOBAL_LOCK_HANDLER = 0,
+ OSL_NOTIFY_HANDLER = 1,
+ OSL_GPE_HANDLER = 2,
+ OSL_DEBUGGER_MAIN_THREAD = 3,
+ OSL_DEBUGGER_EXEC_THREAD = 4,
+ OSL_EC_POLL_HANDLER = 5,
+ OSL_EC_BURST_HANDLER = 6,
+} acpi_execute_type;
+
+struct acpi_gpio_params {
+ unsigned int crs_entry_index;
+ unsigned int line_index;
+ bool active_low;
+};
+
+struct acpi_rw_lock {
+ void *writer_mutex;
+ void *reader_mutex;
+ u32 num_readers;
+};
+
+struct acpi_mutex_info {
+ void *mutex;
+ u32 use_count;
+ u64 thread_id;
+};
+
+union acpi_operand_object;
+
+struct acpi_namespace_node {
+ union acpi_operand_object *object;
+ u8 descriptor_type;
+ u8 type;
+ u16 flags;
+ union acpi_name_union name;
+ struct acpi_namespace_node *parent;
+ struct acpi_namespace_node *child;
+ struct acpi_namespace_node *peer;
+ acpi_owner_id owner_id;
+};
+
+struct acpi_object_common {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+};
+
+struct acpi_object_integer {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 fill[3];
+ u64 value;
+};
+
+struct acpi_object_string {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ char *pointer;
+ u32 length;
+};
+
+struct acpi_object_buffer {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 *pointer;
+ u32 length;
+ u32 aml_length;
+ u8 *aml_start;
+ struct acpi_namespace_node *node;
+};
+
+struct acpi_object_package {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object **elements;
+ u8 *aml_start;
+ u32 aml_length;
+ u32 count;
+};
+
+struct acpi_object_event {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ void *os_semaphore;
+};
+
+struct acpi_walk_state;
+
+typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *);
+
+struct acpi_object_method {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 info_flags;
+ u8 param_count;
+ u8 sync_level;
+ union acpi_operand_object *mutex;
+ union acpi_operand_object *node;
+ u8 *aml_start;
+ union {
+ acpi_internal_method implementation;
+ union acpi_operand_object *handler;
+ } dispatch;
+ u32 aml_length;
+ acpi_owner_id owner_id;
+ u8 thread_count;
+};
+
+struct acpi_thread_state;
+
+struct acpi_object_mutex {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 sync_level;
+ u16 acquisition_depth;
+ void *os_mutex;
+ u64 thread_id;
+ struct acpi_thread_state *owner_thread;
+ union acpi_operand_object *prev;
+ union acpi_operand_object *next;
+ struct acpi_namespace_node *node;
+ u8 original_sync_level;
+};
+
+struct acpi_object_region {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 space_id;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *handler;
+ union acpi_operand_object *next;
+ acpi_physical_address address;
+ u32 length;
+};
+
+struct acpi_object_notify_common {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ union acpi_operand_object *notify_list[2];
+ union acpi_operand_object *handler;
+};
+
+struct acpi_gpe_block_info;
+
+struct acpi_object_device {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ union acpi_operand_object *notify_list[2];
+ union acpi_operand_object *handler;
+ struct acpi_gpe_block_info *gpe_block;
+};
+
+struct acpi_object_power_resource {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ union acpi_operand_object *notify_list[2];
+ union acpi_operand_object *handler;
+ u32 system_level;
+ u32 resource_order;
+};
+
+struct acpi_object_processor {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 proc_id;
+ u8 length;
+ union acpi_operand_object *notify_list[2];
+ union acpi_operand_object *handler;
+ acpi_io_address address;
+};
+
+struct acpi_object_thermal_zone {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ union acpi_operand_object *notify_list[2];
+ union acpi_operand_object *handler;
+};
+
+struct acpi_object_field_common {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 field_flags;
+ u8 attribute;
+ u8 access_byte_width;
+ struct acpi_namespace_node *node;
+ u32 bit_length;
+ u32 base_byte_offset;
+ u32 value;
+ u8 start_field_bit_offset;
+ u8 access_length;
+ union acpi_operand_object *region_obj;
+};
+
+struct acpi_object_region_field {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 field_flags;
+ u8 attribute;
+ u8 access_byte_width;
+ struct acpi_namespace_node *node;
+ u32 bit_length;
+ u32 base_byte_offset;
+ u32 value;
+ u8 start_field_bit_offset;
+ u8 access_length;
+ u16 resource_length;
+ union acpi_operand_object *region_obj;
+ u8 *resource_buffer;
+ u16 pin_number_index;
+ u8 *internal_pcc_buffer;
+};
+
+struct acpi_object_buffer_field {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 field_flags;
+ u8 attribute;
+ u8 access_byte_width;
+ struct acpi_namespace_node *node;
+ u32 bit_length;
+ u32 base_byte_offset;
+ u32 value;
+ u8 start_field_bit_offset;
+ u8 access_length;
+ u8 is_create_field;
+ union acpi_operand_object *buffer_obj;
+};
+
+struct acpi_object_bank_field {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 field_flags;
+ u8 attribute;
+ u8 access_byte_width;
+ struct acpi_namespace_node *node;
+ u32 bit_length;
+ u32 base_byte_offset;
+ u32 value;
+ u8 start_field_bit_offset;
+ u8 access_length;
+ union acpi_operand_object *region_obj;
+ union acpi_operand_object *bank_obj;
+};
+
+struct acpi_object_index_field {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 field_flags;
+ u8 attribute;
+ u8 access_byte_width;
+ struct acpi_namespace_node *node;
+ u32 bit_length;
+ u32 base_byte_offset;
+ u32 value;
+ u8 start_field_bit_offset;
+ u8 access_length;
+ union acpi_operand_object *index_obj;
+ union acpi_operand_object *data_obj;
+};
+
+struct acpi_object_notify_handler {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ struct acpi_namespace_node *node;
+ u32 handler_type;
+ acpi_notify_handler handler;
+ void *context;
+ union acpi_operand_object *next[2];
+};
+
+struct acpi_object_addr_handler {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 space_id;
+ u8 handler_flags;
+ acpi_adr_space_handler handler;
+ struct acpi_namespace_node *node;
+ void *context;
+ acpi_adr_space_setup setup;
+ union acpi_operand_object *region_list;
+ union acpi_operand_object *next;
+};
+
+struct acpi_object_reference {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ u8 class;
+ u8 target_type;
+ u8 resolved;
+ void *object;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object **where;
+ u8 *index_pointer;
+ u8 *aml;
+ u32 value;
+};
+
+struct acpi_object_extra {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ struct acpi_namespace_node *method_REG;
+ struct acpi_namespace_node *scope_node;
+ void *region_context;
+ u8 *aml_start;
+ u32 aml_length;
+};
+
+struct acpi_object_data {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ acpi_object_handler handler;
+ void *pointer;
+};
+
+struct acpi_object_cache_list {
+ union acpi_operand_object *next_object;
+ u8 descriptor_type;
+ u8 type;
+ u16 reference_count;
+ u8 flags;
+ union acpi_operand_object *next;
+};
+
+union acpi_operand_object {
+ struct acpi_object_common common;
+ struct acpi_object_integer integer;
+ struct acpi_object_string string;
+ struct acpi_object_buffer buffer;
+ struct acpi_object_package package;
+ struct acpi_object_event event;
+ struct acpi_object_method method;
+ struct acpi_object_mutex mutex;
+ struct acpi_object_region region;
+ struct acpi_object_notify_common common_notify;
+ struct acpi_object_device device;
+ struct acpi_object_power_resource power_resource;
+ struct acpi_object_processor processor;
+ struct acpi_object_thermal_zone thermal_zone;
+ struct acpi_object_field_common common_field;
+ struct acpi_object_region_field field;
+ struct acpi_object_buffer_field buffer_field;
+ struct acpi_object_bank_field bank_field;
+ struct acpi_object_index_field index_field;
+ struct acpi_object_notify_handler notify;
+ struct acpi_object_addr_handler address_space;
+ struct acpi_object_reference reference;
+ struct acpi_object_extra extra;
+ struct acpi_object_data data;
+ struct acpi_object_cache_list cache;
+ struct acpi_namespace_node node;
+};
+
+struct acpi_table_list {
+ struct acpi_table_desc *tables;
+ u32 current_table_count;
+ u32 max_table_count;
+ u8 flags;
+};
+
+union acpi_parse_object;
+
+union acpi_generic_state;
+
+struct acpi_parse_state {
+ u8 *aml_start;
+ u8 *aml;
+ u8 *aml_end;
+ u8 *pkg_start;
+ u8 *pkg_end;
+ union acpi_parse_object *start_op;
+ struct acpi_namespace_node *start_node;
+ union acpi_generic_state *scope;
+ union acpi_parse_object *start_scope;
+ u32 aml_size;
+};
+
+typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, union acpi_parse_object **);
+
+typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *);
+
+struct acpi_opcode_info;
+
+struct acpi_walk_state {
+ struct acpi_walk_state *next;
+ u8 descriptor_type;
+ u8 walk_type;
+ u16 opcode;
+ u8 next_op_info;
+ u8 num_operands;
+ u8 operand_index;
+ acpi_owner_id owner_id;
+ u8 last_predicate;
+ u8 current_result;
+ u8 return_used;
+ u8 scope_depth;
+ u8 pass_number;
+ u8 namespace_override;
+ u8 result_size;
+ u8 result_count;
+ u8 *aml;
+ u32 arg_types;
+ u32 method_breakpoint;
+ u32 user_breakpoint;
+ u32 parse_flags;
+ struct acpi_parse_state parser_state;
+ u32 prev_arg_types;
+ u32 arg_count;
+ u16 method_nesting_depth;
+ u8 method_is_nested;
+ struct acpi_namespace_node arguments[7];
+ struct acpi_namespace_node local_variables[8];
+ union acpi_operand_object *operands[9];
+ union acpi_operand_object **params;
+ u8 *aml_last_while;
+ union acpi_operand_object **caller_return_desc;
+ union acpi_generic_state *control_state;
+ struct acpi_namespace_node *deferred_node;
+ union acpi_operand_object *implicit_return_obj;
+ struct acpi_namespace_node *method_call_node;
+ union acpi_parse_object *method_call_op;
+ union acpi_operand_object *method_desc;
+ struct acpi_namespace_node *method_node;
+ char *method_pathname;
+ union acpi_parse_object *op;
+ const struct acpi_opcode_info *op_info;
+ union acpi_parse_object *origin;
+ union acpi_operand_object *result_obj;
+ union acpi_generic_state *results;
+ union acpi_operand_object *return_desc;
+ union acpi_generic_state *scope_info;
+ union acpi_parse_object *prev_op;
+ union acpi_parse_object *next_op;
+ struct acpi_thread_state *thread;
+ acpi_parse_downwards descending_callback;
+ acpi_parse_upwards ascending_callback;
+};
+
+struct acpi_sci_handler_info {
+ struct acpi_sci_handler_info *next;
+ acpi_sci_handler address;
+ void *context;
+};
+
+struct acpi_gpe_handler_info {
+ acpi_gpe_handler address;
+ void *context;
+ struct acpi_namespace_node *method_node;
+ u8 original_flags;
+ u8 originally_enabled;
+};
+
+struct acpi_gpe_notify_info {
+ struct acpi_namespace_node *device_node;
+ struct acpi_gpe_notify_info *next;
+};
+
+union acpi_gpe_dispatch_info {
+ struct acpi_namespace_node *method_node;
+ struct acpi_gpe_handler_info *handler;
+ struct acpi_gpe_notify_info *notify_list;
+};
+
+struct acpi_gpe_register_info;
+
+struct acpi_gpe_event_info {
+ union acpi_gpe_dispatch_info dispatch;
+ struct acpi_gpe_register_info *register_info;
+ u8 flags;
+ u8 gpe_number;
+ u8 runtime_count;
+ u8 disable_for_dispatch;
+};
+
+struct acpi_gpe_register_info {
+ struct acpi_generic_address status_address;
+ struct acpi_generic_address enable_address;
+ u16 base_gpe_number;
+ u8 enable_for_wake;
+ u8 enable_for_run;
+ u8 mask_for_run;
+ u8 enable_mask;
+} __attribute__((packed));
+
+struct acpi_gpe_xrupt_info;
+
+struct acpi_gpe_block_info {
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_block_info *previous;
+ struct acpi_gpe_block_info *next;
+ struct acpi_gpe_xrupt_info *xrupt_block;
+ struct acpi_gpe_register_info *register_info;
+ struct acpi_gpe_event_info *event_info;
+ u64 address;
+ u32 register_count;
+ u16 gpe_count;
+ u16 block_base_number;
+ u8 space_id;
+ u8 initialized;
+};
+
+struct acpi_gpe_xrupt_info {
+ struct acpi_gpe_xrupt_info *previous;
+ struct acpi_gpe_xrupt_info *next;
+ struct acpi_gpe_block_info *gpe_block_list_head;
+ u32 interrupt_number;
+};
+
+struct acpi_fixed_event_handler {
+ acpi_event_handler handler;
+ void *context;
+};
+
+struct acpi_fixed_event_info {
+ u8 status_register_id;
+ u8 enable_register_id;
+ u16 status_bit_mask;
+ u16 enable_bit_mask;
+};
+
+struct acpi_common_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+};
+
+struct acpi_update_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ union acpi_operand_object *object;
+};
+
+struct acpi_pkg_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ u32 index;
+ union acpi_operand_object *source_object;
+ union acpi_operand_object *dest_object;
+ struct acpi_walk_state *walk_state;
+ void *this_target_obj;
+ u32 num_packages;
+};
+
+struct acpi_control_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ u16 opcode;
+ union acpi_parse_object *predicate_op;
+ u8 *aml_predicate_start;
+ u8 *package_end;
+ u64 loop_timeout;
+};
+
+union acpi_parse_value {
+ u64 integer;
+ u32 size;
+ char *string;
+ u8 *buffer;
+ char *name;
+ union acpi_parse_object *arg;
+};
+
+struct acpi_parse_obj_common {
+ union acpi_parse_object *parent;
+ u8 descriptor_type;
+ u8 flags;
+ u16 aml_opcode;
+ u8 *aml;
+ union acpi_parse_object *next;
+ struct acpi_namespace_node *node;
+ union acpi_parse_value value;
+ u8 arg_list_length;
+};
+
+struct acpi_parse_obj_named {
+ union acpi_parse_object *parent;
+ u8 descriptor_type;
+ u8 flags;
+ u16 aml_opcode;
+ u8 *aml;
+ union acpi_parse_object *next;
+ struct acpi_namespace_node *node;
+ union acpi_parse_value value;
+ u8 arg_list_length;
+ char *path;
+ u8 *data;
+ u32 length;
+ u32 name;
+};
+
+struct acpi_parse_obj_asl {
+ union acpi_parse_object *parent;
+ u8 descriptor_type;
+ u8 flags;
+ u16 aml_opcode;
+ u8 *aml;
+ union acpi_parse_object *next;
+ struct acpi_namespace_node *node;
+ union acpi_parse_value value;
+ u8 arg_list_length;
+ union acpi_parse_object *child;
+ union acpi_parse_object *parent_method;
+ char *filename;
+ u8 file_changed;
+ char *parent_filename;
+ char *external_name;
+ char *namepath;
+ char name_seg[4];
+ u32 extra_value;
+ u32 column;
+ u32 line_number;
+ u32 logical_line_number;
+ u32 logical_byte_offset;
+ u32 end_line;
+ u32 end_logical_line;
+ u32 acpi_btype;
+ u32 aml_length;
+ u32 aml_subtree_length;
+ u32 final_aml_length;
+ u32 final_aml_offset;
+ u32 compile_flags;
+ u16 parse_opcode;
+ u8 aml_opcode_length;
+ u8 aml_pkg_len_bytes;
+ u8 extra;
+ char parse_op_name[20];
+};
+
+union acpi_parse_object {
+ struct acpi_parse_obj_common common;
+ struct acpi_parse_obj_named named;
+ struct acpi_parse_obj_asl asl;
+};
+
+struct acpi_scope_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ struct acpi_namespace_node *node;
+};
+
+struct acpi_pscope_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ u32 arg_count;
+ union acpi_parse_object *op;
+ u8 *arg_end;
+ u8 *pkg_end;
+ u32 arg_list;
+};
+
+struct acpi_thread_state {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ u8 current_sync_level;
+ struct acpi_walk_state *walk_state_list;
+ union acpi_operand_object *acquired_mutex_list;
+ u64 thread_id;
+};
+
+struct acpi_result_values {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ union acpi_operand_object *obj_desc[8];
+};
+
+struct acpi_global_notify_handler {
+ acpi_notify_handler handler;
+ void *context;
+};
+
+struct acpi_notify_info {
+ void *next;
+ u8 descriptor_type;
+ u8 flags;
+ u16 value;
+ u16 state;
+ u8 handler_list_id;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *handler_list_head;
+ struct acpi_global_notify_handler *global;
+};
+
+union acpi_generic_state {
+ struct acpi_common_state common;
+ struct acpi_control_state control;
+ struct acpi_update_state update;
+ struct acpi_scope_state scope;
+ struct acpi_pscope_state parse_scope;
+ struct acpi_pkg_state pkg;
+ struct acpi_thread_state thread;
+ struct acpi_result_values results;
+ struct acpi_notify_info notify;
+};
+
+struct acpi_address_range {
+ struct acpi_address_range *next;
+ struct acpi_namespace_node *region_node;
+ acpi_physical_address start_address;
+ acpi_physical_address end_address;
+};
+
+struct acpi_opcode_info {
+ u32 parse_args;
+ u32 runtime_args;
+ u16 flags;
+ u8 object_type;
+ u8 class;
+ u8 type;
+};
+
+struct acpi_comment_node {
+ char *comment;
+ struct acpi_comment_node *next;
+};
+
+struct acpi_bit_register_info {
+ u8 parent_register;
+ u8 bit_position;
+ u16 access_bit_mask;
+};
+
+struct acpi_interface_info {
+ char *name;
+ struct acpi_interface_info *next;
+ u8 flags;
+ u8 value;
+};
+
+struct acpi_os_dpc {
+ acpi_osd_exec_callback function;
+ void *context;
+ struct work_struct work;
+};
+
+struct acpi_ioremap {
+ struct list_head list;
+ void *virt;
+ acpi_physical_address phys;
+ acpi_size size;
+ long unsigned int refcount;
+};
+
+struct acpi_hp_work {
+ struct work_struct work;
+ struct acpi_device *adev;
+ u32 src;
+};
+
+struct acpi_object_list {
+ u32 count;
+ union acpi_object *pointer;
+};
+
+struct acpi_pld_info {
+ u8 revision;
+ u8 ignore_color;
+ u8 red;
+ u8 green;
+ u8 blue;
+ u16 width;
+ u16 height;
+ u8 user_visible;
+ u8 dock;
+ u8 lid;
+ u8 panel;
+ u8 vertical_position;
+ u8 horizontal_position;
+ u8 shape;
+ u8 group_orientation;
+ u8 group_token;
+ u8 group_position;
+ u8 bay;
+ u8 ejectable;
+ u8 ospm_eject_required;
+ u8 cabinet_number;
+ u8 card_cage_number;
+ u8 reference;
+ u8 rotation;
+ u8 order;
+ u8 reserved;
+ u16 vertical_offset;
+ u16 horizontal_offset;
+};
+
+struct acpi_handle_list {
+ u32 count;
+ acpi_handle handles[10];
+};
+
+struct acpi_device_bus_id {
+ char bus_id[15];
+ unsigned int instance_no;
+ struct list_head node;
+};
+
+struct acpi_dev_match_info {
+ struct acpi_device_id hid[2];
+ const char *uid;
+ s64 hrv;
+};
+
+struct nvs_region {
+ __u64 phys_start;
+ __u64 size;
+ struct list_head node;
+};
+
+struct acpi_wakeup_handler {
+ struct list_head list_node;
+ bool (*wakeup)(void *);
+ void *context;
+};
+
+struct acpi_hardware_id {
+ struct list_head list;
+ const char *id;
+};
+
+struct acpi_data_node {
+ const char *name;
+ acpi_handle handle;
+ struct fwnode_handle fwnode;
+ struct fwnode_handle *parent;
+ struct acpi_device_data data;
+ struct list_head sibling;
+ struct kobject kobj;
+ struct completion kobj_done;
+};
+
+struct acpi_data_node_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct acpi_data_node *, char *);
+ ssize_t (*store)(struct acpi_data_node *, const char *, size_t);
+};
+
+enum acpi_bus_device_type {
+ ACPI_BUS_TYPE_DEVICE = 0,
+ ACPI_BUS_TYPE_POWER = 1,
+ ACPI_BUS_TYPE_PROCESSOR = 2,
+ ACPI_BUS_TYPE_THERMAL = 3,
+ ACPI_BUS_TYPE_POWER_BUTTON = 4,
+ ACPI_BUS_TYPE_SLEEP_BUTTON = 5,
+ ACPI_BUS_TYPE_ECDT_EC = 6,
+ ACPI_BUS_DEVICE_TYPE_COUNT = 7,
+};
+
+struct acpi_device_physical_node {
+ unsigned int node_id;
+ struct list_head node;
+ struct device *dev;
+ bool put_online: 1;
+};
+
+struct acpi_osc_context {
+ char *uuid_str;
+ int rev;
+ struct acpi_buffer cap;
+ struct acpi_buffer ret;
+};
+
+enum dev_dma_attr {
+ DEV_DMA_NOT_SUPPORTED = 0,
+ DEV_DMA_NON_COHERENT = 1,
+ DEV_DMA_COHERENT = 2,
+};
+
+struct acpi_pnp_device_id {
+ u32 length;
+ char *string;
+};
+
+struct acpi_pnp_device_id_list {
+ u32 count;
+ u32 list_size;
+ struct acpi_pnp_device_id ids[1];
+};
+
+struct acpi_device_info {
+ u32 info_size;
+ u32 name;
+ acpi_object_type type;
+ u8 param_count;
+ u16 valid;
+ u8 flags;
+ u8 highest_dstates[4];
+ u8 lowest_dstates[5];
+ u64 address;
+ struct acpi_pnp_device_id hardware_id;
+ struct acpi_pnp_device_id unique_id;
+ struct acpi_pnp_device_id class_code;
+ struct acpi_pnp_device_id_list compatible_id_list;
+};
+
+struct acpi_table_spcr {
+ struct acpi_table_header header;
+ u8 interface_type;
+ u8 reserved[3];
+ struct acpi_generic_address serial_port;
+ u8 interrupt_type;
+ u8 pc_interrupt;
+ u32 interrupt;
+ u8 baud_rate;
+ u8 parity;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 terminal_type;
+ u8 reserved1;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u32 pci_flags;
+ u8 pci_segment;
+ u32 reserved2;
+} __attribute__((packed));
+
+struct acpi_table_stao {
+ struct acpi_table_header header;
+ u8 ignore_uart;
+} __attribute__((packed));
+
+struct acpi_resource_irq {
+ u8 descriptor_length;
+ u8 triggering;
+ u8 polarity;
+ u8 shareable;
+ u8 wake_capable;
+ u8 interrupt_count;
+ u8 interrupts[1];
+};
+
+struct acpi_resource_dma {
+ u8 type;
+ u8 bus_master;
+ u8 transfer;
+ u8 channel_count;
+ u8 channels[1];
+};
+
+struct acpi_resource_start_dependent {
+ u8 descriptor_length;
+ u8 compatibility_priority;
+ u8 performance_robustness;
+};
+
+struct acpi_resource_io {
+ u8 io_decode;
+ u8 alignment;
+ u8 address_length;
+ u16 minimum;
+ u16 maximum;
+} __attribute__((packed));
+
+struct acpi_resource_fixed_io {
+ u16 address;
+ u8 address_length;
+} __attribute__((packed));
+
+struct acpi_resource_fixed_dma {
+ u16 request_lines;
+ u16 channels;
+ u8 width;
+} __attribute__((packed));
+
+struct acpi_resource_vendor {
+ u16 byte_length;
+ u8 byte_data[1];
+} __attribute__((packed));
+
+struct acpi_resource_vendor_typed {
+ u16 byte_length;
+ u8 uuid_subtype;
+ u8 uuid[16];
+ u8 byte_data[1];
+};
+
+struct acpi_resource_end_tag {
+ u8 checksum;
+};
+
+struct acpi_resource_memory24 {
+ u8 write_protect;
+ u16 minimum;
+ u16 maximum;
+ u16 alignment;
+ u16 address_length;
+} __attribute__((packed));
+
+struct acpi_resource_memory32 {
+ u8 write_protect;
+ u32 minimum;
+ u32 maximum;
+ u32 alignment;
+ u32 address_length;
+} __attribute__((packed));
+
+struct acpi_resource_fixed_memory32 {
+ u8 write_protect;
+ u32 address;
+ u32 address_length;
+} __attribute__((packed));
+
+struct acpi_memory_attribute {
+ u8 write_protect;
+ u8 caching;
+ u8 range_type;
+ u8 translation;
+};
+
+struct acpi_io_attribute {
+ u8 range_type;
+ u8 translation;
+ u8 translation_type;
+ u8 reserved1;
+};
+
+union acpi_resource_attribute {
+ struct acpi_memory_attribute mem;
+ struct acpi_io_attribute io;
+ u8 type_specific;
+};
+
+struct acpi_resource_label {
+ u16 string_length;
+ char *string_ptr;
+} __attribute__((packed));
+
+struct acpi_resource_source {
+ u8 index;
+ u16 string_length;
+ char *string_ptr;
+} __attribute__((packed));
+
+struct acpi_address16_attribute {
+ u16 granularity;
+ u16 minimum;
+ u16 maximum;
+ u16 translation_offset;
+ u16 address_length;
+};
+
+struct acpi_address32_attribute {
+ u32 granularity;
+ u32 minimum;
+ u32 maximum;
+ u32 translation_offset;
+ u32 address_length;
+};
+
+struct acpi_address64_attribute {
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+};
+
+struct acpi_resource_address {
+ u8 resource_type;
+ u8 producer_consumer;
+ u8 decode;
+ u8 min_address_fixed;
+ u8 max_address_fixed;
+ union acpi_resource_attribute info;
+};
+
+struct acpi_resource_address16 {
+ u8 resource_type;
+ u8 producer_consumer;
+ u8 decode;
+ u8 min_address_fixed;
+ u8 max_address_fixed;
+ union acpi_resource_attribute info;
+ struct acpi_address16_attribute address;
+ struct acpi_resource_source resource_source;
+} __attribute__((packed));
+
+struct acpi_resource_address32 {
+ u8 resource_type;
+ u8 producer_consumer;
+ u8 decode;
+ u8 min_address_fixed;
+ u8 max_address_fixed;
+ union acpi_resource_attribute info;
+ struct acpi_address32_attribute address;
+ struct acpi_resource_source resource_source;
+} __attribute__((packed));
+
+struct acpi_resource_address64 {
+ u8 resource_type;
+ u8 producer_consumer;
+ u8 decode;
+ u8 min_address_fixed;
+ u8 max_address_fixed;
+ union acpi_resource_attribute info;
+ struct acpi_address64_attribute address;
+ struct acpi_resource_source resource_source;
+} __attribute__((packed));
+
+struct acpi_resource_extended_address64 {
+ u8 resource_type;
+ u8 producer_consumer;
+ u8 decode;
+ u8 min_address_fixed;
+ u8 max_address_fixed;
+ union acpi_resource_attribute info;
+ u8 revision_ID;
+ struct acpi_address64_attribute address;
+ u64 type_specific;
+} __attribute__((packed));
+
+struct acpi_resource_extended_irq {
+ u8 producer_consumer;
+ u8 triggering;
+ u8 polarity;
+ u8 shareable;
+ u8 wake_capable;
+ u8 interrupt_count;
+ struct acpi_resource_source resource_source;
+ u32 interrupts[1];
+} __attribute__((packed));
+
+struct acpi_resource_generic_register {
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+} __attribute__((packed));
+
+struct acpi_resource_gpio {
+ u8 revision_id;
+ u8 connection_type;
+ u8 producer_consumer;
+ u8 pin_config;
+ u8 shareable;
+ u8 wake_capable;
+ u8 io_restriction;
+ u8 triggering;
+ u8 polarity;
+ u16 drive_strength;
+ u16 debounce_timeout;
+ u16 pin_table_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u16 *pin_table;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+struct acpi_resource_common_serialbus {
+ u8 revision_id;
+ u8 type;
+ u8 producer_consumer;
+ u8 slave_mode;
+ u8 connection_sharing;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+struct acpi_resource_i2c_serialbus {
+ u8 revision_id;
+ u8 type;
+ u8 producer_consumer;
+ u8 slave_mode;
+ u8 connection_sharing;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u8 *vendor_data;
+ u8 access_mode;
+ u16 slave_address;
+ u32 connection_speed;
+} __attribute__((packed));
+
+struct acpi_resource_spi_serialbus {
+ u8 revision_id;
+ u8 type;
+ u8 producer_consumer;
+ u8 slave_mode;
+ u8 connection_sharing;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u8 *vendor_data;
+ u8 wire_mode;
+ u8 device_polarity;
+ u8 data_bit_length;
+ u8 clock_phase;
+ u8 clock_polarity;
+ u16 device_selection;
+ u32 connection_speed;
+} __attribute__((packed));
+
+struct acpi_resource_uart_serialbus {
+ u8 revision_id;
+ u8 type;
+ u8 producer_consumer;
+ u8 slave_mode;
+ u8 connection_sharing;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u8 *vendor_data;
+ u8 endian;
+ u8 data_bits;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 parity;
+ u8 lines_enabled;
+ u16 rx_fifo_size;
+ u16 tx_fifo_size;
+ u32 default_baud_rate;
+} __attribute__((packed));
+
+struct acpi_resource_pin_function {
+ u8 revision_id;
+ u8 pin_config;
+ u8 shareable;
+ u16 function_number;
+ u16 pin_table_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u16 *pin_table;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+struct acpi_resource_pin_config {
+ u8 revision_id;
+ u8 producer_consumer;
+ u8 shareable;
+ u8 pin_config_type;
+ u32 pin_config_value;
+ u16 pin_table_length;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ u16 *pin_table;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+struct acpi_resource_pin_group {
+ u8 revision_id;
+ u8 producer_consumer;
+ u16 pin_table_length;
+ u16 vendor_length;
+ u16 *pin_table;
+ struct acpi_resource_label resource_label;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+struct acpi_resource_pin_group_function {
+ u8 revision_id;
+ u8 producer_consumer;
+ u8 shareable;
+ u16 function_number;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ struct acpi_resource_label resource_source_label;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+struct acpi_resource_pin_group_config {
+ u8 revision_id;
+ u8 producer_consumer;
+ u8 shareable;
+ u8 pin_config_type;
+ u32 pin_config_value;
+ u16 vendor_length;
+ struct acpi_resource_source resource_source;
+ struct acpi_resource_label resource_source_label;
+ u8 *vendor_data;
+} __attribute__((packed));
+
+union acpi_resource_data {
+ struct acpi_resource_irq irq;
+ struct acpi_resource_dma dma;
+ struct acpi_resource_start_dependent start_dpf;
+ struct acpi_resource_io io;
+ struct acpi_resource_fixed_io fixed_io;
+ struct acpi_resource_fixed_dma fixed_dma;
+ struct acpi_resource_vendor vendor;
+ struct acpi_resource_vendor_typed vendor_typed;
+ struct acpi_resource_end_tag end_tag;
+ struct acpi_resource_memory24 memory24;
+ struct acpi_resource_memory32 memory32;
+ struct acpi_resource_fixed_memory32 fixed_memory32;
+ struct acpi_resource_address16 address16;
+ struct acpi_resource_address32 address32;
+ struct acpi_resource_address64 address64;
+ struct acpi_resource_extended_address64 ext_address64;
+ struct acpi_resource_extended_irq extended_irq;
+ struct acpi_resource_generic_register generic_reg;
+ struct acpi_resource_gpio gpio;
+ struct acpi_resource_i2c_serialbus i2c_serial_bus;
+ struct acpi_resource_spi_serialbus spi_serial_bus;
+ struct acpi_resource_uart_serialbus uart_serial_bus;
+ struct acpi_resource_common_serialbus common_serial_bus;
+ struct acpi_resource_pin_function pin_function;
+ struct acpi_resource_pin_config pin_config;
+ struct acpi_resource_pin_group pin_group;
+ struct acpi_resource_pin_group_function pin_group_function;
+ struct acpi_resource_pin_group_config pin_group_config;
+ struct acpi_resource_address address;
+};
+
+struct acpi_resource {
+ u32 type;
+ u32 length;
+ union acpi_resource_data data;
+} __attribute__((packed));
+
+enum acpi_reconfig_event {
+ ACPI_RECONFIG_DEVICE_ADD = 0,
+ ACPI_RECONFIG_DEVICE_REMOVE = 1,
+};
+
+struct acpi_probe_entry;
+
+typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *);
+
+struct acpi_probe_entry {
+ __u8 id[5];
+ __u8 type;
+ acpi_probe_entry_validate_subtbl subtable_valid;
+ union {
+ acpi_tbl_table_handler probe_table;
+ acpi_tbl_entry_handler probe_subtbl;
+ };
+ kernel_ulong_t driver_data;
+};
+
+struct acpi_dep_data {
+ struct list_head node;
+ acpi_handle master;
+ acpi_handle slave;
+};
+
+struct acpi_table_events_work {
+ struct work_struct work;
+ void *table;
+ u32 event;
+};
+
+struct resource_win {
+ struct resource res;
+ resource_size_t offset;
+};
+
+struct res_proc_context {
+ struct list_head *list;
+ int (*preproc)(struct acpi_resource *, void *);
+ void *preproc_data;
+ int count;
+ int error;
+};
+
+typedef u32 acpi_event_status;
+
+struct acpi_table_ecdt {
+ struct acpi_table_header header;
+ struct acpi_generic_address control;
+ struct acpi_generic_address data;
+ u32 uid;
+ u8 gpe;
+ u8 id[1];
+} __attribute__((packed));
+
+struct transaction {
+ const u8 *wdata;
+ u8 *rdata;
+ short unsigned int irq_count;
+ u8 command;
+ u8 wi;
+ u8 ri;
+ u8 wlen;
+ u8 rlen;
+ u8 flags;
+};
+
+typedef int (*acpi_ec_query_func)(void *);
+
+enum ec_command {
+ ACPI_EC_COMMAND_READ = 128,
+ ACPI_EC_COMMAND_WRITE = 129,
+ ACPI_EC_BURST_ENABLE = 130,
+ ACPI_EC_BURST_DISABLE = 131,
+ ACPI_EC_COMMAND_QUERY = 132,
+};
+
+enum {
+ EC_FLAGS_QUERY_ENABLED = 0,
+ EC_FLAGS_QUERY_PENDING = 1,
+ EC_FLAGS_QUERY_GUARDING = 2,
+ EC_FLAGS_EVENT_HANDLER_INSTALLED = 3,
+ EC_FLAGS_EC_HANDLER_INSTALLED = 4,
+ EC_FLAGS_QUERY_METHODS_INSTALLED = 5,
+ EC_FLAGS_STARTED = 6,
+ EC_FLAGS_STOPPED = 7,
+ EC_FLAGS_EVENTS_MASKED = 8,
+};
+
+struct acpi_ec_query_handler {
+ struct list_head node;
+ acpi_ec_query_func func;
+ acpi_handle handle;
+ void *data;
+ u8 query_bit;
+ struct kref kref;
+};
+
+struct acpi_ec_query {
+ struct transaction transaction;
+ struct work_struct work;
+ struct acpi_ec_query_handler *handler;
+};
+
+struct acpi_pci_root_ops;
+
+struct acpi_pci_root_info {
+ struct acpi_pci_root *root;
+ struct acpi_device *bridge;
+ struct acpi_pci_root_ops *ops;
+ struct list_head resources;
+ char name[16];
+};
+
+struct acpi_pci_root_ops {
+ struct pci_ops *pci_ops;
+ int (*init_info)(struct acpi_pci_root_info *);
+ void (*release_info)(struct acpi_pci_root_info *);
+ int (*prepare_resources)(struct acpi_pci_root_info *);
+};
+
+struct pci_osc_bit_struct {
+ u32 bit;
+ char *desc;
+};
+
+struct acpi_handle_node {
+ struct list_head node;
+ acpi_handle handle;
+};
+
+struct acpi_pci_link_irq {
+ u32 active;
+ u8 triggering;
+ u8 polarity;
+ u8 resource_type;
+ u8 possible_count;
+ u32 possible[16];
+ u8 initialized: 1;
+ u8 reserved: 7;
+};
+
+struct acpi_pci_link {
+ struct list_head list;
+ struct acpi_device *device;
+ struct acpi_pci_link_irq irq;
+ int refcnt;
+};
+
+struct acpi_pci_routing_table {
+ u32 length;
+ u32 pin;
+ u64 address;
+ u32 source_index;
+ char source[4];
+};
+
+struct acpi_prt_entry {
+ struct acpi_pci_id id;
+ u8 pin;
+ acpi_handle link;
+ u32 index;
+};
+
+struct prt_quirk {
+ const struct dmi_system_id *system;
+ unsigned int segment;
+ unsigned int bus;
+ unsigned int device;
+ unsigned char pin;
+ const char *source;
+ const char *actual_source;
+};
+
+struct apd_private_data;
+
+struct apd_device_desc {
+ unsigned int flags;
+ unsigned int fixed_clk_rate;
+ struct property_entry *properties;
+ int (*setup)(struct apd_private_data *);
+};
+
+struct apd_private_data {
+ struct clk *clk;
+ struct acpi_device *adev;
+ const struct apd_device_desc *dev_desc;
+};
+
+struct acpi_power_dependent_device {
+ struct device *dev;
+ struct list_head node;
+};
+
+struct acpi_power_resource {
+ struct acpi_device device;
+ struct list_head list_node;
+ char *name;
+ u32 system_level;
+ u32 order;
+ unsigned int ref_count;
+ bool wakeup_enabled;
+ struct mutex resource_lock;
+ struct list_head dependents;
+};
+
+struct acpi_power_resource_entry {
+ struct list_head node;
+ struct acpi_power_resource *resource;
+};
+
+struct acpi_bus_event {
+ struct list_head node;
+ acpi_device_class device_class;
+ acpi_bus_id bus_id;
+ u32 type;
+ u32 data;
+};
+
+struct genlmsghdr {
+ __u8 cmd;
+ __u8 version;
+ __u16 reserved;
+};
+
+struct genl_multicast_group {
+ char name[16];
+};
+
+struct genl_ops;
+
+struct genl_info;
+
+struct genl_family {
+ int id;
+ unsigned int hdrsize;
+ char name[16];
+ unsigned int version;
+ unsigned int maxattr;
+ bool netnsok;
+ bool parallel_ops;
+ const struct nla_policy *policy;
+ int (*pre_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *);
+ void (*post_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *);
+ int (*mcast_bind)(struct net *, int);
+ void (*mcast_unbind)(struct net *, int);
+ struct nlattr **attrbuf;
+ const struct genl_ops *ops;
+ const struct genl_multicast_group *mcgrps;
+ unsigned int n_ops;
+ unsigned int n_mcgrps;
+ unsigned int mcgrp_offset;
+ struct module *module;
+};
+
+struct genl_ops {
+ int (*doit)(struct sk_buff *, struct genl_info *);
+ int (*start)(struct netlink_callback *);
+ int (*dumpit)(struct sk_buff *, struct netlink_callback *);
+ int (*done)(struct netlink_callback *);
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
+};
+
+struct genl_info {
+ u32 snd_seq;
+ u32 snd_portid;
+ struct nlmsghdr *nlhdr;
+ struct genlmsghdr *genlhdr;
+ void *userhdr;
+ struct nlattr **attrs;
+ possible_net_t _net;
+ void *user_ptr[2];
+ struct netlink_ext_ack *extack;
+};
+
+struct acpi_genl_event {
+ acpi_device_class device_class;
+ char bus_id[15];
+ u32 type;
+ u32 data;
+};
+
+enum {
+ ACPI_GENL_ATTR_UNSPEC = 0,
+ ACPI_GENL_ATTR_EVENT = 1,
+ __ACPI_GENL_ATTR_MAX = 2,
+};
+
+enum {
+ ACPI_GENL_CMD_UNSPEC = 0,
+ ACPI_GENL_CMD_EVENT = 1,
+ __ACPI_GENL_CMD_MAX = 2,
+};
+
+struct acpi_ged_device {
+ struct device *dev;
+ struct list_head event_list;
+};
+
+struct acpi_ged_event {
+ struct list_head node;
+ struct device *dev;
+ unsigned int gsi;
+ unsigned int irq;
+ acpi_handle handle;
+};
+
+struct acpi_table_bert {
+ struct acpi_table_header header;
+ u32 region_length;
+ u64 address;
+};
+
+struct acpi_table_attr {
+ struct bin_attribute attr;
+ char name[4];
+ int instance;
+ char filename[8];
+ struct list_head node;
+};
+
+struct acpi_data_attr {
+ struct bin_attribute attr;
+ u64 addr;
+};
+
+struct acpi_data_obj {
+ char *name;
+ int (*fn)(void *, struct acpi_data_attr *);
+};
+
+struct event_counter {
+ u32 count;
+ u32 flags;
+};
+
+struct acpi_device_properties {
+ const guid_t *guid;
+ const union acpi_object *properties;
+ struct list_head list;
+};
+
+struct always_present_id {
+ struct acpi_device_id hid[2];
+ struct x86_cpu_id cpu_ids[2];
+ struct dmi_system_id dmi_ids[2];
+ const char *uid;
+};
+
+struct acpi_lpat {
+ int temp;
+ int raw;
+};
+
+struct acpi_lpat_conversion_table {
+ struct acpi_lpat *lpat;
+ int lpat_count;
+};
+
+struct acpi_table_lpit {
+ struct acpi_table_header header;
+};
+
+struct acpi_lpit_header {
+ u32 type;
+ u32 length;
+ u16 unique_id;
+ u16 reserved;
+ u32 flags;
+};
+
+struct acpi_lpit_native {
+ struct acpi_lpit_header header;
+ struct acpi_generic_address entry_trigger;
+ u32 residency;
+ u32 latency;
+ struct acpi_generic_address residency_counter;
+ u64 counter_frequency;
+} __attribute__((packed));
+
+struct lpit_residency_info {
+ struct acpi_generic_address gaddr;
+ u64 frequency;
+ void *iomem_addr;
+};
+
+enum {
+ ACPI_REFCLASS_LOCAL = 0,
+ ACPI_REFCLASS_ARG = 1,
+ ACPI_REFCLASS_REFOF = 2,
+ ACPI_REFCLASS_INDEX = 3,
+ ACPI_REFCLASS_TABLE = 4,
+ ACPI_REFCLASS_NAME = 5,
+ ACPI_REFCLASS_DEBUG = 6,
+ ACPI_REFCLASS_MAX = 6,
+};
+
+struct acpi_common_descriptor {
+ void *common_pointer;
+ u8 descriptor_type;
+};
+
+union acpi_descriptor {
+ struct acpi_common_descriptor common;
+ union acpi_operand_object object;
+ struct acpi_namespace_node node;
+ union acpi_parse_object op;
+};
+
+struct acpi_create_field_info {
+ struct acpi_namespace_node *region_node;
+ struct acpi_namespace_node *field_node;
+ struct acpi_namespace_node *register_node;
+ struct acpi_namespace_node *data_register_node;
+ struct acpi_namespace_node *connection_node;
+ u8 *resource_buffer;
+ u32 bank_value;
+ u32 field_bit_position;
+ u32 field_bit_length;
+ u16 resource_length;
+ u16 pin_number_index;
+ u8 field_flags;
+ u8 attribute;
+ u8 field_type;
+ u8 access_length;
+};
+
+struct acpi_init_walk_info {
+ u32 table_index;
+ u32 object_count;
+ u32 method_count;
+ u32 serial_method_count;
+ u32 non_serial_method_count;
+ u32 serialized_method_count;
+ u32 device_count;
+ u32 op_region_count;
+ u32 field_count;
+ u32 buffer_count;
+ u32 package_count;
+ u32 op_region_init;
+ u32 field_init;
+ u32 buffer_init;
+ u32 package_init;
+ acpi_owner_id owner_id;
+};
+
+struct acpi_name_info {
+ char name[4];
+ u16 argument_list;
+ u8 expected_btypes;
+} __attribute__((packed));
+
+struct acpi_package_info {
+ u8 type;
+ u8 object_type1;
+ u8 count1;
+ u8 object_type2;
+ u8 count2;
+ u16 reserved;
+} __attribute__((packed));
+
+struct acpi_package_info2 {
+ u8 type;
+ u8 count;
+ u8 object_type[4];
+ u8 reserved;
+};
+
+struct acpi_package_info3 {
+ u8 type;
+ u8 count;
+ u8 object_type[2];
+ u8 tail_object_type;
+ u16 reserved;
+} __attribute__((packed));
+
+struct acpi_package_info4 {
+ u8 type;
+ u8 object_type1;
+ u8 count1;
+ u8 sub_object_types;
+ u8 pkg_count;
+ u16 reserved;
+} __attribute__((packed));
+
+union acpi_predefined_info {
+ struct acpi_name_info info;
+ struct acpi_package_info ret_info;
+ struct acpi_package_info2 ret_info2;
+ struct acpi_package_info3 ret_info3;
+ struct acpi_package_info4 ret_info4;
+};
+
+struct acpi_evaluate_info {
+ struct acpi_namespace_node *prefix_node;
+ const char *relative_pathname;
+ union acpi_operand_object **parameters;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *obj_desc;
+ char *full_pathname;
+ const union acpi_predefined_info *predefined;
+ union acpi_operand_object *return_object;
+ union acpi_operand_object *parent_package;
+ u32 return_flags;
+ u32 return_btype;
+ u16 param_count;
+ u16 node_flags;
+ u8 pass_number;
+ u8 return_object_type;
+ u8 flags;
+};
+
+enum {
+ AML_FIELD_ACCESS_ANY = 0,
+ AML_FIELD_ACCESS_BYTE = 1,
+ AML_FIELD_ACCESS_WORD = 2,
+ AML_FIELD_ACCESS_DWORD = 3,
+ AML_FIELD_ACCESS_QWORD = 4,
+ AML_FIELD_ACCESS_BUFFER = 5,
+};
+
+typedef enum {
+ ACPI_IMODE_LOAD_PASS1 = 1,
+ ACPI_IMODE_LOAD_PASS2 = 2,
+ ACPI_IMODE_EXECUTE = 3,
+} acpi_interpreter_mode;
+
+typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *);
+
+struct acpi_gpe_walk_info {
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ u16 count;
+ acpi_owner_id owner_id;
+ u8 execute_by_owner_id;
+};
+
+struct acpi_gpe_device_info {
+ u32 index;
+ u32 next_block_base_index;
+ acpi_status status;
+ struct acpi_namespace_node *gpe_device;
+};
+
+typedef acpi_status (*acpi_gpe_callback)(struct acpi_gpe_xrupt_info *, struct acpi_gpe_block_info *, void *);
+
+struct acpi_connection_info {
+ u8 *connection;
+ u16 length;
+ u8 access_length;
+};
+
+struct acpi_reg_walk_info {
+ u32 function;
+ u32 reg_run_count;
+ acpi_adr_space_type space_id;
+};
+
+enum {
+ AML_FIELD_UPDATE_PRESERVE = 0,
+ AML_FIELD_UPDATE_WRITE_AS_ONES = 32,
+ AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64,
+};
+
+struct acpi_signal_fatal_info {
+ u32 type;
+ u32 code;
+ u32 argument;
+};
+
+enum {
+ MATCH_MTR = 0,
+ MATCH_MEQ = 1,
+ MATCH_MLE = 2,
+ MATCH_MLT = 3,
+ MATCH_MGE = 4,
+ MATCH_MGT = 5,
+};
+
+enum {
+ AML_FIELD_ATTRIB_QUICK = 2,
+ AML_FIELD_ATTRIB_SEND_RECEIVE = 4,
+ AML_FIELD_ATTRIB_BYTE = 6,
+ AML_FIELD_ATTRIB_WORD = 8,
+ AML_FIELD_ATTRIB_BLOCK = 10,
+ AML_FIELD_ATTRIB_BYTES = 11,
+ AML_FIELD_ATTRIB_PROCESS_CALL = 12,
+ AML_FIELD_ATTRIB_BLOCK_PROCESS_CALL = 13,
+ AML_FIELD_ATTRIB_RAW_BYTES = 14,
+ AML_FIELD_ATTRIB_RAW_PROCESS_BYTES = 15,
+};
+
+typedef enum {
+ ACPI_TRACE_AML_METHOD = 0,
+ ACPI_TRACE_AML_OPCODE = 1,
+ ACPI_TRACE_AML_REGION = 2,
+} acpi_trace_event_type;
+
+struct acpi_gpe_block_status_context {
+ struct acpi_gpe_register_info *gpe_skip_register_info;
+ u8 gpe_skip_mask;
+ u8 retval;
+};
+
+struct acpi_port_info {
+ char *name;
+ u16 start;
+ u16 end;
+ u8 osi_dependency;
+};
+
+struct acpi_pci_device {
+ acpi_handle device;
+ struct acpi_pci_device *next;
+};
+
+struct acpi_device_walk_info {
+ struct acpi_table_desc *table_desc;
+ struct acpi_evaluate_info *evaluate_info;
+ u32 device_count;
+ u32 num_STA;
+ u32 num_INI;
+};
+
+enum acpi_return_package_types {
+ ACPI_PTYPE1_FIXED = 1,
+ ACPI_PTYPE1_VAR = 2,
+ ACPI_PTYPE1_OPTION = 3,
+ ACPI_PTYPE2 = 4,
+ ACPI_PTYPE2_COUNT = 5,
+ ACPI_PTYPE2_PKG_COUNT = 6,
+ ACPI_PTYPE2_FIXED = 7,
+ ACPI_PTYPE2_MIN = 8,
+ ACPI_PTYPE2_REV_FIXED = 9,
+ ACPI_PTYPE2_FIX_VAR = 10,
+ ACPI_PTYPE2_VAR_VAR = 11,
+ ACPI_PTYPE2_UUID_PAIR = 12,
+ ACPI_PTYPE_CUSTOM = 13,
+};
+
+typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, union acpi_operand_object *, union acpi_operand_object **);
+
+struct acpi_simple_repair_info {
+ char name[4];
+ u32 unexpected_btypes;
+ u32 package_index;
+ acpi_object_converter object_converter;
+};
+
+typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, union acpi_operand_object **);
+
+struct acpi_repair_info {
+ char name[4];
+ acpi_repair_function repair_function;
+};
+
+struct acpi_namestring_info {
+ const char *external_name;
+ const char *next_external_char;
+ char *internal_name;
+ u32 length;
+ u32 num_segments;
+ u32 num_carats;
+ u8 fully_qualified;
+};
+
+typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **);
+
+struct acpi_get_devices_info {
+ acpi_walk_callback user_function;
+ void *context;
+ const char *hid;
+};
+
+struct aml_resource_small_header {
+ u8 descriptor_type;
+};
+
+struct aml_resource_irq {
+ u8 descriptor_type;
+ u16 irq_mask;
+ u8 flags;
+} __attribute__((packed));
+
+struct aml_resource_dma {
+ u8 descriptor_type;
+ u8 dma_channel_mask;
+ u8 flags;
+};
+
+struct aml_resource_start_dependent {
+ u8 descriptor_type;
+ u8 flags;
+};
+
+struct aml_resource_end_dependent {
+ u8 descriptor_type;
+};
+
+struct aml_resource_io {
+ u8 descriptor_type;
+ u8 flags;
+ u16 minimum;
+ u16 maximum;
+ u8 alignment;
+ u8 address_length;
+};
+
+struct aml_resource_fixed_io {
+ u8 descriptor_type;
+ u16 address;
+ u8 address_length;
+} __attribute__((packed));
+
+struct aml_resource_vendor_small {
+ u8 descriptor_type;
+};
+
+struct aml_resource_end_tag {
+ u8 descriptor_type;
+ u8 checksum;
+};
+
+struct aml_resource_fixed_dma {
+ u8 descriptor_type;
+ u16 request_lines;
+ u16 channels;
+ u8 width;
+} __attribute__((packed));
+
+struct aml_resource_large_header {
+ u8 descriptor_type;
+ u16 resource_length;
+} __attribute__((packed));
+
+struct aml_resource_memory24 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 flags;
+ u16 minimum;
+ u16 maximum;
+ u16 alignment;
+ u16 address_length;
+} __attribute__((packed));
+
+struct aml_resource_vendor_large {
+ u8 descriptor_type;
+ u16 resource_length;
+} __attribute__((packed));
+
+struct aml_resource_memory32 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 flags;
+ u32 minimum;
+ u32 maximum;
+ u32 alignment;
+ u32 address_length;
+} __attribute__((packed));
+
+struct aml_resource_fixed_memory32 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 flags;
+ u32 address;
+ u32 address_length;
+} __attribute__((packed));
+
+struct aml_resource_address {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 resource_type;
+ u8 flags;
+ u8 specific_flags;
+} __attribute__((packed));
+
+struct aml_resource_extended_address64 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 resource_type;
+ u8 flags;
+ u8 specific_flags;
+ u8 revision_ID;
+ u8 reserved;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+ u64 type_specific;
+} __attribute__((packed));
+
+struct aml_resource_address64 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 resource_type;
+ u8 flags;
+ u8 specific_flags;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+} __attribute__((packed));
+
+struct aml_resource_address32 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 resource_type;
+ u8 flags;
+ u8 specific_flags;
+ u32 granularity;
+ u32 minimum;
+ u32 maximum;
+ u32 translation_offset;
+ u32 address_length;
+} __attribute__((packed));
+
+struct aml_resource_address16 {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 resource_type;
+ u8 flags;
+ u8 specific_flags;
+ u16 granularity;
+ u16 minimum;
+ u16 maximum;
+ u16 translation_offset;
+ u16 address_length;
+} __attribute__((packed));
+
+struct aml_resource_extended_irq {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 flags;
+ u8 interrupt_count;
+ u32 interrupts[1];
+} __attribute__((packed));
+
+struct aml_resource_generic_register {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 address_space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+} __attribute__((packed));
+
+struct aml_resource_gpio {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u8 connection_type;
+ u16 flags;
+ u16 int_flags;
+ u8 pin_config;
+ u16 drive_strength;
+ u16 debounce_timeout;
+ u16 pin_table_offset;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+} __attribute__((packed));
+
+struct aml_resource_common_serialbus {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u8 res_source_index;
+ u8 type;
+ u8 flags;
+ u16 type_specific_flags;
+ u8 type_revision_id;
+ u16 type_data_length;
+} __attribute__((packed));
+
+struct aml_resource_i2c_serialbus {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u8 res_source_index;
+ u8 type;
+ u8 flags;
+ u16 type_specific_flags;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u32 connection_speed;
+ u16 slave_address;
+} __attribute__((packed));
+
+struct aml_resource_spi_serialbus {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u8 res_source_index;
+ u8 type;
+ u8 flags;
+ u16 type_specific_flags;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u32 connection_speed;
+ u8 data_bit_length;
+ u8 clock_phase;
+ u8 clock_polarity;
+ u16 device_selection;
+} __attribute__((packed));
+
+struct aml_resource_uart_serialbus {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u8 res_source_index;
+ u8 type;
+ u8 flags;
+ u16 type_specific_flags;
+ u8 type_revision_id;
+ u16 type_data_length;
+ u32 default_baud_rate;
+ u16 rx_fifo_size;
+ u16 tx_fifo_size;
+ u8 parity;
+ u8 lines_enabled;
+} __attribute__((packed));
+
+struct aml_resource_pin_function {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u16 flags;
+ u8 pin_config;
+ u16 function_number;
+ u16 pin_table_offset;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+} __attribute__((packed));
+
+struct aml_resource_pin_config {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u16 flags;
+ u8 pin_config_type;
+ u32 pin_config_value;
+ u16 pin_table_offset;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+} __attribute__((packed));
+
+struct aml_resource_pin_group {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u16 flags;
+ u16 pin_table_offset;
+ u16 label_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+} __attribute__((packed));
+
+struct aml_resource_pin_group_function {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u16 flags;
+ u16 function_number;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 res_source_label_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+} __attribute__((packed));
+
+struct aml_resource_pin_group_config {
+ u8 descriptor_type;
+ u16 resource_length;
+ u8 revision_id;
+ u16 flags;
+ u8 pin_config_type;
+ u32 pin_config_value;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 res_source_label_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+} __attribute__((packed));
+
+union aml_resource {
+ u8 descriptor_type;
+ struct aml_resource_small_header small_header;
+ struct aml_resource_large_header large_header;
+ struct aml_resource_irq irq;
+ struct aml_resource_dma dma;
+ struct aml_resource_start_dependent start_dpf;
+ struct aml_resource_end_dependent end_dpf;
+ struct aml_resource_io io;
+ struct aml_resource_fixed_io fixed_io;
+ struct aml_resource_fixed_dma fixed_dma;
+ struct aml_resource_vendor_small vendor_small;
+ struct aml_resource_end_tag end_tag;
+ struct aml_resource_memory24 memory24;
+ struct aml_resource_generic_register generic_reg;
+ struct aml_resource_vendor_large vendor_large;
+ struct aml_resource_memory32 memory32;
+ struct aml_resource_fixed_memory32 fixed_memory32;
+ struct aml_resource_address16 address16;
+ struct aml_resource_address32 address32;
+ struct aml_resource_address64 address64;
+ struct aml_resource_extended_address64 ext_address64;
+ struct aml_resource_extended_irq extended_irq;
+ struct aml_resource_gpio gpio;
+ struct aml_resource_i2c_serialbus i2c_serial_bus;
+ struct aml_resource_spi_serialbus spi_serial_bus;
+ struct aml_resource_uart_serialbus uart_serial_bus;
+ struct aml_resource_common_serialbus common_serial_bus;
+ struct aml_resource_pin_function pin_function;
+ struct aml_resource_pin_config pin_config;
+ struct aml_resource_pin_group pin_group;
+ struct aml_resource_pin_group_function pin_group_function;
+ struct aml_resource_pin_group_config pin_group_config;
+ struct aml_resource_address address;
+ u32 dword_item;
+ u16 word_item;
+ u8 byte_item;
+};
+
+struct acpi_rsconvert_info {
+ u8 opcode;
+ u8 resource_offset;
+ u8 aml_offset;
+ u8 value;
+};
+
+enum {
+ ACPI_RSC_INITGET = 0,
+ ACPI_RSC_INITSET = 1,
+ ACPI_RSC_FLAGINIT = 2,
+ ACPI_RSC_1BITFLAG = 3,
+ ACPI_RSC_2BITFLAG = 4,
+ ACPI_RSC_3BITFLAG = 5,
+ ACPI_RSC_ADDRESS = 6,
+ ACPI_RSC_BITMASK = 7,
+ ACPI_RSC_BITMASK16 = 8,
+ ACPI_RSC_COUNT = 9,
+ ACPI_RSC_COUNT16 = 10,
+ ACPI_RSC_COUNT_GPIO_PIN = 11,
+ ACPI_RSC_COUNT_GPIO_RES = 12,
+ ACPI_RSC_COUNT_GPIO_VEN = 13,
+ ACPI_RSC_COUNT_SERIAL_RES = 14,
+ ACPI_RSC_COUNT_SERIAL_VEN = 15,
+ ACPI_RSC_DATA8 = 16,
+ ACPI_RSC_EXIT_EQ = 17,
+ ACPI_RSC_EXIT_LE = 18,
+ ACPI_RSC_EXIT_NE = 19,
+ ACPI_RSC_LENGTH = 20,
+ ACPI_RSC_MOVE_GPIO_PIN = 21,
+ ACPI_RSC_MOVE_GPIO_RES = 22,
+ ACPI_RSC_MOVE_SERIAL_RES = 23,
+ ACPI_RSC_MOVE_SERIAL_VEN = 24,
+ ACPI_RSC_MOVE8 = 25,
+ ACPI_RSC_MOVE16 = 26,
+ ACPI_RSC_MOVE32 = 27,
+ ACPI_RSC_MOVE64 = 28,
+ ACPI_RSC_SET8 = 29,
+ ACPI_RSC_SOURCE = 30,
+ ACPI_RSC_SOURCEX = 31,
+};
+
+typedef u16 acpi_rs_length;
+
+typedef u32 acpi_rsdesc_size;
+
+struct acpi_vendor_uuid {
+ u8 subtype;
+ u8 data[16];
+};
+
+typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *);
+
+struct acpi_vendor_walk_info {
+ struct acpi_vendor_uuid *uuid;
+ struct acpi_buffer *buffer;
+ acpi_status status;
+};
+
+struct acpi_fadt_info {
+ const char *name;
+ u16 address64;
+ u16 address32;
+ u16 length;
+ u8 default_length;
+ u8 flags;
+};
+
+struct acpi_fadt_pm_info {
+ struct acpi_generic_address *target;
+ u16 source;
+ u8 register_num;
+};
+
+struct acpi_table_rsdp {
+ char signature[8];
+ u8 checksum;
+ char oem_id[6];
+ u8 revision;
+ u32 rsdt_physical_address;
+ u32 length;
+ u64 xsdt_physical_address;
+ u8 extended_checksum;
+ u8 reserved[3];
+} __attribute__((packed));
+
+struct acpi_pkg_info {
+ u8 *free_space;
+ acpi_size length;
+ u32 object_space;
+ u32 num_packages;
+};
+
+struct acpi_exception_info {
+ char *name;
+};
+
+typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, union acpi_generic_state *, void *);
+
+typedef u32 acpi_mutex_handle;
+
+typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **);
+
+struct input_id {
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+struct input_absinfo {
+ __s32 value;
+ __s32 minimum;
+ __s32 maximum;
+ __s32 fuzz;
+ __s32 flat;
+ __s32 resolution;
+};
+
+struct input_keymap_entry {
+ __u8 flags;
+ __u8 len;
+ __u16 index;
+ __u32 keycode;
+ __u8 scancode[32];
+};
+
+struct ff_replay {
+ __u16 length;
+ __u16 delay;
+};
+
+struct ff_trigger {
+ __u16 button;
+ __u16 interval;
+};
+
+struct ff_envelope {
+ __u16 attack_length;
+ __u16 attack_level;
+ __u16 fade_length;
+ __u16 fade_level;
+};
+
+struct ff_constant_effect {
+ __s16 level;
+ struct ff_envelope envelope;
+};
+
+struct ff_ramp_effect {
+ __s16 start_level;
+ __s16 end_level;
+ struct ff_envelope envelope;
+};
+
+struct ff_condition_effect {
+ __u16 right_saturation;
+ __u16 left_saturation;
+ __s16 right_coeff;
+ __s16 left_coeff;
+ __u16 deadband;
+ __s16 center;
+};
+
+struct ff_periodic_effect {
+ __u16 waveform;
+ __u16 period;
+ __s16 magnitude;
+ __s16 offset;
+ __u16 phase;
+ struct ff_envelope envelope;
+ __u32 custom_len;
+ __s16 *custom_data;
+};
+
+struct ff_rumble_effect {
+ __u16 strong_magnitude;
+ __u16 weak_magnitude;
+};
+
+struct ff_effect {
+ __u16 type;
+ __s16 id;
+ __u16 direction;
+ struct ff_trigger trigger;
+ struct ff_replay replay;
+ union {
+ struct ff_constant_effect constant;
+ struct ff_ramp_effect ramp;
+ struct ff_periodic_effect periodic;
+ struct ff_condition_effect condition[2];
+ struct ff_rumble_effect rumble;
+ } u;
+};
+
+struct input_device_id {
+ kernel_ulong_t flags;
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+ kernel_ulong_t evbit[1];
+ kernel_ulong_t keybit[12];
+ kernel_ulong_t relbit[1];
+ kernel_ulong_t absbit[1];
+ kernel_ulong_t mscbit[1];
+ kernel_ulong_t ledbit[1];
+ kernel_ulong_t sndbit[1];
+ kernel_ulong_t ffbit[2];
+ kernel_ulong_t swbit[1];
+ kernel_ulong_t propbit[1];
+ kernel_ulong_t driver_info;
+};
+
+struct input_value {
+ __u16 type;
+ __u16 code;
+ __s32 value;
+};
+
+enum input_clock_type {
+ INPUT_CLK_REAL = 0,
+ INPUT_CLK_MONO = 1,
+ INPUT_CLK_BOOT = 2,
+ INPUT_CLK_MAX = 3,
+};
+
+struct ff_device;
+
+struct input_dev_poller;
+
+struct input_mt;
+
+struct input_handle;
+
+struct input_dev {
+ const char *name;
+ const char *phys;
+ const char *uniq;
+ struct input_id id;
+ long unsigned int propbit[1];
+ long unsigned int evbit[1];
+ long unsigned int keybit[12];
+ long unsigned int relbit[1];
+ long unsigned int absbit[1];
+ long unsigned int mscbit[1];
+ long unsigned int ledbit[1];
+ long unsigned int sndbit[1];
+ long unsigned int ffbit[2];
+ long unsigned int swbit[1];
+ unsigned int hint_events_per_packet;
+ unsigned int keycodemax;
+ unsigned int keycodesize;
+ void *keycode;
+ int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *);
+ int (*getkeycode)(struct input_dev *, struct input_keymap_entry *);
+ struct ff_device *ff;
+ struct input_dev_poller *poller;
+ unsigned int repeat_key;
+ struct timer_list timer;
+ int rep[2];
+ struct input_mt *mt;
+ struct input_absinfo *absinfo;
+ long unsigned int key[12];
+ long unsigned int led[1];
+ long unsigned int snd[1];
+ long unsigned int sw[1];
+ int (*open)(struct input_dev *);
+ void (*close)(struct input_dev *);
+ int (*flush)(struct input_dev *, struct file *);
+ int (*event)(struct input_dev *, unsigned int, unsigned int, int);
+ struct input_handle *grab;
+ spinlock_t event_lock;
+ struct mutex mutex;
+ unsigned int users;
+ bool going_away;
+ struct device dev;
+ struct list_head h_list;
+ struct list_head node;
+ unsigned int num_vals;
+ unsigned int max_vals;
+ struct input_value *vals;
+ bool devres_managed;
+ ktime_t timestamp[3];
+};
+
+struct ff_device {
+ int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *);
+ int (*erase)(struct input_dev *, int);
+ int (*playback)(struct input_dev *, int, int);
+ void (*set_gain)(struct input_dev *, u16);
+ void (*set_autocenter)(struct input_dev *, u16);
+ void (*destroy)(struct ff_device *);
+ void *private;
+ long unsigned int ffbit[2];
+ struct mutex mutex;
+ int max_effects;
+ struct ff_effect *effects;
+ struct file *effect_owners[0];
+};
+
+struct input_handler;
+
+struct input_handle {
+ void *private;
+ int open;
+ const char *name;
+ struct input_dev *dev;
+ struct input_handler *handler;
+ struct list_head d_node;
+ struct list_head h_node;
+};
+
+struct input_handler {
+ void *private;
+ void (*event)(struct input_handle *, unsigned int, unsigned int, int);
+ void (*events)(struct input_handle *, const struct input_value *, unsigned int);
+ bool (*filter)(struct input_handle *, unsigned int, unsigned int, int);
+ bool (*match)(struct input_handler *, struct input_dev *);
+ int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *);
+ void (*disconnect)(struct input_handle *);
+ void (*start)(struct input_handle *);
+ bool legacy_minors;
+ int minor;
+ const char *name;
+ const struct input_device_id *id_table;
+ struct list_head h_list;
+ struct list_head node;
+};
+
+enum {
+ ACPI_BUTTON_LID_INIT_IGNORE = 0,
+ ACPI_BUTTON_LID_INIT_OPEN = 1,
+ ACPI_BUTTON_LID_INIT_METHOD = 2,
+ ACPI_BUTTON_LID_INIT_DISABLED = 3,
+};
+
+struct acpi_button {
+ unsigned int type;
+ struct input_dev *input;
+ char phys[32];
+ long unsigned int pushed;
+ int last_state;
+ ktime_t last_time;
+ bool suspended;
+};
+
+struct acpi_pci_slot {
+ struct pci_slot *pci_slot;
+ struct list_head list;
+};
+
+struct acpi_lpi_states_array {
+ unsigned int size;
+ unsigned int composite_states_size;
+ struct acpi_lpi_state *entries;
+ struct acpi_lpi_state *composite_states[8];
+};
+
+struct throttling_tstate {
+ unsigned int cpu;
+ int target_state;
+};
+
+struct acpi_processor_throttling_arg {
+ struct acpi_processor *pr;
+ int target_state;
+ bool force;
+};
+
+struct container_dev {
+ struct device dev;
+ int (*offline)(struct container_dev *);
+};
+
+enum {
+ NDD_ALIASING = 0,
+ NDD_UNARMED = 1,
+ NDD_LOCKED = 2,
+ NDD_SECURITY_OVERWRITE = 3,
+ NDD_WORK_PENDING = 4,
+ NDD_NOBLK = 5,
+ NDD_LABELING = 6,
+ ND_IOCTL_MAX_BUFLEN = 4194304,
+ ND_CMD_MAX_ELEM = 5,
+ ND_CMD_MAX_ENVELOPE = 256,
+ ND_MAX_MAPPINGS = 32,
+ ND_REGION_PAGEMAP = 0,
+ ND_REGION_PERSIST_CACHE = 1,
+ ND_REGION_PERSIST_MEMCTRL = 2,
+ ND_REGION_ASYNC = 3,
+ DPA_RESOURCE_ADJUSTED = 1,
+};
+
+struct nvdimm_bus_descriptor;
+
+struct nvdimm;
+
+typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *, unsigned int, int *);
+
+struct nvdimm_bus_descriptor {
+ const struct attribute_group **attr_groups;
+ long unsigned int bus_dsm_mask;
+ long unsigned int cmd_mask;
+ struct module *module;
+ char *provider_name;
+ struct device_node *of_node;
+ ndctl_fn ndctl;
+ int (*flush_probe)(struct nvdimm_bus_descriptor *);
+ int (*clear_to_send)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *);
+};
+
+struct nd_cmd_desc {
+ int in_num;
+ int out_num;
+ u32 in_sizes[5];
+ int out_sizes[5];
+};
+
+struct nd_interleave_set {
+ u64 cookie1;
+ u64 cookie2;
+ u64 altcookie;
+ guid_t type_guid;
+};
+
+struct nd_mapping_desc {
+ struct nvdimm *nvdimm;
+ u64 start;
+ u64 size;
+ int position;
+};
+
+struct nd_region;
+
+struct nd_region_desc {
+ struct resource *res;
+ struct nd_mapping_desc *mapping;
+ u16 num_mappings;
+ const struct attribute_group **attr_groups;
+ struct nd_interleave_set *nd_set;
+ void *provider_data;
+ int num_lanes;
+ int numa_node;
+ int target_node;
+ long unsigned int flags;
+ struct device_node *of_node;
+ int (*flush)(struct nd_region *, struct bio *);
+};
+
+struct nvdimm_bus;
+
+struct nd_blk_region;
+
+struct nd_blk_region_desc {
+ int (*enable)(struct nvdimm_bus *, struct device *);
+ int (*do_io)(struct nd_blk_region *, resource_size_t, void *, u64, int);
+ struct nd_region_desc ndr_desc;
+};
+
+struct nvdimm_key_data {
+ u8 data[32];
+};
+
+enum nvdimm_passphrase_type {
+ NVDIMM_USER = 0,
+ NVDIMM_MASTER = 1,
+};
+
+struct nvdimm_security_ops {
+ long unsigned int (*get_flags)(struct nvdimm *, enum nvdimm_passphrase_type);
+ int (*freeze)(struct nvdimm *);
+ int (*change_key)(struct nvdimm *, const struct nvdimm_key_data *, const struct nvdimm_key_data *, enum nvdimm_passphrase_type);
+ int (*unlock)(struct nvdimm *, const struct nvdimm_key_data *);
+ int (*disable)(struct nvdimm *, const struct nvdimm_key_data *);
+ int (*erase)(struct nvdimm *, const struct nvdimm_key_data *, enum nvdimm_passphrase_type);
+ int (*overwrite)(struct nvdimm *, const struct nvdimm_key_data *);
+ int (*query_overwrite)(struct nvdimm *);
+};
+
+struct nd_cmd_dimm_flags {
+ __u32 status;
+ __u32 flags;
+};
+
+struct nd_cmd_get_config_data_hdr {
+ __u32 in_offset;
+ __u32 in_length;
+ __u32 status;
+ __u8 out_buf[0];
+};
+
+struct nd_cmd_set_config_hdr {
+ __u32 in_offset;
+ __u32 in_length;
+ __u8 in_buf[0];
+};
+
+struct nd_cmd_ars_cap {
+ __u64 address;
+ __u64 length;
+ __u32 status;
+ __u32 max_ars_out;
+ __u32 clear_err_unit;
+ __u16 flags;
+ __u16 reserved;
+};
+
+struct nd_cmd_ars_start {
+ __u64 address;
+ __u64 length;
+ __u16 type;
+ __u8 flags;
+ __u8 reserved[5];
+ __u32 status;
+ __u32 scrub_time;
+};
+
+struct nd_ars_record {
+ __u32 handle;
+ __u32 reserved;
+ __u64 err_address;
+ __u64 length;
+};
+
+struct nd_cmd_ars_status {
+ __u32 status;
+ __u32 out_length;
+ __u64 address;
+ __u64 length;
+ __u64 restart_address;
+ __u64 restart_length;
+ __u16 type;
+ __u16 flags;
+ __u32 num_records;
+ struct nd_ars_record records[0];
+};
+
+struct nd_cmd_clear_error {
+ __u64 address;
+ __u64 length;
+ __u32 status;
+ __u8 reserved[4];
+ __u64 cleared;
+};
+
+enum {
+ ND_CMD_IMPLEMENTED = 0,
+ ND_CMD_ARS_CAP = 1,
+ ND_CMD_ARS_START = 2,
+ ND_CMD_ARS_STATUS = 3,
+ ND_CMD_CLEAR_ERROR = 4,
+ ND_CMD_SMART = 1,
+ ND_CMD_SMART_THRESHOLD = 2,
+ ND_CMD_DIMM_FLAGS = 3,
+ ND_CMD_GET_CONFIG_SIZE = 4,
+ ND_CMD_GET_CONFIG_DATA = 5,
+ ND_CMD_SET_CONFIG_DATA = 6,
+ ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7,
+ ND_CMD_VENDOR_EFFECT_LOG = 8,
+ ND_CMD_VENDOR = 9,
+ ND_CMD_CALL = 10,
+};
+
+enum {
+ ND_ARS_VOLATILE = 1,
+ ND_ARS_PERSISTENT = 2,
+ ND_ARS_RETURN_PREV_DATA = 2,
+ ND_CONFIG_LOCKED = 1,
+};
+
+struct nd_cmd_pkg {
+ __u64 nd_family;
+ __u64 nd_command;
+ __u32 nd_size_in;
+ __u32 nd_size_out;
+ __u32 nd_reserved2[9];
+ __u32 nd_fw_size;
+ unsigned char nd_payload[0];
+};
+
+struct acpi_nfit_header {
+ u16 type;
+ u16 length;
+};
+
+enum acpi_nfit_type {
+ ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
+ ACPI_NFIT_TYPE_MEMORY_MAP = 1,
+ ACPI_NFIT_TYPE_INTERLEAVE = 2,
+ ACPI_NFIT_TYPE_SMBIOS = 3,
+ ACPI_NFIT_TYPE_CONTROL_REGION = 4,
+ ACPI_NFIT_TYPE_DATA_REGION = 5,
+ ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
+ ACPI_NFIT_TYPE_CAPABILITIES = 7,
+ ACPI_NFIT_TYPE_RESERVED = 8,
+};
+
+struct acpi_nfit_system_address {
+ struct acpi_nfit_header header;
+ u16 range_index;
+ u16 flags;
+ u32 reserved;
+ u32 proximity_domain;
+ u8 range_guid[16];
+ u64 address;
+ u64 length;
+ u64 memory_mapping;
+};
+
+struct acpi_nfit_memory_map {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 physical_id;
+ u16 region_id;
+ u16 range_index;
+ u16 region_index;
+ u64 region_size;
+ u64 region_offset;
+ u64 address;
+ u16 interleave_index;
+ u16 interleave_ways;
+ u16 flags;
+ u16 reserved;
+};
+
+struct acpi_nfit_interleave {
+ struct acpi_nfit_header header;
+ u16 interleave_index;
+ u16 reserved;
+ u32 line_count;
+ u32 line_size;
+ u32 line_offset[1];
+};
+
+struct acpi_nfit_control_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_revision_id;
+ u8 valid_fields;
+ u8 manufacturing_location;
+ u16 manufacturing_date;
+ u8 reserved[2];
+ u32 serial_number;
+ u16 code;
+ u16 windows;
+ u64 window_size;
+ u64 command_offset;
+ u64 command_size;
+ u64 status_offset;
+ u64 status_size;
+ u16 flags;
+ u8 reserved1[6];
+};
+
+struct acpi_nfit_data_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 windows;
+ u64 offset;
+ u64 size;
+ u64 capacity;
+ u64 start_address;
+};
+
+struct acpi_nfit_flush_address {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 hint_count;
+ u8 reserved[6];
+ u64 hint_address[1];
+};
+
+struct acpi_nfit_capabilities {
+ struct acpi_nfit_header header;
+ u8 highest_capability;
+ u8 reserved[3];
+ u32 capabilities;
+ u32 reserved2;
+};
+
+enum nvdimm_event {
+ NVDIMM_REVALIDATE_POISON = 0,
+};
+
+struct nd_intel_smart {
+ u32 status;
+ union {
+ struct {
+ u32 flags;
+ u8 reserved0[4];
+ u8 health;
+ u8 spares;
+ u8 life_used;
+ u8 alarm_flags;
+ u16 media_temperature;
+ u16 ctrl_temperature;
+ u32 shutdown_count;
+ u8 ait_status;
+ u16 pmic_temperature;
+ u8 reserved1[8];
+ u8 shutdown_state;
+ u32 vendor_size;
+ u8 vendor_data[92];
+ } __attribute__((packed));
+ u8 data[128];
+ };
+};
+
+enum nvdimm_family_cmds {
+ NVDIMM_INTEL_LATCH_SHUTDOWN = 10,
+ NVDIMM_INTEL_GET_MODES = 11,
+ NVDIMM_INTEL_GET_FWINFO = 12,
+ NVDIMM_INTEL_START_FWUPDATE = 13,
+ NVDIMM_INTEL_SEND_FWUPDATE = 14,
+ NVDIMM_INTEL_FINISH_FWUPDATE = 15,
+ NVDIMM_INTEL_QUERY_FWUPDATE = 16,
+ NVDIMM_INTEL_SET_THRESHOLD = 17,
+ NVDIMM_INTEL_INJECT_ERROR = 18,
+ NVDIMM_INTEL_GET_SECURITY_STATE = 19,
+ NVDIMM_INTEL_SET_PASSPHRASE = 20,
+ NVDIMM_INTEL_DISABLE_PASSPHRASE = 21,
+ NVDIMM_INTEL_UNLOCK_UNIT = 22,
+ NVDIMM_INTEL_FREEZE_LOCK = 23,
+ NVDIMM_INTEL_SECURE_ERASE = 24,
+ NVDIMM_INTEL_OVERWRITE = 25,
+ NVDIMM_INTEL_QUERY_OVERWRITE = 26,
+ NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27,
+ NVDIMM_INTEL_MASTER_SECURE_ERASE = 28,
+};
+
+enum nfit_uuids {
+ NFIT_DEV_DIMM = 0,
+ NFIT_DEV_DIMM_N_HPE1 = 1,
+ NFIT_DEV_DIMM_N_HPE2 = 2,
+ NFIT_DEV_DIMM_N_MSFT = 3,
+ NFIT_DEV_DIMM_N_HYPERV = 4,
+ NFIT_SPA_VOLATILE = 5,
+ NFIT_SPA_PM = 6,
+ NFIT_SPA_DCR = 7,
+ NFIT_SPA_BDW = 8,
+ NFIT_SPA_VDISK = 9,
+ NFIT_SPA_VCD = 10,
+ NFIT_SPA_PDISK = 11,
+ NFIT_SPA_PCD = 12,
+ NFIT_DEV_BUS = 13,
+ NFIT_UUID_MAX = 14,
+};
+
+enum {
+ NFIT_BLK_READ_FLUSH = 1,
+ NFIT_BLK_DCR_LATCH = 2,
+ NFIT_ARS_STATUS_DONE = 0,
+ NFIT_ARS_STATUS_BUSY = 65536,
+ NFIT_ARS_STATUS_NONE = 131072,
+ NFIT_ARS_STATUS_INTR = 196608,
+ NFIT_ARS_START_BUSY = 6,
+ NFIT_ARS_CAP_NONE = 1,
+ NFIT_ARS_F_OVERFLOW = 1,
+ NFIT_ARS_TIMEOUT = 90,
+};
+
+enum nfit_root_notifiers {
+ NFIT_NOTIFY_UPDATE = 128,
+ NFIT_NOTIFY_UC_MEMORY_ERROR = 129,
+};
+
+enum nfit_dimm_notifiers {
+ NFIT_NOTIFY_DIMM_HEALTH = 129,
+};
+
+enum nfit_ars_state {
+ ARS_REQ_SHORT = 0,
+ ARS_REQ_LONG = 1,
+ ARS_FAILED = 2,
+};
+
+struct nfit_spa {
+ struct list_head list;
+ struct nd_region *nd_region;
+ long unsigned int ars_state;
+ u32 clear_err_unit;
+ u32 max_ars;
+ struct acpi_nfit_system_address spa[0];
+};
+
+struct nfit_dcr {
+ struct list_head list;
+ struct acpi_nfit_control_region dcr[0];
+};
+
+struct nfit_bdw {
+ struct list_head list;
+ struct acpi_nfit_data_region bdw[0];
+};
+
+struct nfit_idt {
+ struct list_head list;
+ struct acpi_nfit_interleave idt[0];
+};
+
+struct nfit_flush {
+ struct list_head list;
+ struct acpi_nfit_flush_address flush[0];
+};
+
+struct nfit_memdev {
+ struct list_head list;
+ struct acpi_nfit_memory_map memdev[0];
+};
+
+enum nfit_mem_flags {
+ NFIT_MEM_LSR = 0,
+ NFIT_MEM_LSW = 1,
+ NFIT_MEM_DIRTY = 2,
+ NFIT_MEM_DIRTY_COUNT = 3,
+};
+
+struct acpi_nfit_desc;
+
+struct nfit_mem {
+ struct nvdimm *nvdimm;
+ struct acpi_nfit_memory_map *memdev_dcr;
+ struct acpi_nfit_memory_map *memdev_pmem;
+ struct acpi_nfit_memory_map *memdev_bdw;
+ struct acpi_nfit_control_region *dcr;
+ struct acpi_nfit_data_region *bdw;
+ struct acpi_nfit_system_address *spa_dcr;
+ struct acpi_nfit_system_address *spa_bdw;
+ struct acpi_nfit_interleave *idt_dcr;
+ struct acpi_nfit_interleave *idt_bdw;
+ struct kernfs_node *flags_attr;
+ struct nfit_flush *nfit_flush;
+ struct list_head list;
+ struct acpi_device *adev;
+ struct acpi_nfit_desc *acpi_desc;
+ char id[23];
+ struct resource *flush_wpq;
+ long unsigned int dsm_mask;
+ long unsigned int flags;
+ u32 dirty_shutdown;
+ int family;
+};
+
+struct acpi_nfit_desc {
+ struct nvdimm_bus_descriptor nd_desc;
+ struct acpi_table_header acpi_header;
+ struct mutex init_mutex;
+ struct list_head memdevs;
+ struct list_head flushes;
+ struct list_head dimms;
+ struct list_head spas;
+ struct list_head dcrs;
+ struct list_head bdws;
+ struct list_head idts;
+ struct nvdimm_bus *nvdimm_bus;
+ struct device *dev;
+ struct nd_cmd_ars_status *ars_status;
+ struct nfit_spa *scrub_spa;
+ struct delayed_work dwork;
+ struct list_head list;
+ struct kernfs_node *scrub_count_state;
+ unsigned int max_ars;
+ unsigned int scrub_count;
+ unsigned int scrub_mode;
+ long unsigned int scrub_flags;
+ long unsigned int dimm_cmd_force_en;
+ long unsigned int bus_cmd_force_en;
+ long unsigned int bus_nfit_cmd_force_en;
+ unsigned int platform_cap;
+ unsigned int scrub_tmo;
+ int (*blk_do_io)(struct nd_blk_region *, resource_size_t, void *, u64, int);
+};
+
+enum scrub_flags {
+ ARS_BUSY = 0,
+ ARS_CANCEL = 1,
+ ARS_VALID = 2,
+ ARS_POLL = 3,
+};
+
+enum scrub_mode {
+ HW_ERROR_SCRUB_OFF = 0,
+ HW_ERROR_SCRUB_ON = 1,
+};
+
+enum nd_blk_mmio_selector {
+ BDW = 0,
+ DCR = 1,
+};
+
+struct nd_blk_addr {
+ union {
+ void *base;
+ void *aperture;
+ };
+};
+
+struct nfit_blk_mmio {
+ struct nd_blk_addr addr;
+ u64 size;
+ u64 base_offset;
+ u32 line_size;
+ u32 num_lines;
+ u32 table_size;
+ struct acpi_nfit_interleave *idt;
+ struct acpi_nfit_system_address *spa;
+};
+
+struct nfit_blk {
+ struct nfit_blk_mmio mmio[2];
+ struct nd_region *nd_region;
+ u64 bdw_offset;
+ u64 stat_offset;
+ u64 cmd_offset;
+ u32 dimm_flags;
+};
+
+struct nfit_table_prev {
+ struct list_head spas;
+ struct list_head memdevs;
+ struct list_head dcrs;
+ struct list_head bdws;
+ struct list_head idts;
+ struct list_head flushes;
+};
+
+enum nfit_aux_cmds {
+ NFIT_CMD_TRANSLATE_SPA = 5,
+ NFIT_CMD_ARS_INJECT_SET = 7,
+ NFIT_CMD_ARS_INJECT_CLEAR = 8,
+ NFIT_CMD_ARS_INJECT_GET = 9,
+};
+
+struct nfit_set_info_map {
+ u64 region_offset;
+ u32 serial_number;
+ u32 pad;
+};
+
+struct nfit_set_info {
+ struct nfit_set_info_map mapping[0];
+};
+
+struct nfit_set_info_map2 {
+ u64 region_offset;
+ u32 serial_number;
+ u16 vendor_id;
+ u16 manufacturing_date;
+ u8 manufacturing_location;
+ u8 reserved[31];
+};
+
+struct nfit_set_info2 {
+ struct nfit_set_info_map2 mapping[0];
+};
+
+enum {
+ BCW_OFFSET_MASK = -1,
+ BCW_LEN_SHIFT = 48,
+ BCW_LEN_MASK = 255,
+ BCW_CMD_SHIFT = 56,
+};
+
+enum nvdimm_security_bits {
+ NVDIMM_SECURITY_DISABLED = 0,
+ NVDIMM_SECURITY_UNLOCKED = 1,
+ NVDIMM_SECURITY_LOCKED = 2,
+ NVDIMM_SECURITY_FROZEN = 3,
+ NVDIMM_SECURITY_OVERWRITE = 4,
+};
+
+struct nd_intel_get_security_state {
+ u32 status;
+ u8 extended_state;
+ u8 reserved[3];
+ u8 state;
+ u8 reserved1[3];
+};
+
+struct nd_intel_set_passphrase {
+ u8 old_pass[32];
+ u8 new_pass[32];
+ u32 status;
+};
+
+struct nd_intel_unlock_unit {
+ u8 passphrase[32];
+ u32 status;
+};
+
+struct nd_intel_disable_passphrase {
+ u8 passphrase[32];
+ u32 status;
+};
+
+struct nd_intel_freeze_lock {
+ u32 status;
+};
+
+struct nd_intel_secure_erase {
+ u8 passphrase[32];
+ u32 status;
+};
+
+struct nd_intel_overwrite {
+ u8 passphrase[32];
+ u32 status;
+};
+
+struct nd_intel_query_overwrite {
+ u32 status;
+};
+
+struct acpi_memory_info {
+ struct list_head list;
+ u64 start_addr;
+ u64 length;
+ short unsigned int caching;
+ short unsigned int write_protect;
+ unsigned int enabled: 1;
+};
+
+struct acpi_memory_device {
+ struct acpi_device *device;
+ unsigned int state;
+ struct list_head res_list;
+};
+
+struct acpi_pci_ioapic {
+ acpi_handle root_handle;
+ acpi_handle handle;
+ u32 gsi_base;
+ struct resource res;
+ struct pci_dev *pdev;
+ struct list_head list;
+};
+
+struct pnp_resource {
+ struct list_head list;
+ struct resource res;
+};
+
+struct pnp_port {
+ resource_size_t min;
+ resource_size_t max;
+ resource_size_t align;
+ resource_size_t size;
+ unsigned char flags;
+};
+
+typedef struct {
+ long unsigned int bits[4];
+} pnp_irq_mask_t;
+
+struct pnp_irq {
+ pnp_irq_mask_t map;
+ unsigned char flags;
+};
+
+struct pnp_dma {
+ unsigned char map;
+ unsigned char flags;
+};
+
+struct pnp_mem {
+ resource_size_t min;
+ resource_size_t max;
+ resource_size_t align;
+ resource_size_t size;
+ unsigned char flags;
+};
+
+struct pnp_option {
+ struct list_head list;
+ unsigned int flags;
+ long unsigned int type;
+ union {
+ struct pnp_port port;
+ struct pnp_irq irq;
+ struct pnp_dma dma;
+ struct pnp_mem mem;
+ } u;
+};
+
+struct pnp_info_buffer {
+ char *buffer;
+ char *curr;
+ long unsigned int size;
+ long unsigned int len;
+ int stop;
+ int error;
+};
+
+typedef struct pnp_info_buffer pnp_info_buffer_t;
+
+struct pnp_fixup {
+ char id[7];
+ void (*quirk_function)(struct pnp_dev *);
+};
+
+struct acpipnp_parse_option_s {
+ struct pnp_dev *dev;
+ unsigned int option_flags;
+};
+
+struct clk_bulk_data {
+ const char *id;
+ struct clk *clk;
+};
+
+struct clk_bulk_devres {
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+struct clk_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct clk *clk;
+ struct clk_hw *clk_hw;
+};
+
+struct clk_lookup_alloc {
+ struct clk_lookup cl;
+ char dev_id[20];
+ char con_id[16];
+};
+
+struct clk_notifier {
+ struct clk *clk;
+ struct srcu_notifier_head notifier_head;
+ struct list_head node;
+};
+
+struct clk {
+ struct clk_core *core;
+ struct device *dev;
+ const char *dev_id;
+ const char *con_id;
+ long unsigned int min_rate;
+ long unsigned int max_rate;
+ unsigned int exclusive_count;
+ struct hlist_node clks_node;
+};
+
+struct clk_notifier_data {
+ struct clk *clk;
+ long unsigned int old_rate;
+ long unsigned int new_rate;
+};
+
+struct clk_parent_map;
+
+struct clk_core {
+ const char *name;
+ const struct clk_ops *ops;
+ struct clk_hw *hw;
+ struct module *owner;
+ struct device *dev;
+ struct device_node *of_node;
+ struct clk_core *parent;
+ struct clk_parent_map *parents;
+ u8 num_parents;
+ u8 new_parent_index;
+ long unsigned int rate;
+ long unsigned int req_rate;
+ long unsigned int new_rate;
+ struct clk_core *new_parent;
+ struct clk_core *new_child;
+ long unsigned int flags;
+ bool orphan;
+ bool rpm_enabled;
+ unsigned int enable_count;
+ unsigned int prepare_count;
+ unsigned int protect_count;
+ long unsigned int min_rate;
+ long unsigned int max_rate;
+ long unsigned int accuracy;
+ int phase;
+ struct clk_duty duty;
+ struct hlist_head children;
+ struct hlist_node child_node;
+ struct hlist_head clks;
+ unsigned int notifier_count;
+ struct dentry *dentry;
+ struct hlist_node debug_node;
+ struct kref ref;
+};
+
+struct clk_parent_map {
+ const struct clk_hw *hw;
+ struct clk_core *core;
+ const char *fw_name;
+ const char *name;
+ int index;
+};
+
+struct trace_event_raw_clk {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_clk_rate {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ long unsigned int rate;
+ char __data[0];
+};
+
+struct trace_event_raw_clk_parent {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ u32 __data_loc_pname;
+ char __data[0];
+};
+
+struct trace_event_raw_clk_phase {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ int phase;
+ char __data[0];
+};
+
+struct trace_event_raw_clk_duty_cycle {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ unsigned int num;
+ unsigned int den;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_clk {
+ u32 name;
+};
+
+struct trace_event_data_offsets_clk_rate {
+ u32 name;
+};
+
+struct trace_event_data_offsets_clk_parent {
+ u32 name;
+ u32 pname;
+};
+
+struct trace_event_data_offsets_clk_phase {
+ u32 name;
+};
+
+struct trace_event_data_offsets_clk_duty_cycle {
+ u32 name;
+};
+
+typedef void (*btf_trace_clk_enable)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_disable)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *);
+
+typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int);
+
+typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int);
+
+typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *);
+
+typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *);
+
+typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int);
+
+typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int);
+
+typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *);
+
+typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *);
+
+struct clk_div_table {
+ unsigned int val;
+ unsigned int div;
+};
+
+struct clk_divider {
+ struct clk_hw hw;
+ void *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock;
+};
+
+struct clk_fixed_factor {
+ struct clk_hw hw;
+ unsigned int mult;
+ unsigned int div;
+};
+
+struct clk_fixed_rate {
+ struct clk_hw hw;
+ long unsigned int fixed_rate;
+ long unsigned int fixed_accuracy;
+ long unsigned int flags;
+};
+
+struct clk_gate {
+ struct clk_hw hw;
+ void *reg;
+ u8 bit_idx;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+struct clk_multiplier {
+ struct clk_hw hw;
+ void *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+struct clk_mux {
+ struct clk_hw hw;
+ void *reg;
+ u32 *table;
+ u32 mask;
+ u8 shift;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+struct clk_composite {
+ struct clk_hw hw;
+ struct clk_ops ops;
+ struct clk_hw *mux_hw;
+ struct clk_hw *rate_hw;
+ struct clk_hw *gate_hw;
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *rate_ops;
+ const struct clk_ops *gate_ops;
+};
+
+struct clk_fractional_divider {
+ struct clk_hw hw;
+ void *reg;
+ u8 mshift;
+ u8 mwidth;
+ u32 mmask;
+ u8 nshift;
+ u8 nwidth;
+ u32 nmask;
+ u8 flags;
+ void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *);
+ spinlock_t *lock;
+};
+
+enum gpiod_flags {
+ GPIOD_ASIS = 0,
+ GPIOD_IN = 1,
+ GPIOD_OUT_LOW = 3,
+ GPIOD_OUT_HIGH = 7,
+ GPIOD_OUT_LOW_OPEN_DRAIN = 11,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = 15,
+};
+
+struct clk_gpio {
+ struct clk_hw hw;
+ struct gpio_desc *gpiod;
+};
+
+struct pmc_clk {
+ const char *name;
+ long unsigned int freq;
+ const char *parent_name;
+};
+
+struct pmc_clk_data {
+ void *base;
+ const struct pmc_clk *clks;
+ bool critical;
+};
+
+struct clk_plt_fixed {
+ struct clk_hw *clk;
+ struct clk_lookup *lookup;
+};
+
+struct clk_plt {
+ struct clk_hw hw;
+ void *reg;
+ struct clk_lookup *lookup;
+ spinlock_t lock;
+};
+
+struct clk_plt_data {
+ struct clk_plt_fixed **parents;
+ u8 nparents;
+ struct clk_plt *clks[6];
+ struct clk_lookup *mclk_lookup;
+ struct clk_lookup *ether_clk_lookup;
+};
+
+struct virtio_driver {
+ struct device_driver driver;
+ const struct virtio_device_id *id_table;
+ const unsigned int *feature_table;
+ unsigned int feature_table_size;
+ const unsigned int *feature_table_legacy;
+ unsigned int feature_table_size_legacy;
+ int (*validate)(struct virtio_device *);
+ int (*probe)(struct virtio_device *);
+ void (*scan)(struct virtio_device *);
+ void (*remove)(struct virtio_device *);
+ void (*config_changed)(struct virtio_device *);
+};
+
+typedef __u16 __virtio16;
+
+typedef __u32 __virtio32;
+
+typedef __u64 __virtio64;
+
+struct vring_desc {
+ __virtio64 addr;
+ __virtio32 len;
+ __virtio16 flags;
+ __virtio16 next;
+};
+
+struct vring_avail {
+ __virtio16 flags;
+ __virtio16 idx;
+ __virtio16 ring[0];
+};
+
+struct vring_used_elem {
+ __virtio32 id;
+ __virtio32 len;
+};
+
+typedef struct vring_used_elem vring_used_elem_t;
+
+struct vring_used {
+ __virtio16 flags;
+ __virtio16 idx;
+ vring_used_elem_t ring[0];
+};
+
+typedef struct vring_desc vring_desc_t;
+
+typedef struct vring_avail vring_avail_t;
+
+typedef struct vring_used vring_used_t;
+
+struct vring {
+ unsigned int num;
+ vring_desc_t *desc;
+ vring_avail_t *avail;
+ vring_used_t *used;
+};
+
+struct vring_packed_desc_event {
+ __le16 off_wrap;
+ __le16 flags;
+};
+
+struct vring_packed_desc {
+ __le64 addr;
+ __le32 len;
+ __le16 id;
+ __le16 flags;
+};
+
+struct vring_desc_state_split {
+ void *data;
+ struct vring_desc *indir_desc;
+};
+
+struct vring_desc_state_packed {
+ void *data;
+ struct vring_packed_desc *indir_desc;
+ u16 num;
+ u16 next;
+ u16 last;
+};
+
+struct vring_desc_extra_packed {
+ dma_addr_t addr;
+ u32 len;
+ u16 flags;
+};
+
+struct vring_virtqueue {
+ struct virtqueue vq;
+ bool packed_ring;
+ bool use_dma_api;
+ bool weak_barriers;
+ bool broken;
+ bool indirect;
+ bool event;
+ unsigned int free_head;
+ unsigned int num_added;
+ u16 last_used_idx;
+ union {
+ struct {
+ struct vring vring;
+ u16 avail_flags_shadow;
+ u16 avail_idx_shadow;
+ struct vring_desc_state_split *desc_state;
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+ } split;
+ struct {
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+ u16 avail_used_flags;
+ u16 next_avail_idx;
+ u16 event_flags_shadow;
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra_packed *desc_extra;
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+ } packed;
+ };
+ bool (*notify)(struct virtqueue *);
+ bool we_own_ring;
+};
+
+struct virtio_mmio_device {
+ struct virtio_device vdev;
+ struct platform_device *pdev;
+ void *base;
+ long unsigned int version;
+ spinlock_t lock;
+ struct list_head virtqueues;
+};
+
+struct virtio_mmio_vq_info {
+ struct virtqueue *vq;
+ struct list_head node;
+};
+
+struct virtio_pci_common_cfg {
+ __le32 device_feature_select;
+ __le32 device_feature;
+ __le32 guest_feature_select;
+ __le32 guest_feature;
+ __le16 msix_config;
+ __le16 num_queues;
+ __u8 device_status;
+ __u8 config_generation;
+ __le16 queue_select;
+ __le16 queue_size;
+ __le16 queue_msix_vector;
+ __le16 queue_enable;
+ __le16 queue_notify_off;
+ __le32 queue_desc_lo;
+ __le32 queue_desc_hi;
+ __le32 queue_avail_lo;
+ __le32 queue_avail_hi;
+ __le32 queue_used_lo;
+ __le32 queue_used_hi;
+};
+
+struct virtio_pci_vq_info {
+ struct virtqueue *vq;
+ struct list_head node;
+ unsigned int msix_vector;
+};
+
+struct virtio_pci_device {
+ struct virtio_device vdev;
+ struct pci_dev *pci_dev;
+ u8 *isr;
+ struct virtio_pci_common_cfg *common;
+ void *device;
+ void *notify_base;
+ size_t notify_len;
+ size_t device_len;
+ int notify_map_cap;
+ u32 notify_offset_multiplier;
+ int modern_bars;
+ void *ioaddr;
+ spinlock_t lock;
+ struct list_head virtqueues;
+ struct virtio_pci_vq_info **vqs;
+ int msix_enabled;
+ int intx_enabled;
+ cpumask_var_t *msix_affinity_masks;
+ char (*msix_names)[256];
+ unsigned int msix_vectors;
+ unsigned int msix_used_vectors;
+ bool per_vq_vectors;
+ struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16);
+ void (*del_vq)(struct virtio_pci_vq_info *);
+ u16 (*config_vector)(struct virtio_pci_device *, u16);
+};
+
+enum {
+ VP_MSIX_CONFIG_VECTOR = 0,
+ VP_MSIX_VQ_VECTOR = 1,
+};
+
+struct tty_file_private {
+ struct tty_struct *tty;
+ struct file *file;
+ struct list_head list;
+};
+
+struct n_tty_data {
+ size_t read_head;
+ size_t commit_head;
+ size_t canon_head;
+ size_t echo_head;
+ size_t echo_commit;
+ size_t echo_mark;
+ long unsigned int char_map[4];
+ long unsigned int overrun_time;
+ int num_overrun;
+ bool no_room;
+ unsigned char lnext: 1;
+ unsigned char erasing: 1;
+ unsigned char raw: 1;
+ unsigned char real_raw: 1;
+ unsigned char icanon: 1;
+ unsigned char push: 1;
+ char read_buf[4096];
+ long unsigned int read_flags[64];
+ unsigned char echo_buf[4096];
+ size_t read_tail;
+ size_t line_start;
+ unsigned int column;
+ unsigned int canon_column;
+ size_t echo_tail;
+ struct mutex atomic_read_lock;
+ struct mutex output_lock;
+};
+
+enum {
+ ERASE = 0,
+ WERASE = 1,
+ KILL = 2,
+};
+
+struct termios2 {
+ tcflag_t c_iflag;
+ tcflag_t c_oflag;
+ tcflag_t c_cflag;
+ tcflag_t c_lflag;
+ cc_t c_line;
+ cc_t c_cc[19];
+ speed_t c_ispeed;
+ speed_t c_ospeed;
+};
+
+struct termio {
+ short unsigned int c_iflag;
+ short unsigned int c_oflag;
+ short unsigned int c_cflag;
+ short unsigned int c_lflag;
+ unsigned char c_line;
+ unsigned char c_cc[8];
+};
+
+enum {
+ LDISC_SEM_NORMAL = 0,
+ LDISC_SEM_OTHER = 1,
+};
+
+enum {
+ TTY_LOCK_NORMAL = 0,
+ TTY_LOCK_SLAVE = 1,
+};
+
+struct ldsem_waiter {
+ struct list_head list;
+ struct task_struct *task;
+};
+
+struct pts_fs_info___2;
+
+struct tty_audit_buf {
+ struct mutex mutex;
+ dev_t dev;
+ unsigned int icanon: 1;
+ size_t valid;
+ unsigned char *data;
+};
+
+struct consolefontdesc {
+ short unsigned int charcount;
+ short unsigned int charheight;
+ char *chardata;
+};
+
+struct unipair {
+ short unsigned int unicode;
+ short unsigned int fontpos;
+};
+
+struct unimapdesc {
+ short unsigned int entry_ct;
+ struct unipair *entries;
+};
+
+struct kbdiacruc {
+ unsigned int diacr;
+ unsigned int base;
+ unsigned int result;
+};
+
+struct kbd_repeat {
+ int delay;
+ int period;
+};
+
+struct console_font_op {
+ unsigned int op;
+ unsigned int flags;
+ unsigned int width;
+ unsigned int height;
+ unsigned int charcount;
+ unsigned char *data;
+};
+
+struct vt_stat {
+ short unsigned int v_active;
+ short unsigned int v_signal;
+ short unsigned int v_state;
+};
+
+struct vt_sizes {
+ short unsigned int v_rows;
+ short unsigned int v_cols;
+ short unsigned int v_scrollsize;
+};
+
+struct vt_consize {
+ short unsigned int v_rows;
+ short unsigned int v_cols;
+ short unsigned int v_vlin;
+ short unsigned int v_clin;
+ short unsigned int v_vcol;
+ short unsigned int v_ccol;
+};
+
+struct vt_event {
+ unsigned int event;
+ unsigned int oldev;
+ unsigned int newev;
+ unsigned int pad[4];
+};
+
+struct vt_setactivate {
+ unsigned int console;
+ struct vt_mode mode;
+};
+
+struct vt_event_wait {
+ struct list_head list;
+ struct vt_event event;
+ int done;
+};
+
+struct vt_notifier_param {
+ struct vc_data *vc;
+ unsigned int c;
+};
+
+struct vcs_poll_data {
+ struct notifier_block notifier;
+ unsigned int cons_num;
+ int event;
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+};
+
+struct tiocl_selection {
+ short unsigned int xs;
+ short unsigned int ys;
+ short unsigned int xe;
+ short unsigned int ye;
+ short unsigned int sel_mode;
+};
+
+struct vc_selection {
+ struct mutex lock;
+ struct vc_data *cons;
+ char *buffer;
+ unsigned int buf_len;
+ volatile int start;
+ int end;
+};
+
+typedef unsigned char u_char;
+
+struct keyboard_notifier_param {
+ struct vc_data *vc;
+ int down;
+ int shift;
+ int ledstate;
+ unsigned int value;
+};
+
+struct kbd_struct {
+ unsigned char lockstate;
+ unsigned char slockstate;
+ unsigned char ledmode: 1;
+ unsigned char ledflagstate: 4;
+ char: 3;
+ unsigned char default_ledflagstate: 4;
+ unsigned char kbdmode: 3;
+ char: 1;
+ unsigned char modeflags: 5;
+};
+
+struct kbentry {
+ unsigned char kb_table;
+ unsigned char kb_index;
+ short unsigned int kb_value;
+};
+
+struct kbsentry {
+ unsigned char kb_func;
+ unsigned char kb_string[512];
+};
+
+struct kbdiacr {
+ unsigned char diacr;
+ unsigned char base;
+ unsigned char result;
+};
+
+struct kbdiacrs {
+ unsigned int kb_cnt;
+ struct kbdiacr kbdiacr[256];
+};
+
+struct kbdiacrsuc {
+ unsigned int kb_cnt;
+ struct kbdiacruc kbdiacruc[256];
+};
+
+struct kbkeycode {
+ unsigned int scancode;
+ unsigned int keycode;
+};
+
+typedef void k_handler_fn(struct vc_data *, unsigned char, char);
+
+typedef void fn_handler_fn(struct vc_data *);
+
+struct getset_keycode_data {
+ struct input_keymap_entry ke;
+ int error;
+};
+
+typedef short unsigned int u_short;
+
+struct uni_pagedir {
+ u16 **uni_pgdir[32];
+ long unsigned int refcount;
+ long unsigned int sum;
+ unsigned char *inverse_translations[4];
+ u16 *inverse_trans_unicode;
+};
+
+typedef uint32_t char32_t;
+
+struct uni_screen {
+ char32_t *lines[0];
+};
+
+struct con_driver {
+ const struct consw *con;
+ const char *desc;
+ struct device *dev;
+ int node;
+ int first;
+ int last;
+ int flag;
+};
+
+enum {
+ blank_off = 0,
+ blank_normal_wait = 1,
+ blank_vesa_wait = 2,
+};
+
+enum {
+ EPecma = 0,
+ EPdec = 1,
+ EPeq = 2,
+ EPgt = 3,
+ EPlt = 4,
+};
+
+struct rgb {
+ u8 r;
+ u8 g;
+ u8 b;
+};
+
+enum {
+ ESnormal = 0,
+ ESesc = 1,
+ ESsquare = 2,
+ ESgetpars = 3,
+ ESfunckey = 4,
+ EShash = 5,
+ ESsetG0 = 6,
+ ESsetG1 = 7,
+ ESpercent = 8,
+ EScsiignore = 9,
+ ESnonstd = 10,
+ ESpalette = 11,
+ ESosc = 12,
+};
+
+struct interval {
+ uint32_t first;
+ uint32_t last;
+};
+
+struct hv_ops;
+
+struct hvc_struct {
+ struct tty_port port;
+ spinlock_t lock;
+ int index;
+ int do_wakeup;
+ char *outbuf;
+ int outbuf_size;
+ int n_outbuf;
+ uint32_t vtermno;
+ const struct hv_ops *ops;
+ int irq_requested;
+ int data;
+ struct winsize ws;
+ struct work_struct tty_resize;
+ struct list_head next;
+ long unsigned int flags;
+};
+
+struct hv_ops {
+ int (*get_chars)(uint32_t, char *, int);
+ int (*put_chars)(uint32_t, const char *, int);
+ int (*flush)(uint32_t, bool);
+ int (*notifier_add)(struct hvc_struct *, int);
+ void (*notifier_del)(struct hvc_struct *, int);
+ void (*notifier_hangup)(struct hvc_struct *, int);
+ int (*tiocmget)(struct hvc_struct *);
+ int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int);
+ void (*dtr_rts)(struct hvc_struct *, int);
+};
+
+struct uart_driver {
+ struct module *owner;
+ const char *driver_name;
+ const char *dev_name;
+ int major;
+ int minor;
+ int nr;
+ struct console *cons;
+ struct uart_state *state;
+ struct tty_driver *tty_driver;
+};
+
+struct uart_match {
+ struct uart_port *port;
+ struct uart_driver *driver;
+};
+
+enum hwparam_type {
+ hwparam_ioport = 0,
+ hwparam_iomem = 1,
+ hwparam_ioport_or_iomem = 2,
+ hwparam_irq = 3,
+ hwparam_dma = 4,
+ hwparam_dma_addr = 5,
+ hwparam_other = 6,
+};
+
+struct plat_serial8250_port {
+ long unsigned int iobase;
+ void *membase;
+ resource_size_t mapbase;
+ unsigned int irq;
+ long unsigned int irqflags;
+ unsigned int uartclk;
+ void *private_data;
+ unsigned char regshift;
+ unsigned char iotype;
+ unsigned char hub6;
+ unsigned char has_sysrq;
+ upf_t flags;
+ unsigned int type;
+ unsigned int (*serial_in)(struct uart_port *, int);
+ void (*serial_out)(struct uart_port *, int, int);
+ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
+ unsigned int (*get_mctrl)(struct uart_port *);
+ int (*handle_irq)(struct uart_port *);
+ void (*pm)(struct uart_port *, unsigned int, unsigned int);
+ void (*handle_break)(struct uart_port *);
+};
+
+enum {
+ PLAT8250_DEV_LEGACY = -1,
+ PLAT8250_DEV_PLATFORM = 0,
+ PLAT8250_DEV_PLATFORM1 = 1,
+ PLAT8250_DEV_PLATFORM2 = 2,
+ PLAT8250_DEV_FOURPORT = 3,
+ PLAT8250_DEV_ACCENT = 4,
+ PLAT8250_DEV_BOCA = 5,
+ PLAT8250_DEV_EXAR_ST16C554 = 6,
+ PLAT8250_DEV_HUB6 = 7,
+ PLAT8250_DEV_AU1X00 = 8,
+ PLAT8250_DEV_SM501 = 9,
+};
+
+struct uart_8250_port;
+
+struct uart_8250_ops {
+ int (*setup_irq)(struct uart_8250_port *);
+ void (*release_irq)(struct uart_8250_port *);
+};
+
+struct mctrl_gpios;
+
+struct uart_8250_dma;
+
+struct uart_8250_em485;
+
+struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer;
+ struct list_head list;
+ u32 capabilities;
+ short unsigned int bugs;
+ bool fifo_bug;
+ unsigned int tx_loadsz;
+ unsigned char acr;
+ unsigned char fcr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char mcr_mask;
+ unsigned char mcr_force;
+ unsigned char cur_iotype;
+ unsigned int rpm_tx_active;
+ unsigned char canary;
+ unsigned char probe;
+ struct mctrl_gpios *gpios;
+ unsigned char lsr_saved_flags;
+ unsigned char msr_saved_flags;
+ struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
+ int (*dl_read)(struct uart_8250_port *);
+ void (*dl_write)(struct uart_8250_port *, int);
+ struct uart_8250_em485 *em485;
+ void (*rs485_start_tx)(struct uart_8250_port *);
+ void (*rs485_stop_tx)(struct uart_8250_port *);
+ struct delayed_work overrun_backoff;
+ u32 overrun_backoff_time_ms;
+};
+
+struct uart_8250_em485 {
+ struct hrtimer start_tx_timer;
+ struct hrtimer stop_tx_timer;
+ struct hrtimer *active_timer;
+ struct uart_8250_port *port;
+ unsigned int tx_stopped: 1;
+};
+
+struct dma_chan___2;
+
+typedef bool (*dma_filter_fn)(struct dma_chan___2 *, void *);
+
+enum dma_transfer_direction {
+ DMA_MEM_TO_MEM = 0,
+ DMA_MEM_TO_DEV = 1,
+ DMA_DEV_TO_MEM = 2,
+ DMA_DEV_TO_DEV = 3,
+ DMA_TRANS_NONE = 4,
+};
+
+enum dma_slave_buswidth {
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+ DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+ DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+ DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+};
+
+struct dma_slave_config {
+ enum dma_transfer_direction direction;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ u32 src_maxburst;
+ u32 dst_maxburst;
+ u32 src_port_window_size;
+ u32 dst_port_window_size;
+ bool device_fc;
+ unsigned int slave_id;
+};
+
+typedef s32 dma_cookie_t;
+
+struct uart_8250_dma {
+ int (*tx_dma)(struct uart_8250_port *);
+ int (*rx_dma)(struct uart_8250_port *);
+ dma_filter_fn fn;
+ void *rx_param;
+ void *tx_param;
+ struct dma_slave_config rxconf;
+ struct dma_slave_config txconf;
+ struct dma_chan___2 *rxchan;
+ struct dma_chan___2 *txchan;
+ phys_addr_t rx_dma_addr;
+ phys_addr_t tx_dma_addr;
+ dma_addr_t rx_addr;
+ dma_addr_t tx_addr;
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+ void *rx_buf;
+ size_t rx_size;
+ size_t tx_size;
+ unsigned char tx_running;
+ unsigned char tx_err;
+ unsigned char rx_running;
+};
+
+enum dma_status {
+ DMA_COMPLETE = 0,
+ DMA_IN_PROGRESS = 1,
+ DMA_PAUSED = 2,
+ DMA_ERROR = 3,
+};
+
+enum dma_transaction_type {
+ DMA_MEMCPY = 0,
+ DMA_XOR = 1,
+ DMA_PQ = 2,
+ DMA_XOR_VAL = 3,
+ DMA_PQ_VAL = 4,
+ DMA_MEMSET = 5,
+ DMA_MEMSET_SG = 6,
+ DMA_INTERRUPT = 7,
+ DMA_PRIVATE = 8,
+ DMA_ASYNC_TX = 9,
+ DMA_SLAVE = 10,
+ DMA_CYCLIC = 11,
+ DMA_INTERLEAVE = 12,
+ DMA_TX_TYPE_END = 13,
+};
+
+struct data_chunk {
+ size_t size;
+ size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
+};
+
+struct dma_interleaved_template {
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ enum dma_transfer_direction dir;
+ bool src_inc;
+ bool dst_inc;
+ bool src_sgl;
+ bool dst_sgl;
+ size_t numf;
+ size_t frame_size;
+ struct data_chunk sgl[0];
+};
+
+enum dma_ctrl_flags {
+ DMA_PREP_INTERRUPT = 1,
+ DMA_CTRL_ACK = 2,
+ DMA_PREP_PQ_DISABLE_P = 4,
+ DMA_PREP_PQ_DISABLE_Q = 8,
+ DMA_PREP_CONTINUE = 16,
+ DMA_PREP_FENCE = 32,
+ DMA_CTRL_REUSE = 64,
+ DMA_PREP_CMD = 128,
+};
+
+enum sum_check_bits {
+ SUM_CHECK_P = 0,
+ SUM_CHECK_Q = 1,
+};
+
+enum sum_check_flags {
+ SUM_CHECK_P_RESULT = 1,
+ SUM_CHECK_Q_RESULT = 2,
+};
+
+typedef struct {
+ long unsigned int bits[1];
+} dma_cap_mask_t;
+
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = 1,
+ DESC_METADATA_ENGINE = 2,
+};
+
+struct dma_chan_percpu {
+ long unsigned int memcpy_count;
+ long unsigned int bytes_transferred;
+};
+
+struct dma_router {
+ struct device *dev;
+ void (*route_free)(struct device *, void *);
+};
+
+struct dma_device;
+
+struct dma_chan_dev;
+
+struct dma_chan___2 {
+ struct dma_device *device;
+ struct device *slave;
+ dma_cookie_t cookie;
+ dma_cookie_t completed_cookie;
+ int chan_id;
+ struct dma_chan_dev *dev;
+ const char *name;
+ char *dbg_client_name;
+ struct list_head device_node;
+ struct dma_chan_percpu *local;
+ int client_count;
+ int table_count;
+ struct dma_router *router;
+ void *route_data;
+ void *private;
+};
+
+struct dma_slave_map;
+
+struct dma_filter {
+ dma_filter_fn fn;
+ int mapcnt;
+ const struct dma_slave_map *map;
+};
+
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+enum dma_residue_granularity {
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
+struct dma_async_tx_descriptor;
+
+struct dma_tx_state;
+
+struct dma_device {
+ struct kref ref;
+ unsigned int chancnt;
+ unsigned int privatecnt;
+ struct list_head channels;
+ struct list_head global_node;
+ struct dma_filter filter;
+ dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
+ short unsigned int max_xor;
+ short unsigned int max_pq;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
+ int dev_id;
+ struct device *dev;
+ struct module *owner;
+ struct ida chan_ida;
+ struct mutex chan_mutex;
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ u32 max_burst;
+ bool descriptor_reuse;
+ enum dma_residue_granularity residue_granularity;
+ int (*device_alloc_chan_resources)(struct dma_chan___2 *);
+ void (*device_free_chan_resources)(struct dma_chan___2 *);
+ struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan___2 *, dma_addr_t, dma_addr_t, size_t, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan___2 *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan___2 *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan___2 *, dma_addr_t, int, size_t, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, int, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan___2 *, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *);
+ struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan___2 *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan___2 *, struct dma_interleaved_template *, long unsigned int);
+ struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan___2 *, dma_addr_t, u64, long unsigned int);
+ int (*device_config)(struct dma_chan___2 *, struct dma_slave_config *);
+ int (*device_pause)(struct dma_chan___2 *);
+ int (*device_resume)(struct dma_chan___2 *);
+ int (*device_terminate_all)(struct dma_chan___2 *);
+ void (*device_synchronize)(struct dma_chan___2 *);
+ enum dma_status (*device_tx_status)(struct dma_chan___2 *, dma_cookie_t, struct dma_tx_state *);
+ void (*device_issue_pending)(struct dma_chan___2 *);
+ void (*device_release)(struct dma_device *);
+ void (*dbg_summary_show)(struct seq_file *, struct dma_device *);
+ struct dentry *dbg_dev_root;
+};
+
+struct dma_chan_dev {
+ struct dma_chan___2 *chan;
+ struct device device;
+ int dev_id;
+};
+
+typedef void (*dma_async_tx_callback)(void *);
+
+enum dmaengine_tx_result {
+ DMA_TRANS_NOERROR = 0,
+ DMA_TRANS_READ_FAILED = 1,
+ DMA_TRANS_WRITE_FAILED = 2,
+ DMA_TRANS_ABORTED = 3,
+};
+
+struct dmaengine_result {
+ enum dmaengine_tx_result result;
+ u32 residue;
+};
+
+typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *);
+
+struct dmaengine_unmap_data {
+ u8 map_cnt;
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[0];
+};
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *, void *, size_t);
+ void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *);
+ int (*set_len)(struct dma_async_tx_descriptor *, size_t);
+};
+
+struct dma_async_tx_descriptor {
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags;
+ dma_addr_t phys;
+ struct dma_chan___2 *chan;
+ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *);
+ int (*desc_free)(struct dma_async_tx_descriptor *);
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+ struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
+};
+
+struct dma_tx_state {
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
+ u32 in_flight_bytes;
+};
+
+struct dma_slave_map {
+ const char *devname;
+ const char *slave;
+ void *param;
+};
+
+struct old_serial_port {
+ unsigned int uart;
+ unsigned int baud_base;
+ unsigned int port;
+ unsigned int irq;
+ upf_t flags;
+ unsigned char io_type;
+ unsigned char *iomem_base;
+ short unsigned int iomem_reg_shift;
+};
+
+struct irq_info {
+ struct hlist_node node;
+ int irq;
+ spinlock_t lock;
+ struct list_head *head;
+};
+
+struct serial8250_config {
+ const char *name;
+ short unsigned int fifo_size;
+ short unsigned int tx_loadsz;
+ unsigned char fcr;
+ unsigned char rxtrig_bytes[4];
+ unsigned int flags;
+};
+
+struct pciserial_board {
+ unsigned int flags;
+ unsigned int num_ports;
+ unsigned int base_baud;
+ unsigned int uart_offset;
+ unsigned int reg_shift;
+ unsigned int first_offset;
+};
+
+struct serial_private;
+
+struct pci_serial_quirk {
+ u32 vendor;
+ u32 device;
+ u32 subvendor;
+ u32 subdevice;
+ int (*probe)(struct pci_dev *);
+ int (*init)(struct pci_dev *);
+ int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int);
+ void (*exit)(struct pci_dev *);
+};
+
+struct serial_private {
+ struct pci_dev *dev;
+ unsigned int nr;
+ struct pci_serial_quirk *quirk;
+ const struct pciserial_board *board;
+ int line[0];
+};
+
+struct f815xxa_data {
+ spinlock_t lock;
+ int idx;
+};
+
+struct timedia_struct {
+ int num;
+ const short unsigned int *ids;
+};
+
+struct quatech_feature {
+ u16 devid;
+ bool amcc;
+};
+
+enum pci_board_num_t {
+ pbn_default = 0,
+ pbn_b0_1_115200 = 1,
+ pbn_b0_2_115200 = 2,
+ pbn_b0_4_115200 = 3,
+ pbn_b0_5_115200 = 4,
+ pbn_b0_8_115200 = 5,
+ pbn_b0_1_921600 = 6,
+ pbn_b0_2_921600 = 7,
+ pbn_b0_4_921600 = 8,
+ pbn_b0_2_1130000 = 9,
+ pbn_b0_4_1152000 = 10,
+ pbn_b0_4_1250000 = 11,
+ pbn_b0_2_1843200 = 12,
+ pbn_b0_4_1843200 = 13,
+ pbn_b0_1_4000000 = 14,
+ pbn_b0_bt_1_115200 = 15,
+ pbn_b0_bt_2_115200 = 16,
+ pbn_b0_bt_4_115200 = 17,
+ pbn_b0_bt_8_115200 = 18,
+ pbn_b0_bt_1_460800 = 19,
+ pbn_b0_bt_2_460800 = 20,
+ pbn_b0_bt_4_460800 = 21,
+ pbn_b0_bt_1_921600 = 22,
+ pbn_b0_bt_2_921600 = 23,
+ pbn_b0_bt_4_921600 = 24,
+ pbn_b0_bt_8_921600 = 25,
+ pbn_b1_1_115200 = 26,
+ pbn_b1_2_115200 = 27,
+ pbn_b1_4_115200 = 28,
+ pbn_b1_8_115200 = 29,
+ pbn_b1_16_115200 = 30,
+ pbn_b1_1_921600 = 31,
+ pbn_b1_2_921600 = 32,
+ pbn_b1_4_921600 = 33,
+ pbn_b1_8_921600 = 34,
+ pbn_b1_2_1250000 = 35,
+ pbn_b1_bt_1_115200 = 36,
+ pbn_b1_bt_2_115200 = 37,
+ pbn_b1_bt_4_115200 = 38,
+ pbn_b1_bt_2_921600 = 39,
+ pbn_b1_1_1382400 = 40,
+ pbn_b1_2_1382400 = 41,
+ pbn_b1_4_1382400 = 42,
+ pbn_b1_8_1382400 = 43,
+ pbn_b2_1_115200 = 44,
+ pbn_b2_2_115200 = 45,
+ pbn_b2_4_115200 = 46,
+ pbn_b2_8_115200 = 47,
+ pbn_b2_1_460800 = 48,
+ pbn_b2_4_460800 = 49,
+ pbn_b2_8_460800 = 50,
+ pbn_b2_16_460800 = 51,
+ pbn_b2_1_921600 = 52,
+ pbn_b2_4_921600 = 53,
+ pbn_b2_8_921600 = 54,
+ pbn_b2_8_1152000 = 55,
+ pbn_b2_bt_1_115200 = 56,
+ pbn_b2_bt_2_115200 = 57,
+ pbn_b2_bt_4_115200 = 58,
+ pbn_b2_bt_2_921600 = 59,
+ pbn_b2_bt_4_921600 = 60,
+ pbn_b3_2_115200 = 61,
+ pbn_b3_4_115200 = 62,
+ pbn_b3_8_115200 = 63,
+ pbn_b4_bt_2_921600 = 64,
+ pbn_b4_bt_4_921600 = 65,
+ pbn_b4_bt_8_921600 = 66,
+ pbn_panacom = 67,
+ pbn_panacom2 = 68,
+ pbn_panacom4 = 69,
+ pbn_plx_romulus = 70,
+ pbn_endrun_2_4000000 = 71,
+ pbn_oxsemi = 72,
+ pbn_oxsemi_1_4000000 = 73,
+ pbn_oxsemi_2_4000000 = 74,
+ pbn_oxsemi_4_4000000 = 75,
+ pbn_oxsemi_8_4000000 = 76,
+ pbn_intel_i960 = 77,
+ pbn_sgi_ioc3 = 78,
+ pbn_computone_4 = 79,
+ pbn_computone_6 = 80,
+ pbn_computone_8 = 81,
+ pbn_sbsxrsio = 82,
+ pbn_pasemi_1682M = 83,
+ pbn_ni8430_2 = 84,
+ pbn_ni8430_4 = 85,
+ pbn_ni8430_8 = 86,
+ pbn_ni8430_16 = 87,
+ pbn_ADDIDATA_PCIe_1_3906250 = 88,
+ pbn_ADDIDATA_PCIe_2_3906250 = 89,
+ pbn_ADDIDATA_PCIe_4_3906250 = 90,
+ pbn_ADDIDATA_PCIe_8_3906250 = 91,
+ pbn_ce4100_1_115200 = 92,
+ pbn_omegapci = 93,
+ pbn_NETMOS9900_2s_115200 = 94,
+ pbn_brcm_trumanage = 95,
+ pbn_fintek_4 = 96,
+ pbn_fintek_8 = 97,
+ pbn_fintek_12 = 98,
+ pbn_fintek_F81504A = 99,
+ pbn_fintek_F81508A = 100,
+ pbn_fintek_F81512A = 101,
+ pbn_wch382_2 = 102,
+ pbn_wch384_4 = 103,
+ pbn_pericom_PI7C9X7951 = 104,
+ pbn_pericom_PI7C9X7952 = 105,
+ pbn_pericom_PI7C9X7954 = 106,
+ pbn_pericom_PI7C9X7958 = 107,
+ pbn_sunix_pci_1s = 108,
+ pbn_sunix_pci_2s = 109,
+ pbn_sunix_pci_4s = 110,
+ pbn_sunix_pci_8s = 111,
+ pbn_sunix_pci_16s = 112,
+ pbn_moxa8250_2p = 113,
+ pbn_moxa8250_4p = 114,
+ pbn_moxa8250_8p = 115,
+};
+
+struct memdev {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+ fmode_t fmode;
+};
+
+struct timer_rand_state {
+ cycles_t last_time;
+ long int last_delta;
+ long int last_delta2;
+};
+
+struct trace_event_raw_add_device_randomness {
+ struct trace_entry ent;
+ int bytes;
+ long unsigned int IP;
+ char __data[0];
+};
+
+struct trace_event_raw_random__mix_pool_bytes {
+ struct trace_entry ent;
+ const char *pool_name;
+ int bytes;
+ long unsigned int IP;
+ char __data[0];
+};
+
+struct trace_event_raw_credit_entropy_bits {
+ struct trace_entry ent;
+ const char *pool_name;
+ int bits;
+ int entropy_count;
+ long unsigned int IP;
+ char __data[0];
+};
+
+struct trace_event_raw_push_to_pool {
+ struct trace_entry ent;
+ const char *pool_name;
+ int pool_bits;
+ int input_bits;
+ char __data[0];
+};
+
+struct trace_event_raw_debit_entropy {
+ struct trace_entry ent;
+ const char *pool_name;
+ int debit_bits;
+ char __data[0];
+};
+
+struct trace_event_raw_add_input_randomness {
+ struct trace_entry ent;
+ int input_bits;
+ char __data[0];
+};
+
+struct trace_event_raw_add_disk_randomness {
+ struct trace_entry ent;
+ dev_t dev;
+ int input_bits;
+ char __data[0];
+};
+
+struct trace_event_raw_xfer_secondary_pool {
+ struct trace_entry ent;
+ const char *pool_name;
+ int xfer_bits;
+ int request_bits;
+ int pool_entropy;
+ int input_entropy;
+ char __data[0];
+};
+
+struct trace_event_raw_random__get_random_bytes {
+ struct trace_entry ent;
+ int nbytes;
+ long unsigned int IP;
+ char __data[0];
+};
+
+struct trace_event_raw_random__extract_entropy {
+ struct trace_entry ent;
+ const char *pool_name;
+ int nbytes;
+ int entropy_count;
+ long unsigned int IP;
+ char __data[0];
+};
+
+struct trace_event_raw_random_read {
+ struct trace_entry ent;
+ int got_bits;
+ int need_bits;
+ int pool_left;
+ int input_left;
+ char __data[0];
+};
+
+struct trace_event_raw_urandom_read {
+ struct trace_entry ent;
+ int got_bits;
+ int pool_left;
+ int input_left;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_add_device_randomness {};
+
+struct trace_event_data_offsets_random__mix_pool_bytes {};
+
+struct trace_event_data_offsets_credit_entropy_bits {};
+
+struct trace_event_data_offsets_push_to_pool {};
+
+struct trace_event_data_offsets_debit_entropy {};
+
+struct trace_event_data_offsets_add_input_randomness {};
+
+struct trace_event_data_offsets_add_disk_randomness {};
+
+struct trace_event_data_offsets_xfer_secondary_pool {};
+
+struct trace_event_data_offsets_random__get_random_bytes {};
+
+struct trace_event_data_offsets_random__extract_entropy {};
+
+struct trace_event_data_offsets_random_read {};
+
+struct trace_event_data_offsets_urandom_read {};
+
+typedef void (*btf_trace_add_device_randomness)(void *, int, long unsigned int);
+
+typedef void (*btf_trace_mix_pool_bytes)(void *, const char *, int, long unsigned int);
+
+typedef void (*btf_trace_mix_pool_bytes_nolock)(void *, const char *, int, long unsigned int);
+
+typedef void (*btf_trace_credit_entropy_bits)(void *, const char *, int, int, long unsigned int);
+
+typedef void (*btf_trace_push_to_pool)(void *, const char *, int, int);
+
+typedef void (*btf_trace_debit_entropy)(void *, const char *, int);
+
+typedef void (*btf_trace_add_input_randomness)(void *, int);
+
+typedef void (*btf_trace_add_disk_randomness)(void *, dev_t, int);
+
+typedef void (*btf_trace_xfer_secondary_pool)(void *, const char *, int, int, int, int);
+
+typedef void (*btf_trace_get_random_bytes)(void *, int, long unsigned int);
+
+typedef void (*btf_trace_get_random_bytes_arch)(void *, int, long unsigned int);
+
+typedef void (*btf_trace_extract_entropy)(void *, const char *, int, int, long unsigned int);
+
+typedef void (*btf_trace_extract_entropy_user)(void *, const char *, int, int, long unsigned int);
+
+typedef void (*btf_trace_random_read)(void *, int, int, int, int);
+
+typedef void (*btf_trace_urandom_read)(void *, int, int, int);
+
+struct poolinfo {
+ int poolbitshift;
+ int poolwords;
+ int poolbytes;
+ int poolfracbits;
+ int tap1;
+ int tap2;
+ int tap3;
+ int tap4;
+ int tap5;
+};
+
+struct crng_state {
+ __u32 state[16];
+ long unsigned int init_time;
+ spinlock_t lock;
+};
+
+struct entropy_store {
+ const struct poolinfo *poolinfo;
+ __u32 *pool;
+ const char *name;
+ spinlock_t lock;
+ short unsigned int add_ptr;
+ short unsigned int input_rotate;
+ int entropy_count;
+ unsigned int initialized: 1;
+ unsigned int last_data_init: 1;
+ __u8 last_data[10];
+};
+
+struct fast_pool {
+ __u32 pool[4];
+ long unsigned int last;
+ short unsigned int reg_idx;
+ unsigned char count;
+};
+
+struct batched_entropy {
+ union {
+ u64 entropy_u64[8];
+ u32 entropy_u32[16];
+ };
+ unsigned int position;
+ spinlock_t batch_lock;
+};
+
+struct virtio_console_config {
+ __u16 cols;
+ __u16 rows;
+ __u32 max_nr_ports;
+ __u32 emerg_wr;
+};
+
+struct virtio_console_control {
+ __virtio32 id;
+ __virtio16 event;
+ __virtio16 value;
+};
+
+struct ports_driver_data {
+ struct class *class;
+ struct dentry *debugfs_dir;
+ struct list_head portdevs;
+ unsigned int next_vtermno;
+ struct list_head consoles;
+};
+
+struct console___3 {
+ struct list_head list;
+ struct hvc_struct *hvc;
+ struct winsize ws;
+ u32 vtermno;
+};
+
+struct port_buffer {
+ char *buf;
+ size_t size;
+ size_t len;
+ size_t offset;
+ dma_addr_t dma;
+ struct device *dev;
+ struct list_head list;
+ unsigned int sgpages;
+ struct scatterlist sg[0];
+};
+
+struct ports_device {
+ struct list_head list;
+ struct work_struct control_work;
+ struct work_struct config_work;
+ struct list_head ports;
+ spinlock_t ports_lock;
+ spinlock_t c_ivq_lock;
+ spinlock_t c_ovq_lock;
+ u32 max_nr_ports;
+ struct virtio_device *vdev;
+ struct virtqueue *c_ivq;
+ struct virtqueue *c_ovq;
+ struct virtio_console_control cpkt;
+ struct virtqueue **in_vqs;
+ struct virtqueue **out_vqs;
+ int chr_major;
+};
+
+struct port_stats {
+ long unsigned int bytes_sent;
+ long unsigned int bytes_received;
+ long unsigned int bytes_discarded;
+};
+
+struct port {
+ struct list_head list;
+ struct ports_device *portdev;
+ struct port_buffer *inbuf;
+ spinlock_t inbuf_lock;
+ spinlock_t outvq_lock;
+ struct virtqueue *in_vq;
+ struct virtqueue *out_vq;
+ struct dentry *debugfs_file;
+ struct port_stats stats;
+ struct console___3 cons;
+ struct cdev *cdev;
+ struct device *dev;
+ struct kref kref;
+ wait_queue_head_t waitqueue;
+ char *name;
+ struct fasync_struct *async_queue;
+ u32 id;
+ bool outvq_full;
+ bool host_connected;
+ bool guest_connected;
+};
+
+struct sg_list {
+ unsigned int n;
+ unsigned int size;
+ size_t len;
+ struct scatterlist *sg;
+};
+
+struct hwrng {
+ const char *name;
+ int (*init)(struct hwrng *);
+ void (*cleanup)(struct hwrng *);
+ int (*data_present)(struct hwrng *, int);
+ int (*data_read)(struct hwrng *, u32 *);
+ int (*read)(struct hwrng *, void *, size_t, bool);
+ long unsigned int priv;
+ short unsigned int quality;
+ struct list_head list;
+ struct kref ref;
+ struct completion cleanup_done;
+};
+
+struct virtrng_info {
+ struct hwrng hwrng;
+ struct virtqueue *vq;
+ struct completion have_data;
+ char name[25];
+ unsigned int data_avail;
+ int index;
+ bool busy;
+ bool hwrng_register_done;
+ bool hwrng_removed;
+};
+
+struct vga_device {
+ struct list_head list;
+ struct pci_dev *pdev;
+ unsigned int decodes;
+ unsigned int owns;
+ unsigned int locks;
+ unsigned int io_lock_cnt;
+ unsigned int mem_lock_cnt;
+ unsigned int io_norm_cnt;
+ unsigned int mem_norm_cnt;
+ bool bridge_has_one_vga;
+ void *cookie;
+ void (*irq_set_state)(void *, bool);
+ unsigned int (*set_vga_decode)(void *, bool);
+};
+
+struct vga_arb_user_card {
+ struct pci_dev *pdev;
+ unsigned int mem_cnt;
+ unsigned int io_cnt;
+};
+
+struct vga_arb_private {
+ struct list_head list;
+ struct pci_dev *target;
+ struct vga_arb_user_card cards[16];
+ spinlock_t lock;
+};
+
+struct component_ops {
+ int (*bind)(struct device *, struct device *, void *);
+ void (*unbind)(struct device *, struct device *, void *);
+};
+
+struct component_master_ops {
+ int (*bind)(struct device *);
+ void (*unbind)(struct device *);
+};
+
+struct component;
+
+struct component_match_array {
+ void *data;
+ int (*compare)(struct device *, void *);
+ int (*compare_typed)(struct device *, int, void *);
+ void (*release)(struct device *, void *);
+ struct component *component;
+ bool duplicate;
+};
+
+struct master;
+
+struct component {
+ struct list_head node;
+ struct master *master;
+ bool bound;
+ const struct component_ops *ops;
+ int subcomponent;
+ struct device *dev;
+};
+
+struct component_match {
+ size_t alloc;
+ size_t num;
+ struct component_match_array *compare;
+};
+
+struct master {
+ struct list_head node;
+ bool bound;
+ const struct component_master_ops *ops;
+ struct device *dev;
+ struct component_match *match;
+ struct dentry *dentry;
+};
+
+enum dpm_order {
+ DPM_ORDER_NONE = 0,
+ DPM_ORDER_DEV_AFTER_PARENT = 1,
+ DPM_ORDER_PARENT_BEFORE_DEV = 2,
+ DPM_ORDER_DEV_LAST = 3,
+};
+
+struct subsys_private {
+ struct kset subsys;
+ struct kset *devices_kset;
+ struct list_head interfaces;
+ struct mutex mutex;
+ struct kset *drivers_kset;
+ struct klist klist_devices;
+ struct klist klist_drivers;
+ struct blocking_notifier_head bus_notifier;
+ unsigned int drivers_autoprobe: 1;
+ struct bus_type *bus;
+ struct kset glue_dirs;
+ struct class *class;
+};
+
+struct class_interface {
+ struct list_head node;
+ struct class *class;
+ int (*add_dev)(struct device *, struct class_interface *);
+ void (*remove_dev)(struct device *, struct class_interface *);
+};
+
+struct driver_private {
+ struct kobject kobj;
+ struct klist klist_devices;
+ struct klist_node knode_bus;
+ struct module_kobject *mkobj;
+ struct device_driver *driver;
+};
+
+enum device_link_state {
+ DL_STATE_NONE = -1,
+ DL_STATE_DORMANT = 0,
+ DL_STATE_AVAILABLE = 1,
+ DL_STATE_CONSUMER_PROBE = 2,
+ DL_STATE_ACTIVE = 3,
+ DL_STATE_SUPPLIER_UNBIND = 4,
+};
+
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ enum device_link_state status;
+ u32 flags;
+ refcount_t rpm_active;
+ struct kref kref;
+ struct callback_head callback_head;
+ bool supplier_preactivated;
+};
+
+struct device_private {
+ struct klist klist_children;
+ struct klist_node knode_parent;
+ struct klist_node knode_driver;
+ struct klist_node knode_bus;
+ struct klist_node knode_class;
+ struct list_head deferred_probe;
+ struct device_driver *async_driver;
+ struct device *device;
+ u8 dead: 1;
+};
+
+union device_attr_group_devres {
+ const struct attribute_group *group;
+ const struct attribute_group **groups;
+};
+
+struct class_dir {
+ struct kobject kobj;
+ struct class *class;
+};
+
+struct root_device {
+ struct device dev;
+ struct module *owner;
+};
+
+struct subsys_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
+struct subsys_interface {
+ const char *name;
+ struct bus_type *subsys;
+ struct list_head node;
+ int (*add_dev)(struct device *, struct subsys_interface *);
+ void (*remove_dev)(struct device *, struct subsys_interface *);
+};
+
+struct device_attach_data {
+ struct device *dev;
+ bool check_async;
+ bool want_async;
+ bool have_async;
+};
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct class *, struct class_attribute *, char *);
+ ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t);
+};
+
+struct class_attribute_string {
+ struct class_attribute attr;
+ char *str;
+};
+
+struct class_compat {
+ struct kobject *kobj;
+};
+
+struct platform_object {
+ struct platform_device pdev;
+ char name[0];
+};
+
+struct cpu_attr {
+ struct device_attribute attr;
+ const struct cpumask * const map;
+};
+
+typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
+
+struct probe {
+ struct probe *next;
+ dev_t dev;
+ long unsigned int range;
+ struct module *owner;
+ kobj_probe_t *get;
+ int (*lock)(dev_t, void *);
+ void *data;
+};
+
+struct kobj_map___2 {
+ struct probe *probes[255];
+ struct mutex *lock;
+};
+
+typedef int (*dr_match_t)(struct device *, void *, void *);
+
+struct devres_node {
+ struct list_head entry;
+ dr_release_t release;
+};
+
+struct devres {
+ struct devres_node node;
+ u8 data[0];
+};
+
+struct devres_group {
+ struct devres_node node[2];
+ void *id;
+ int color;
+};
+
+struct action_devres {
+ void *data;
+ void (*action)(void *);
+};
+
+struct pages_devres {
+ long unsigned int addr;
+ unsigned int order;
+};
+
+struct attribute_container {
+ struct list_head node;
+ struct klist containers;
+ struct class *class;
+ const struct attribute_group *grp;
+ struct device_attribute **attrs;
+ int (*match)(struct attribute_container *, struct device *);
+ long unsigned int flags;
+};
+
+struct internal_container {
+ struct klist_node node;
+ struct attribute_container *cont;
+ struct device classdev;
+};
+
+struct transport_container;
+
+struct transport_class {
+ struct class class;
+ int (*setup)(struct transport_container *, struct device *, struct device *);
+ int (*configure)(struct transport_container *, struct device *, struct device *);
+ int (*remove)(struct transport_container *, struct device *, struct device *);
+};
+
+struct transport_container {
+ struct attribute_container ac;
+ const struct attribute_group *statistics;
+};
+
+struct anon_transport_class {
+ struct transport_class tclass;
+ struct attribute_container container;
+};
+
+struct reset_control;
+
+struct mii_bus;
+
+struct mdio_device {
+ struct device dev;
+ struct mii_bus *bus;
+ char modalias[32];
+ int (*bus_match)(struct device *, struct device_driver *);
+ void (*device_free)(struct mdio_device *);
+ void (*device_remove)(struct mdio_device *);
+ int addr;
+ int flags;
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_ctrl;
+ unsigned int reset_assert_delay;
+ unsigned int reset_deassert_delay;
+};
+
+struct phy_c45_device_ids {
+ u32 devices_in_package;
+ u32 device_ids[8];
+};
+
+enum phy_state {
+ PHY_DOWN = 0,
+ PHY_READY = 1,
+ PHY_HALTED = 2,
+ PHY_UP = 3,
+ PHY_RUNNING = 4,
+ PHY_NOLINK = 5,
+ PHY_CABLETEST = 6,
+};
+
+typedef enum {
+ PHY_INTERFACE_MODE_NA = 0,
+ PHY_INTERFACE_MODE_INTERNAL = 1,
+ PHY_INTERFACE_MODE_MII = 2,
+ PHY_INTERFACE_MODE_GMII = 3,
+ PHY_INTERFACE_MODE_SGMII = 4,
+ PHY_INTERFACE_MODE_TBI = 5,
+ PHY_INTERFACE_MODE_REVMII = 6,
+ PHY_INTERFACE_MODE_RMII = 7,
+ PHY_INTERFACE_MODE_RGMII = 8,
+ PHY_INTERFACE_MODE_RGMII_ID = 9,
+ PHY_INTERFACE_MODE_RGMII_RXID = 10,
+ PHY_INTERFACE_MODE_RGMII_TXID = 11,
+ PHY_INTERFACE_MODE_RTBI = 12,
+ PHY_INTERFACE_MODE_SMII = 13,
+ PHY_INTERFACE_MODE_XGMII = 14,
+ PHY_INTERFACE_MODE_XLGMII = 15,
+ PHY_INTERFACE_MODE_MOCA = 16,
+ PHY_INTERFACE_MODE_QSGMII = 17,
+ PHY_INTERFACE_MODE_TRGMII = 18,
+ PHY_INTERFACE_MODE_1000BASEX = 19,
+ PHY_INTERFACE_MODE_2500BASEX = 20,
+ PHY_INTERFACE_MODE_RXAUI = 21,
+ PHY_INTERFACE_MODE_XAUI = 22,
+ PHY_INTERFACE_MODE_10GBASER = 23,
+ PHY_INTERFACE_MODE_USXGMII = 24,
+ PHY_INTERFACE_MODE_10GKR = 25,
+ PHY_INTERFACE_MODE_MAX = 26,
+} phy_interface_t;
+
+struct phylink;
+
+struct phy_driver;
+
+struct phy_package_shared;
+
+struct mii_timestamper;
+
+struct phy_device {
+ struct mdio_device mdio;
+ struct phy_driver *drv;
+ u32 phy_id;
+ struct phy_c45_device_ids c45_ids;
+ unsigned int is_c45: 1;
+ unsigned int is_internal: 1;
+ unsigned int is_pseudo_fixed_link: 1;
+ unsigned int is_gigabit_capable: 1;
+ unsigned int has_fixups: 1;
+ unsigned int suspended: 1;
+ unsigned int suspended_by_mdio_bus: 1;
+ unsigned int sysfs_links: 1;
+ unsigned int loopback_enabled: 1;
+ unsigned int downshifted_rate: 1;
+ unsigned int autoneg: 1;
+ unsigned int link: 1;
+ unsigned int autoneg_complete: 1;
+ unsigned int interrupts: 1;
+ enum phy_state state;
+ u32 dev_flags;
+ phy_interface_t interface;
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+ u8 master_slave_get;
+ u8 master_slave_set;
+ u8 master_slave_state;
+ long unsigned int supported[2];
+ long unsigned int advertising[2];
+ long unsigned int lp_advertising[2];
+ long unsigned int adv_old[2];
+ u32 eee_broken_modes;
+ int irq;
+ void *priv;
+ struct phy_package_shared *shared;
+ struct sk_buff *skb;
+ void *ehdr;
+ struct nlattr *nest;
+ struct delayed_work state_queue;
+ struct mutex lock;
+ bool sfp_bus_attached;
+ struct sfp_bus *sfp_bus;
+ struct phylink *phylink;
+ struct net_device *attached_dev;
+ struct mii_timestamper *mii_ts;
+ u8 mdix;
+ u8 mdix_ctrl;
+ void (*phy_link_change)(struct phy_device *, bool);
+ void (*adjust_link)(struct net_device *);
+};
+
+struct mdio_bus_stats {
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t writes;
+ u64_stats_t reads;
+ struct u64_stats_sync syncp;
+};
+
+struct mii_bus {
+ struct module *owner;
+ const char *name;
+ char id[61];
+ void *priv;
+ int (*read)(struct mii_bus *, int, int);
+ int (*write)(struct mii_bus *, int, int, u16);
+ int (*reset)(struct mii_bus *);
+ struct mdio_bus_stats stats[32];
+ unsigned int is_managed: 1;
+ unsigned int is_managed_registered: 1;
+ struct mutex mdio_lock;
+ struct device *parent;
+ enum {
+ MDIOBUS_ALLOCATED = 1,
+ MDIOBUS_REGISTERED = 2,
+ MDIOBUS_UNREGISTERED = 3,
+ MDIOBUS_RELEASED = 4,
+ } state;
+ struct device dev;
+ struct mdio_device *mdio_map[32];
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
+ int irq[32];
+ int reset_delay_us;
+ struct gpio_desc *reset_gpiod;
+ struct mutex shared_lock;
+ struct phy_package_shared *shared[32];
+};
+
+struct mdio_driver_common {
+ struct device_driver driver;
+ int flags;
+};
+
+struct mii_timestamper {
+ bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int);
+ void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int);
+ int (*hwtstamp)(struct mii_timestamper *, struct ifreq *);
+ void (*link_state)(struct mii_timestamper *, struct phy_device *);
+ int (*ts_info)(struct mii_timestamper *, struct ethtool_ts_info *);
+ struct device *device;
+};
+
+struct phy_package_shared {
+ int addr;
+ refcount_t refcnt;
+ long unsigned int flags;
+ size_t priv_size;
+ void *priv;
+};
+
+struct phy_tdr_config;
+
+struct phy_driver {
+ struct mdio_driver_common mdiodrv;
+ u32 phy_id;
+ char *name;
+ u32 phy_id_mask;
+ const long unsigned int * const features;
+ u32 flags;
+ const void *driver_data;
+ int (*soft_reset)(struct phy_device *);
+ int (*config_init)(struct phy_device *);
+ int (*probe)(struct phy_device *);
+ int (*get_features)(struct phy_device *);
+ int (*suspend)(struct phy_device *);
+ int (*resume)(struct phy_device *);
+ int (*config_aneg)(struct phy_device *);
+ int (*aneg_done)(struct phy_device *);
+ int (*read_status)(struct phy_device *);
+ int (*ack_interrupt)(struct phy_device *);
+ int (*config_intr)(struct phy_device *);
+ int (*did_interrupt)(struct phy_device *);
+ irqreturn_t (*handle_interrupt)(struct phy_device *);
+ void (*remove)(struct phy_device *);
+ int (*match_phy_device)(struct phy_device *);
+ int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *);
+ void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *);
+ void (*link_change_notify)(struct phy_device *);
+ int (*read_mmd)(struct phy_device *, int, u16);
+ int (*write_mmd)(struct phy_device *, int, u16, u16);
+ int (*read_page)(struct phy_device *);
+ int (*write_page)(struct phy_device *, int);
+ int (*module_info)(struct phy_device *, struct ethtool_modinfo *);
+ int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *);
+ int (*cable_test_start)(struct phy_device *);
+ int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *);
+ int (*cable_test_get_status)(struct phy_device *, bool *);
+ int (*get_sset_count)(struct phy_device *);
+ void (*get_strings)(struct phy_device *, u8 *);
+ void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *);
+ int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *);
+ int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *);
+ int (*set_loopback)(struct phy_device *, bool);
+ int (*get_sqi)(struct phy_device *);
+ int (*get_sqi_max)(struct phy_device *);
+};
+
+struct phy_tdr_config {
+ u32 first;
+ u32 last;
+ u32 step;
+ s8 pair;
+};
+
+struct device_connection {
+ struct fwnode_handle *fwnode;
+ const char *endpoint[2];
+ const char *id;
+ struct list_head list;
+};
+
+typedef void * (*devcon_match_fn_t)(struct device_connection *, int, void *);
+
+struct software_node;
+
+struct software_node_ref_args {
+ const struct software_node *node;
+ unsigned int nargs;
+ u64 args[8];
+};
+
+struct software_node {
+ const char *name;
+ const struct software_node *parent;
+ const struct property_entry *properties;
+};
+
+struct swnode {
+ int id;
+ struct kobject kobj;
+ struct fwnode_handle fwnode;
+ const struct software_node *node;
+ struct ida child_ids;
+ struct list_head entry;
+ struct list_head children;
+ struct swnode *parent;
+ unsigned int allocated: 1;
+};
+
+struct req {
+ struct req *next;
+ struct completion done;
+ int err;
+ const char *name;
+ umode_t mode;
+ kuid_t uid;
+ kgid_t gid;
+ struct device *dev;
+};
+
+struct pm_clk_notifier_block {
+ struct notifier_block nb;
+ struct dev_pm_domain *pm_domain;
+ char *con_ids[0];
+};
+
+struct firmware {
+ size_t size;
+ const u8 *data;
+ void *priv;
+};
+
+struct builtin_fw {
+ char *name;
+ void *data;
+ long unsigned int size;
+};
+
+enum fw_opt {
+ FW_OPT_UEVENT = 1,
+ FW_OPT_NOWAIT = 2,
+ FW_OPT_USERHELPER = 4,
+ FW_OPT_NO_WARN = 8,
+ FW_OPT_NOCACHE = 16,
+ FW_OPT_NOFALLBACK_SYSFS = 32,
+ FW_OPT_FALLBACK_PLATFORM = 64,
+};
+
+enum fw_status {
+ FW_STATUS_UNKNOWN = 0,
+ FW_STATUS_LOADING = 1,
+ FW_STATUS_DONE = 2,
+ FW_STATUS_ABORTED = 3,
+};
+
+struct fw_state {
+ struct completion completion;
+ enum fw_status status;
+};
+
+struct firmware_cache;
+
+struct fw_priv {
+ struct kref ref;
+ struct list_head list;
+ struct firmware_cache *fwc;
+ struct fw_state fw_st;
+ void *data;
+ size_t size;
+ size_t allocated_size;
+ const char *fw_name;
+};
+
+struct firmware_cache {
+ spinlock_t lock;
+ struct list_head head;
+ int state;
+};
+
+struct firmware_work {
+ struct work_struct work;
+ struct module *module;
+ const char *name;
+ struct device *device;
+ void *context;
+ void (*cont)(const struct firmware *, void *);
+ u32 opt_flags;
+};
+
+typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
+
+struct for_each_memory_block_cb_data {
+ walk_memory_blocks_func_t func;
+ void *arg;
+};
+
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *);
+
+struct platform_msi_priv_data {
+ struct device *dev;
+ void *host_data;
+ msi_alloc_info_t arg;
+ irq_write_msi_msg_t write_msg;
+ int devid;
+};
+
+struct brd_device {
+ int brd_number;
+ struct request_queue *brd_queue;
+ struct gendisk *brd_disk;
+ struct list_head brd_list;
+ spinlock_t brd_lock;
+ struct xarray brd_pages;
+};
+
+typedef long unsigned int __kernel_old_dev_t;
+
+enum {
+ LO_FLAGS_READ_ONLY = 1,
+ LO_FLAGS_AUTOCLEAR = 4,
+ LO_FLAGS_PARTSCAN = 8,
+ LO_FLAGS_DIRECT_IO = 16,
+};
+
+struct loop_info {
+ int lo_number;
+ __kernel_old_dev_t lo_device;
+ long unsigned int lo_inode;
+ __kernel_old_dev_t lo_rdevice;
+ int lo_offset;
+ int lo_encrypt_type;
+ int lo_encrypt_key_size;
+ int lo_flags;
+ char lo_name[64];
+ unsigned char lo_encrypt_key[32];
+ long unsigned int lo_init[2];
+ char reserved[4];
+};
+
+struct loop_info64 {
+ __u64 lo_device;
+ __u64 lo_inode;
+ __u64 lo_rdevice;
+ __u64 lo_offset;
+ __u64 lo_sizelimit;
+ __u32 lo_number;
+ __u32 lo_encrypt_type;
+ __u32 lo_encrypt_key_size;
+ __u32 lo_flags;
+ __u8 lo_file_name[64];
+ __u8 lo_crypt_name[64];
+ __u8 lo_encrypt_key[32];
+ __u64 lo_init[2];
+};
+
+struct loop_config {
+ __u32 fd;
+ __u32 block_size;
+ struct loop_info64 info;
+ __u64 __reserved[8];
+};
+
+enum {
+ Lo_unbound = 0,
+ Lo_bound = 1,
+ Lo_rundown = 2,
+};
+
+struct loop_func_table;
+
+struct loop_device {
+ int lo_number;
+ atomic_t lo_refcnt;
+ loff_t lo_offset;
+ loff_t lo_sizelimit;
+ int lo_flags;
+ int (*transfer)(struct loop_device *, int, struct page *, unsigned int, struct page *, unsigned int, int, sector_t);
+ char lo_file_name[64];
+ char lo_crypt_name[64];
+ char lo_encrypt_key[32];
+ int lo_encrypt_key_size;
+ struct loop_func_table *lo_encryption;
+ __u32 lo_init[2];
+ kuid_t lo_key_owner;
+ int (*ioctl)(struct loop_device *, int, long unsigned int);
+ struct file *lo_backing_file;
+ struct block_device *lo_device;
+ void *key_data;
+ gfp_t old_gfp_mask;
+ spinlock_t lo_lock;
+ int lo_state;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ bool use_dio;
+ bool sysfs_inited;
+ struct request_queue *lo_queue;
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *lo_disk;
+};
+
+struct loop_func_table {
+ int number;
+ int (*transfer)(struct loop_device *, int, struct page *, unsigned int, struct page *, unsigned int, int, sector_t);
+ int (*init)(struct loop_device *, const struct loop_info64 *);
+ int (*release)(struct loop_device *);
+ int (*ioctl)(struct loop_device *, int, long unsigned int);
+ struct module *owner;
+};
+
+struct loop_cmd {
+ struct kthread_work work;
+ bool use_aio;
+ atomic_t ref;
+ long int ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+ struct cgroup_subsys_state *css;
+};
+
+struct virtio_blk_geometry {
+ __u16 cylinders;
+ __u8 heads;
+ __u8 sectors;
+};
+
+struct virtio_blk_config {
+ __u64 capacity;
+ __u32 size_max;
+ __u32 seg_max;
+ struct virtio_blk_geometry geometry;
+ __u32 blk_size;
+ __u8 physical_block_exp;
+ __u8 alignment_offset;
+ __u16 min_io_size;
+ __u32 opt_io_size;
+ __u8 wce;
+ __u8 unused;
+ __u16 num_queues;
+ __u32 max_discard_sectors;
+ __u32 max_discard_seg;
+ __u32 discard_sector_alignment;
+ __u32 max_write_zeroes_sectors;
+ __u32 max_write_zeroes_seg;
+ __u8 write_zeroes_may_unmap;
+ __u8 unused1[3];
+} __attribute__((packed));
+
+struct virtio_blk_outhdr {
+ __virtio32 type;
+ __virtio32 ioprio;
+ __virtio64 sector;
+};
+
+struct virtio_blk_discard_write_zeroes {
+ __le64 sector;
+ __le32 num_sectors;
+ __le32 flags;
+};
+
+struct virtio_blk_vq {
+ struct virtqueue *vq;
+ spinlock_t lock;
+ char name[16];
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct virtio_blk {
+ struct mutex vdev_mutex;
+ struct virtio_device *vdev;
+ struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
+ struct work_struct config_work;
+ refcount_t refs;
+ unsigned int sg_elems;
+ int index;
+ int num_vqs;
+ struct virtio_blk_vq *vqs;
+};
+
+struct virtblk_req {
+ struct virtio_blk_outhdr out_hdr;
+ u8 status;
+ struct scatterlist sg[0];
+};
+
+struct badrange {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+struct nvdimm {
+ long unsigned int flags;
+ void *provider_data;
+ long unsigned int cmd_mask;
+ struct device dev;
+ atomic_t busy;
+ int id;
+ int num_flush;
+ struct resource *flush_wpq;
+ const char *dimm_id;
+ struct {
+ const struct nvdimm_security_ops *ops;
+ long unsigned int flags;
+ long unsigned int ext_flags;
+ unsigned int overwrite_tmo;
+ struct kernfs_node *overwrite_state;
+ } sec;
+ struct delayed_work dwork;
+};
+
+struct nvdimm_bus {
+ struct nvdimm_bus_descriptor *nd_desc;
+ wait_queue_head_t wait;
+ struct list_head list;
+ struct device dev;
+ int id;
+ int probe_active;
+ atomic_t ioctl_active;
+ struct list_head mapping_list;
+ struct mutex reconfig_mutex;
+ struct badrange badrange;
+};
+
+enum {
+ NSINDEX_SIG_LEN = 16,
+ NSINDEX_ALIGN = 256,
+ NSINDEX_SEQ_MASK = 3,
+ NSLABEL_UUID_LEN = 16,
+ NSLABEL_NAME_LEN = 64,
+ NSLABEL_FLAG_ROLABEL = 1,
+ NSLABEL_FLAG_LOCAL = 2,
+ NSLABEL_FLAG_BTT = 4,
+ NSLABEL_FLAG_UPDATING = 8,
+ BTT_ALIGN = 4096,
+ BTTINFO_SIG_LEN = 16,
+ BTTINFO_UUID_LEN = 16,
+ BTTINFO_FLAG_ERROR = 1,
+ BTTINFO_MAJOR_VERSION = 1,
+ ND_LABEL_MIN_SIZE = 1024,
+ ND_LABEL_ID_SIZE = 50,
+ ND_NSINDEX_INIT = 1,
+};
+
+enum {
+ LOCK_BUS = 0,
+ LOCK_NDCTL = 1,
+ LOCK_REGION = 2,
+ LOCK_DIMM = 2,
+ LOCK_NAMESPACE = 3,
+ LOCK_CLAIM = 4,
+};
+
+struct nvdimm_map {
+ struct nvdimm_bus *nvdimm_bus;
+ struct list_head list;
+ resource_size_t offset;
+ long unsigned int flags;
+ size_t size;
+ union {
+ void *mem;
+ void *iomem;
+ };
+ struct kref kref;
+};
+
+struct badrange_entry {
+ u64 start;
+ u64 length;
+ struct list_head list;
+};
+
+struct nvdimm_drvdata;
+
+struct nd_mapping {
+ struct nvdimm *nvdimm;
+ u64 start;
+ u64 size;
+ int position;
+ struct list_head labels;
+ struct mutex lock;
+ struct nvdimm_drvdata *ndd;
+};
+
+struct nd_percpu_lane;
+
+struct nd_region {
+ struct device dev;
+ struct ida ns_ida;
+ struct ida btt_ida;
+ struct ida pfn_ida;
+ struct ida dax_ida;
+ long unsigned int flags;
+ struct device *ns_seed;
+ struct device *btt_seed;
+ struct device *pfn_seed;
+ struct device *dax_seed;
+ long unsigned int align;
+ u16 ndr_mappings;
+ u64 ndr_size;
+ u64 ndr_start;
+ int id;
+ int num_lanes;
+ int ro;
+ int numa_node;
+ int target_node;
+ void *provider_data;
+ struct kernfs_node *bb_state;
+ struct badblocks bb;
+ struct nd_interleave_set *nd_set;
+ struct nd_percpu_lane *lane;
+ int (*flush)(struct nd_region *, struct bio *);
+ struct nd_mapping mapping[0];
+};
+
+struct nd_cmd_get_config_size {
+ __u32 status;
+ __u32 config_size;
+ __u32 max_xfer;
+};
+
+struct nd_cmd_vendor_hdr {
+ __u32 opcode;
+ __u32 in_length;
+ __u8 in_buf[0];
+};
+
+enum nvdimm_claim_class {
+ NVDIMM_CCLASS_NONE = 0,
+ NVDIMM_CCLASS_BTT = 1,
+ NVDIMM_CCLASS_BTT2 = 2,
+ NVDIMM_CCLASS_PFN = 3,
+ NVDIMM_CCLASS_DAX = 4,
+ NVDIMM_CCLASS_UNKNOWN = 5,
+};
+
+struct nd_device_driver {
+ struct device_driver drv;
+ long unsigned int type;
+ int (*probe)(struct device *);
+ int (*remove)(struct device *);
+ void (*shutdown)(struct device *);
+ void (*notify)(struct device *, enum nvdimm_event);
+};
+
+struct nd_namespace_common {
+ int force_raw;
+ struct device dev;
+ struct device *claim;
+ enum nvdimm_claim_class claim_class;
+ int (*rw_bytes)(struct nd_namespace_common *, resource_size_t, void *, size_t, int, long unsigned int);
+};
+
+struct nd_namespace_io {
+ struct nd_namespace_common common;
+ struct resource res;
+ resource_size_t size;
+ void *addr;
+ struct badblocks bb;
+};
+
+struct nvdimm_drvdata {
+ struct device *dev;
+ int nslabel_size;
+ struct nd_cmd_get_config_size nsarea;
+ void *data;
+ int ns_current;
+ int ns_next;
+ struct resource dpa;
+ struct kref kref;
+};
+
+struct nd_percpu_lane {
+ int count;
+ spinlock_t lock;
+};
+
+struct btt;
+
+struct nd_btt {
+ struct device dev;
+ struct nd_namespace_common *ndns;
+ struct btt *btt;
+ long unsigned int lbasize;
+ u64 size;
+ u8 *uuid;
+ int id;
+ int initial_offset;
+ u16 version_major;
+ u16 version_minor;
+};
+
+enum nd_pfn_mode {
+ PFN_MODE_NONE = 0,
+ PFN_MODE_RAM = 1,
+ PFN_MODE_PMEM = 2,
+};
+
+struct nd_pfn_sb;
+
+struct nd_pfn {
+ int id;
+ u8 *uuid;
+ struct device dev;
+ long unsigned int align;
+ long unsigned int npfns;
+ enum nd_pfn_mode mode;
+ struct nd_pfn_sb *pfn_sb;
+ struct nd_namespace_common *ndns;
+};
+
+struct nd_pfn_sb {
+ u8 signature[16];
+ u8 uuid[16];
+ u8 parent_uuid[16];
+ __le32 flags;
+ __le16 version_major;
+ __le16 version_minor;
+ __le64 dataoff;
+ __le64 npfns;
+ __le32 mode;
+ __le32 start_pad;
+ __le32 end_trunc;
+ __le32 align;
+ __le32 page_size;
+ __le16 page_struct_size;
+ u8 padding[3994];
+ __le64 checksum;
+};
+
+struct nd_dax {
+ struct nd_pfn nd_pfn;
+};
+
+enum nd_async_mode {
+ ND_SYNC = 0,
+ ND_ASYNC = 1,
+};
+
+struct clear_badblocks_context {
+ resource_size_t phys;
+ resource_size_t cleared;
+};
+
+enum nd_ioctl_mode {
+ BUS_IOCTL = 0,
+ DIMM_IOCTL = 1,
+};
+
+struct nd_blk_region {
+ int (*enable)(struct nvdimm_bus *, struct device *);
+ int (*do_io)(struct nd_blk_region *, resource_size_t, void *, u64, int);
+ void *blk_provider_data;
+ struct nd_region nd_region;
+};
+
+struct nd_label_id {
+ char id[50];
+};
+
+struct blk_alloc_info {
+ struct nd_mapping *nd_mapping;
+ resource_size_t available;
+ resource_size_t busy;
+ struct resource *res;
+};
+
+enum nd_driver_flags {
+ ND_DRIVER_DIMM = 2,
+ ND_DRIVER_REGION_PMEM = 4,
+ ND_DRIVER_REGION_BLK = 8,
+ ND_DRIVER_NAMESPACE_IO = 16,
+ ND_DRIVER_NAMESPACE_PMEM = 32,
+ ND_DRIVER_NAMESPACE_BLK = 64,
+ ND_DRIVER_DAX_PMEM = 128,
+};
+
+struct nd_namespace_index {
+ u8 sig[16];
+ u8 flags[3];
+ u8 labelsize;
+ __le32 seq;
+ __le64 myoff;
+ __le64 mysize;
+ __le64 otheroff;
+ __le64 labeloff;
+ __le32 nslot;
+ __le16 major;
+ __le16 minor;
+ __le64 checksum;
+ u8 free[0];
+};
+
+struct nd_namespace_label {
+ u8 uuid[16];
+ u8 name[64];
+ __le32 flags;
+ __le16 nlabel;
+ __le16 position;
+ __le64 isetcookie;
+ __le64 lbasize;
+ __le64 dpa;
+ __le64 rawsize;
+ __le32 slot;
+ u8 align;
+ u8 reserved[3];
+ guid_t type_guid;
+ guid_t abstraction_guid;
+ u8 reserved2[88];
+ __le64 checksum;
+};
+
+enum {
+ ND_MAX_LANES = 256,
+ INT_LBASIZE_ALIGNMENT = 64,
+ NVDIMM_IO_ATOMIC = 1,
+};
+
+struct nd_region_data {
+ int ns_count;
+ int ns_active;
+ unsigned int hints_shift;
+ void *flush_wpq[0];
+};
+
+struct nd_label_ent {
+ struct list_head list;
+ long unsigned int flags;
+ struct nd_namespace_label *label;
+};
+
+struct conflict_context {
+ struct nd_region *nd_region;
+ resource_size_t start;
+ resource_size_t size;
+};
+
+enum {
+ ND_MIN_NAMESPACE_SIZE = 4096,
+};
+
+struct nd_namespace_pmem {
+ struct nd_namespace_io nsio;
+ long unsigned int lbasize;
+ char *alt_name;
+ u8 *uuid;
+ int id;
+};
+
+struct nd_namespace_blk {
+ struct nd_namespace_common common;
+ char *alt_name;
+ u8 *uuid;
+ int id;
+ long unsigned int lbasize;
+ resource_size_t size;
+ int num_resources;
+ struct resource **res;
+};
+
+enum nd_label_flags {
+ ND_LABEL_REAP = 0,
+};
+
+enum alloc_loc {
+ ALLOC_ERR = 0,
+ ALLOC_BEFORE = 1,
+ ALLOC_MID = 2,
+ ALLOC_AFTER = 3,
+};
+
+struct btt {
+ struct gendisk *btt_disk;
+ struct request_queue *btt_queue;
+ struct list_head arena_list;
+ struct dentry *debugfs_dir;
+ struct nd_btt *nd_btt;
+ u64 nlba;
+ long long unsigned int rawsize;
+ u32 lbasize;
+ u32 sector_size;
+ struct nd_region *nd_region;
+ struct mutex init_lock;
+ int init_state;
+ int num_arenas;
+ struct badblocks *phys_bb;
+};
+
+struct nd_gen_sb {
+ char reserved[4088];
+ __le64 checksum;
+};
+
+struct btt_sb {
+ u8 signature[16];
+ u8 uuid[16];
+ u8 parent_uuid[16];
+ __le32 flags;
+ __le16 version_major;
+ __le16 version_minor;
+ __le32 external_lbasize;
+ __le32 external_nlba;
+ __le32 internal_lbasize;
+ __le32 internal_nlba;
+ __le32 nfree;
+ __le32 infosize;
+ __le64 nextoff;
+ __le64 dataoff;
+ __le64 mapoff;
+ __le64 logoff;
+ __le64 info2off;
+ u8 padding[3968];
+ __le64 checksum;
+};
+
+struct dax_operations {
+ long int (*direct_access)(struct dax_device *, long unsigned int, long int, void **, pfn_t *);
+ bool (*dax_supported)(struct dax_device *, struct block_device *, int, sector_t, sector_t);
+ size_t (*copy_from_iter)(struct dax_device *, long unsigned int, void *, size_t, struct iov_iter *);
+ size_t (*copy_to_iter)(struct dax_device *, long unsigned int, void *, size_t, struct iov_iter *);
+ int (*zero_page_range)(struct dax_device *, long unsigned int, size_t);
+};
+
+struct pmem_device {
+ phys_addr_t phys_addr;
+ phys_addr_t data_offset;
+ u64 pfn_flags;
+ void *virt_addr;
+ size_t size;
+ u32 pfn_pad;
+ struct kernfs_node *bb_state;
+ struct badblocks bb;
+ struct dax_device *dax_dev;
+ struct gendisk *disk;
+ struct dev_pagemap pgmap;
+};
+
+struct bio_integrity_payload {
+ struct bio *bip_bio;
+ struct bvec_iter bip_iter;
+ short unsigned int bip_slab;
+ short unsigned int bip_vcnt;
+ short unsigned int bip_max_vcnt;
+ short unsigned int bip_flags;
+ struct bvec_iter bio_iter;
+ struct work_struct bip_work;
+ struct bio_vec *bip_vec;
+ struct bio_vec bip_inline_vecs[0];
+};
+
+enum btt_init_state {
+ INIT_UNCHECKED = 0,
+ INIT_NOTFOUND = 1,
+ INIT_READY = 2,
+};
+
+struct log_entry {
+ __le32 lba;
+ __le32 old_map;
+ __le32 new_map;
+ __le32 seq;
+};
+
+struct log_group {
+ struct log_entry ent[4];
+};
+
+struct free_entry {
+ u32 block;
+ u8 sub;
+ u8 seq;
+ u8 has_err;
+};
+
+struct aligned_lock {
+ union {
+ spinlock_t lock;
+ u8 cacheline_padding[64];
+ };
+};
+
+struct arena_info {
+ u64 size;
+ u64 external_lba_start;
+ u32 internal_nlba;
+ u32 internal_lbasize;
+ u32 external_nlba;
+ u32 external_lbasize;
+ u32 nfree;
+ u16 version_major;
+ u16 version_minor;
+ u32 sector_size;
+ u64 nextoff;
+ u64 infooff;
+ u64 dataoff;
+ u64 mapoff;
+ u64 logoff;
+ u64 info2off;
+ struct free_entry *freelist;
+ u32 *rtt;
+ struct aligned_lock *map_locks;
+ struct nd_btt *nd_btt;
+ struct list_head list;
+ struct dentry *debugfs_dir;
+ u32 flags;
+ struct mutex err_lock;
+ int log_index[2];
+};
+
+enum log_ent_request {
+ LOG_NEW_ENT = 0,
+ LOG_OLD_ENT = 1,
+};
+
+struct virtio_pmem_config {
+ __u64 start;
+ __u64 size;
+};
+
+struct virtio_pmem {
+ struct virtio_device *vdev;
+ struct virtqueue *req_vq;
+ struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus_descriptor nd_desc;
+ struct list_head req_list;
+ spinlock_t pmem_lock;
+ __u64 start;
+ __u64 size;
+};
+
+struct virtio_pmem_resp {
+ __le32 ret;
+};
+
+struct virtio_pmem_req {
+ __le32 type;
+};
+
+struct virtio_pmem_request {
+ struct virtio_pmem_req req;
+ struct virtio_pmem_resp resp;
+ wait_queue_head_t host_acked;
+ bool done;
+ wait_queue_head_t wq_buf;
+ bool wq_buf_avail;
+ struct list_head list;
+};
+
+struct dax_device {
+ struct hlist_node list;
+ struct inode inode;
+ struct cdev cdev;
+ const char *host;
+ void *private;
+ long unsigned int flags;
+ const struct dax_operations *ops;
+};
+
+enum dax_device_flags {
+ DAXDEV_ALIVE = 0,
+ DAXDEV_WRITE_CACHE = 1,
+ DAXDEV_SYNC = 2,
+};
+
+struct dax_region {
+ int id;
+ int target_node;
+ struct kref kref;
+ struct device *dev;
+ unsigned int align;
+ struct resource res;
+ long long unsigned int pfn_flags;
+};
+
+struct dev_dax {
+ struct dax_region *region;
+ struct dax_device *dax_dev;
+ int target_node;
+ struct device dev;
+ struct dev_pagemap pgmap;
+ struct resource *dax_kmem_res;
+};
+
+enum dev_dax_subsys {
+ DEV_DAX_BUS = 0,
+ DEV_DAX_CLASS = 1,
+};
+
+struct dax_device_driver {
+ struct device_driver drv;
+ struct list_head ids;
+ int match_always;
+};
+
+struct dax_id {
+ struct list_head list;
+ char dev_name[30];
+};
+
+enum id_action {
+ ID_REMOVE = 0,
+ ID_ADD = 1,
+};
+
+struct scsi_sense_hdr {
+ u8 response_code;
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ u8 additional_length;
+};
+
+typedef __u64 blist_flags_t;
+
+enum scsi_device_state {
+ SDEV_CREATED = 1,
+ SDEV_RUNNING = 2,
+ SDEV_CANCEL = 3,
+ SDEV_DEL = 4,
+ SDEV_QUIESCE = 5,
+ SDEV_OFFLINE = 6,
+ SDEV_TRANSPORT_OFFLINE = 7,
+ SDEV_BLOCK = 8,
+ SDEV_CREATED_BLOCK = 9,
+};
+
+struct scsi_vpd {
+ struct callback_head rcu;
+ int len;
+ unsigned char data[0];
+};
+
+struct Scsi_Host;
+
+struct scsi_target;
+
+struct scsi_device_handler;
+
+struct scsi_device {
+ struct Scsi_Host *host;
+ struct request_queue *request_queue;
+ struct list_head siblings;
+ struct list_head same_target_siblings;
+ atomic_t device_busy;
+ atomic_t device_blocked;
+ spinlock_t list_lock;
+ struct list_head starved_entry;
+ short unsigned int queue_depth;
+ short unsigned int max_queue_depth;
+ short unsigned int last_queue_full_depth;
+ short unsigned int last_queue_full_count;
+ long unsigned int last_queue_full_time;
+ long unsigned int queue_ramp_up_period;
+ long unsigned int last_queue_ramp_up;
+ unsigned int id;
+ unsigned int channel;
+ u64 lun;
+ unsigned int manufacturer;
+ unsigned int sector_size;
+ void *hostdata;
+ unsigned char type;
+ char scsi_level;
+ char inq_periph_qual;
+ struct mutex inquiry_mutex;
+ unsigned char inquiry_len;
+ unsigned char *inquiry;
+ const char *vendor;
+ const char *model;
+ const char *rev;
+ struct scsi_vpd *vpd_pg0;
+ struct scsi_vpd *vpd_pg83;
+ struct scsi_vpd *vpd_pg80;
+ struct scsi_vpd *vpd_pg89;
+ unsigned char current_tag;
+ struct scsi_target *sdev_target;
+ blist_flags_t sdev_bflags;
+ unsigned int eh_timeout;
+ unsigned int removable: 1;
+ unsigned int changed: 1;
+ unsigned int busy: 1;
+ unsigned int lockable: 1;
+ unsigned int locked: 1;
+ unsigned int borken: 1;
+ unsigned int disconnect: 1;
+ unsigned int soft_reset: 1;
+ unsigned int sdtr: 1;
+ unsigned int wdtr: 1;
+ unsigned int ppr: 1;
+ unsigned int tagged_supported: 1;
+ unsigned int simple_tags: 1;
+ unsigned int was_reset: 1;
+ unsigned int expecting_cc_ua: 1;
+ unsigned int use_10_for_rw: 1;
+ unsigned int use_10_for_ms: 1;
+ unsigned int set_dbd_for_ms: 1;
+ unsigned int no_report_opcodes: 1;
+ unsigned int no_write_same: 1;
+ unsigned int use_16_for_rw: 1;
+ unsigned int skip_ms_page_8: 1;
+ unsigned int skip_ms_page_3f: 1;
+ unsigned int skip_vpd_pages: 1;
+ unsigned int try_vpd_pages: 1;
+ unsigned int use_192_bytes_for_3f: 1;
+ unsigned int no_start_on_add: 1;
+ unsigned int allow_restart: 1;
+ unsigned int manage_start_stop: 1;
+ unsigned int start_stop_pwr_cond: 1;
+ unsigned int no_uld_attach: 1;
+ unsigned int select_no_atn: 1;
+ unsigned int fix_capacity: 1;
+ unsigned int guess_capacity: 1;
+ unsigned int retry_hwerror: 1;
+ unsigned int last_sector_bug: 1;
+ unsigned int no_read_disc_info: 1;
+ unsigned int no_read_capacity_16: 1;
+ unsigned int try_rc_10_first: 1;
+ unsigned int security_supported: 1;
+ unsigned int is_visible: 1;
+ unsigned int wce_default_on: 1;
+ unsigned int no_dif: 1;
+ unsigned int broken_fua: 1;
+ unsigned int lun_in_cdb: 1;
+ unsigned int unmap_limit_for_ws: 1;
+ unsigned int rpm_autosuspend: 1;
+ bool offline_already;
+ atomic_t disk_events_disable_depth;
+ long unsigned int supported_events[1];
+ long unsigned int pending_events[1];
+ struct list_head event_list;
+ struct work_struct event_work;
+ unsigned int max_device_blocked;
+ atomic_t iorequest_cnt;
+ atomic_t iodone_cnt;
+ atomic_t ioerr_cnt;
+ struct device sdev_gendev;
+ struct device sdev_dev;
+ struct execute_work ew;
+ struct work_struct requeue_work;
+ struct scsi_device_handler *handler;
+ void *handler_data;
+ size_t dma_drain_len;
+ void *dma_drain_buf;
+ unsigned char access_state;
+ struct mutex state_mutex;
+ enum scsi_device_state sdev_state;
+ struct task_struct *quiesced_by;
+ long unsigned int sdev_data[0];
+};
+
+enum scsi_host_state {
+ SHOST_CREATED = 1,
+ SHOST_RUNNING = 2,
+ SHOST_CANCEL = 3,
+ SHOST_DEL = 4,
+ SHOST_RECOVERY = 5,
+ SHOST_CANCEL_RECOVERY = 6,
+ SHOST_DEL_RECOVERY = 7,
+};
+
+struct scsi_host_template;
+
+struct scsi_transport_template;
+
+struct Scsi_Host {
+ struct list_head __devices;
+ struct list_head __targets;
+ struct list_head starved_list;
+ spinlock_t default_lock;
+ spinlock_t *host_lock;
+ struct mutex scan_mutex;
+ struct list_head eh_cmd_q;
+ struct task_struct *ehandler;
+ struct completion *eh_action;
+ wait_queue_head_t host_wait;
+ struct scsi_host_template *hostt;
+ struct scsi_transport_template *transportt;
+ struct blk_mq_tag_set tag_set;
+ atomic_t host_blocked;
+ unsigned int host_failed;
+ unsigned int host_eh_scheduled;
+ unsigned int host_no;
+ int eh_deadline;
+ long unsigned int last_reset;
+ unsigned int max_channel;
+ unsigned int max_id;
+ u64 max_lun;
+ unsigned int unique_id;
+ short unsigned int max_cmd_len;
+ int this_id;
+ int can_queue;
+ short int cmd_per_lun;
+ short unsigned int sg_tablesize;
+ short unsigned int sg_prot_tablesize;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ long unsigned int dma_boundary;
+ long unsigned int virt_boundary_mask;
+ unsigned int nr_hw_queues;
+ unsigned int active_mode: 2;
+ unsigned int unchecked_isa_dma: 1;
+ unsigned int host_self_blocked: 1;
+ unsigned int reverse_ordering: 1;
+ unsigned int tmf_in_progress: 1;
+ unsigned int async_scan: 1;
+ unsigned int eh_noresume: 1;
+ unsigned int no_write_same: 1;
+ unsigned int short_inquiry: 1;
+ unsigned int no_scsi2_lun_in_cdb: 1;
+ char work_q_name[20];
+ struct workqueue_struct *work_q;
+ struct workqueue_struct *tmf_work_q;
+ unsigned int max_host_blocked;
+ unsigned int prot_capabilities;
+ unsigned char prot_guard_type;
+ long unsigned int base;
+ long unsigned int io_port;
+ unsigned char n_io_port;
+ unsigned char dma_channel;
+ unsigned int irq;
+ enum scsi_host_state shost_state;
+ struct device shost_gendev;
+ struct device shost_dev;
+ void *shost_data;
+ struct device *dma_dev;
+ long unsigned int hostdata[0];
+};
+
+enum scsi_target_state {
+ STARGET_CREATED = 1,
+ STARGET_RUNNING = 2,
+ STARGET_REMOVE = 3,
+ STARGET_CREATED_REMOVE = 4,
+ STARGET_DEL = 5,
+};
+
+struct scsi_target {
+ struct scsi_device *starget_sdev_user;
+ struct list_head siblings;
+ struct list_head devices;
+ struct device dev;
+ struct kref reap_ref;
+ unsigned int channel;
+ unsigned int id;
+ unsigned int create: 1;
+ unsigned int single_lun: 1;
+ unsigned int pdt_1f_for_no_lun: 1;
+ unsigned int no_report_luns: 1;
+ unsigned int expecting_lun_change: 1;
+ atomic_t target_busy;
+ atomic_t target_blocked;
+ unsigned int can_queue;
+ unsigned int max_target_blocked;
+ char scsi_level;
+ enum scsi_target_state state;
+ void *hostdata;
+ long unsigned int starget_data[0];
+};
+
+struct scsi_data_buffer {
+ struct sg_table table;
+ unsigned int length;
+};
+
+struct scsi_pointer {
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int buffers_residual;
+ dma_addr_t dma_handle;
+ volatile int Status;
+ volatile int Message;
+ volatile int have_data_in;
+ volatile int sent_command;
+ volatile int phase;
+};
+
+struct scsi_cmnd {
+ struct scsi_request req;
+ struct scsi_device *device;
+ struct list_head eh_entry;
+ struct delayed_work abort_work;
+ struct callback_head rcu;
+ int eh_eflags;
+ long unsigned int jiffies_at_alloc;
+ int retries;
+ int allowed;
+ unsigned char prot_op;
+ unsigned char prot_type;
+ unsigned char prot_flags;
+ short unsigned int cmd_len;
+ enum dma_data_direction sc_data_direction;
+ unsigned char *cmnd;
+ struct scsi_data_buffer sdb;
+ struct scsi_data_buffer *prot_sdb;
+ unsigned int underflow;
+ unsigned int transfersize;
+ struct request *request;
+ unsigned char *sense_buffer;
+ void (*scsi_done)(struct scsi_cmnd *);
+ struct scsi_pointer SCp;
+ unsigned char *host_scribble;
+ int result;
+ int flags;
+ long unsigned int state;
+ unsigned char tag;
+ unsigned int extra_len;
+};
+
+enum scsi_prot_operations {
+ SCSI_PROT_NORMAL = 0,
+ SCSI_PROT_READ_INSERT = 1,
+ SCSI_PROT_WRITE_STRIP = 2,
+ SCSI_PROT_READ_STRIP = 3,
+ SCSI_PROT_WRITE_INSERT = 4,
+ SCSI_PROT_READ_PASS = 5,
+ SCSI_PROT_WRITE_PASS = 6,
+};
+
+struct scsi_driver {
+ struct device_driver gendrv;
+ void (*rescan)(struct device *);
+ blk_status_t (*init_command)(struct scsi_cmnd *);
+ void (*uninit_command)(struct scsi_cmnd *);
+ int (*done)(struct scsi_cmnd *);
+ int (*eh_action)(struct scsi_cmnd *, int);
+ void (*eh_reset)(struct scsi_cmnd *);
+};
+
+struct scsi_host_cmd_pool;
+
+struct scsi_host_template {
+ struct module *module;
+ const char *name;
+ const char * (*info)(struct Scsi_Host *);
+ int (*ioctl)(struct scsi_device *, unsigned int, void *);
+ int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *);
+ int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *);
+ int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
+ void (*commit_rqs)(struct Scsi_Host *, u16);
+ int (*eh_abort_handler)(struct scsi_cmnd *);
+ int (*eh_device_reset_handler)(struct scsi_cmnd *);
+ int (*eh_target_reset_handler)(struct scsi_cmnd *);
+ int (*eh_bus_reset_handler)(struct scsi_cmnd *);
+ int (*eh_host_reset_handler)(struct scsi_cmnd *);
+ int (*slave_alloc)(struct scsi_device *);
+ int (*slave_configure)(struct scsi_device *);
+ void (*slave_destroy)(struct scsi_device *);
+ int (*target_alloc)(struct scsi_target *);
+ void (*target_destroy)(struct scsi_target *);
+ int (*scan_finished)(struct Scsi_Host *, long unsigned int);
+ void (*scan_start)(struct Scsi_Host *);
+ int (*change_queue_depth)(struct scsi_device *, int);
+ int (*map_queues)(struct Scsi_Host *);
+ bool (*dma_need_drain)(struct request *);
+ int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *);
+ void (*unlock_native_capacity)(struct scsi_device *);
+ int (*show_info)(struct seq_file *, struct Scsi_Host *);
+ int (*write_info)(struct Scsi_Host *, char *, int);
+ enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+ int (*host_reset)(struct Scsi_Host *, int);
+ const char *proc_name;
+ struct proc_dir_entry *proc_dir;
+ int can_queue;
+ int this_id;
+ short unsigned int sg_tablesize;
+ short unsigned int sg_prot_tablesize;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ long unsigned int dma_boundary;
+ long unsigned int virt_boundary_mask;
+ short int cmd_per_lun;
+ unsigned char present;
+ int tag_alloc_policy;
+ unsigned int track_queue_depth: 1;
+ unsigned int supported_mode: 2;
+ unsigned int unchecked_isa_dma: 1;
+ unsigned int emulated: 1;
+ unsigned int skip_settle_delay: 1;
+ unsigned int no_write_same: 1;
+ unsigned int max_host_blocked;
+ struct device_attribute **shost_attrs;
+ struct device_attribute **sdev_attrs;
+ const struct attribute_group **sdev_groups;
+ u64 vendor_id;
+ unsigned int cmd_size;
+ struct scsi_host_cmd_pool *cmd_pool;
+ int rpm_autosuspend_delay;
+};
+
+struct trace_event_raw_scsi_dispatch_cmd_start {
+ struct trace_entry ent;
+ unsigned int host_no;
+ unsigned int channel;
+ unsigned int id;
+ unsigned int lun;
+ unsigned int opcode;
+ unsigned int cmd_len;
+ unsigned int data_sglen;
+ unsigned int prot_sglen;
+ unsigned char prot_op;
+ u32 __data_loc_cmnd;
+ char __data[0];
+};
+
+struct trace_event_raw_scsi_dispatch_cmd_error {
+ struct trace_entry ent;
+ unsigned int host_no;
+ unsigned int channel;
+ unsigned int id;
+ unsigned int lun;
+ int rtn;
+ unsigned int opcode;
+ unsigned int cmd_len;
+ unsigned int data_sglen;
+ unsigned int prot_sglen;
+ unsigned char prot_op;
+ u32 __data_loc_cmnd;
+ char __data[0];
+};
+
+struct trace_event_raw_scsi_cmd_done_timeout_template {
+ struct trace_entry ent;
+ unsigned int host_no;
+ unsigned int channel;
+ unsigned int id;
+ unsigned int lun;
+ int result;
+ unsigned int opcode;
+ unsigned int cmd_len;
+ unsigned int data_sglen;
+ unsigned int prot_sglen;
+ unsigned char prot_op;
+ u32 __data_loc_cmnd;
+ char __data[0];
+};
+
+struct trace_event_raw_scsi_eh_wakeup {
+ struct trace_entry ent;
+ unsigned int host_no;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_scsi_dispatch_cmd_start {
+ u32 cmnd;
+};
+
+struct trace_event_data_offsets_scsi_dispatch_cmd_error {
+ u32 cmnd;
+};
+
+struct trace_event_data_offsets_scsi_cmd_done_timeout_template {
+ u32 cmnd;
+};
+
+struct trace_event_data_offsets_scsi_eh_wakeup {};
+
+typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *);
+
+typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int);
+
+typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *);
+
+typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *);
+
+typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *);
+
+struct scsi_transport_template {
+ struct transport_container host_attrs;
+ struct transport_container target_attrs;
+ struct transport_container device_attrs;
+ int (*user_scan)(struct Scsi_Host *, uint, uint, u64);
+ int device_size;
+ int device_private_offset;
+ int target_size;
+ int target_private_offset;
+ int host_size;
+ unsigned int create_work_queue: 1;
+ void (*eh_strategy_handler)(struct Scsi_Host *);
+};
+
+struct scsi_host_busy_iter_data {
+ bool (*fn)(struct scsi_cmnd *, void *, bool);
+ void *priv;
+};
+
+struct scsi_idlun {
+ __u32 dev_id;
+ __u32 host_unique_id;
+};
+
+typedef void (*activate_complete)(void *, int);
+
+struct scsi_device_handler {
+ struct list_head list;
+ struct module *module;
+ const char *name;
+ int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
+ int (*attach)(struct scsi_device *);
+ void (*detach)(struct scsi_device *);
+ int (*activate)(struct scsi_device *, activate_complete, void *);
+ blk_status_t (*prep_fn)(struct scsi_device *, struct request *);
+ int (*set_params)(struct scsi_device *, const char *);
+ void (*rescan)(struct scsi_device *);
+};
+
+struct scsi_eh_save {
+ int result;
+ unsigned int resid_len;
+ int eh_eflags;
+ enum dma_data_direction data_direction;
+ unsigned int underflow;
+ unsigned char cmd_len;
+ unsigned char prot_op;
+ unsigned char *cmnd;
+ struct scsi_data_buffer sdb;
+ unsigned char eh_cmnd[16];
+ struct scatterlist sense_sgl;
+};
+
+struct scsi_varlen_cdb_hdr {
+ __u8 opcode;
+ __u8 control;
+ __u8 misc[5];
+ __u8 additional_cdb_length;
+ __be16 service_action;
+};
+
+struct scsi_mode_data {
+ __u32 length;
+ __u16 block_descriptor_length;
+ __u8 medium_type;
+ __u8 device_specific;
+ __u8 header_length;
+ __u8 longlba: 1;
+};
+
+struct scsi_event {
+ enum scsi_device_event evt_type;
+ struct list_head node;
+};
+
+enum scsi_host_prot_capabilities {
+ SHOST_DIF_TYPE1_PROTECTION = 1,
+ SHOST_DIF_TYPE2_PROTECTION = 2,
+ SHOST_DIF_TYPE3_PROTECTION = 4,
+ SHOST_DIX_TYPE0_PROTECTION = 8,
+ SHOST_DIX_TYPE1_PROTECTION = 16,
+ SHOST_DIX_TYPE2_PROTECTION = 32,
+ SHOST_DIX_TYPE3_PROTECTION = 64,
+};
+
+enum {
+ ACTION_FAIL = 0,
+ ACTION_REPREP = 1,
+ ACTION_RETRY = 2,
+ ACTION_DELAYED_RETRY = 3,
+};
+
+struct scsi_lun {
+ __u8 scsi_lun[8];
+};
+
+enum scsi_timeouts {
+ SCSI_DEFAULT_EH_TIMEOUT = 2500,
+};
+
+enum scsi_scan_mode {
+ SCSI_SCAN_INITIAL = 0,
+ SCSI_SCAN_RESCAN = 1,
+ SCSI_SCAN_MANUAL = 2,
+};
+
+struct async_scan_data {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ struct completion prev_finished;
+};
+
+enum scsi_devinfo_key {
+ SCSI_DEVINFO_GLOBAL = 0,
+ SCSI_DEVINFO_SPI = 1,
+};
+
+struct scsi_dev_info_list {
+ struct list_head dev_info_list;
+ char vendor[8];
+ char model[16];
+ blist_flags_t flags;
+ unsigned int compatible;
+};
+
+struct scsi_dev_info_list_table {
+ struct list_head node;
+ struct list_head scsi_dev_info_list;
+ const char *name;
+ int key;
+};
+
+struct virtio_scsi_cmd_req {
+ __u8 lun[8];
+ __virtio64 tag;
+ __u8 task_attr;
+ __u8 prio;
+ __u8 crn;
+ __u8 cdb[32];
+} __attribute__((packed));
+
+struct virtio_scsi_cmd_req_pi {
+ __u8 lun[8];
+ __virtio64 tag;
+ __u8 task_attr;
+ __u8 prio;
+ __u8 crn;
+ __virtio32 pi_bytesout;
+ __virtio32 pi_bytesin;
+ __u8 cdb[32];
+} __attribute__((packed));
+
+struct virtio_scsi_cmd_resp {
+ __virtio32 sense_len;
+ __virtio32 resid;
+ __virtio16 status_qualifier;
+ __u8 status;
+ __u8 response;
+ __u8 sense[96];
+};
+
+struct virtio_scsi_ctrl_tmf_req {
+ __virtio32 type;
+ __virtio32 subtype;
+ __u8 lun[8];
+ __virtio64 tag;
+};
+
+struct virtio_scsi_ctrl_tmf_resp {
+ __u8 response;
+};
+
+struct virtio_scsi_ctrl_an_req {
+ __virtio32 type;
+ __u8 lun[8];
+ __virtio32 event_requested;
+};
+
+struct virtio_scsi_ctrl_an_resp {
+ __virtio32 event_actual;
+ __u8 response;
+} __attribute__((packed));
+
+struct virtio_scsi_event {
+ __virtio32 event;
+ __u8 lun[8];
+ __virtio32 reason;
+};
+
+struct virtio_scsi_config {
+ __u32 num_queues;
+ __u32 seg_max;
+ __u32 max_sectors;
+ __u32 cmd_per_lun;
+ __u32 event_info_size;
+ __u32 sense_size;
+ __u32 cdb_size;
+ __u16 max_channel;
+ __u16 max_target;
+ __u32 max_lun;
+};
+
+struct virtio_scsi_cmd {
+ struct scsi_cmnd *sc;
+ struct completion *comp;
+ union {
+ struct virtio_scsi_cmd_req cmd;
+ struct virtio_scsi_cmd_req_pi cmd_pi;
+ struct virtio_scsi_ctrl_tmf_req tmf;
+ struct virtio_scsi_ctrl_an_req an;
+ } req;
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ struct virtio_scsi_event evt;
+ } resp;
+ long: 8;
+ long: 64;
+} __attribute__((packed));
+
+struct virtio_scsi;
+
+struct virtio_scsi_event_node {
+ struct virtio_scsi *vscsi;
+ struct virtio_scsi_event event;
+ struct work_struct work;
+};
+
+struct virtio_scsi_vq {
+ spinlock_t vq_lock;
+ struct virtqueue *vq;
+};
+
+struct virtio_scsi {
+ struct virtio_device *vdev;
+ struct virtio_scsi_event_node event_list[8];
+ u32 num_queues;
+ struct hlist_node node;
+ bool stop_events;
+ struct virtio_scsi_vq ctrl_vq;
+ struct virtio_scsi_vq event_vq;
+ struct virtio_scsi_vq req_vqs[0];
+};
+
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1,
+ BIP_MAPPED_INTEGRITY = 2,
+ BIP_CTRL_NOCHECK = 4,
+ BIP_DISK_NOCHECK = 8,
+ BIP_IP_CHECKSUM = 16,
+};
+
+enum t10_dif_type {
+ T10_PI_TYPE0_PROTECTION = 0,
+ T10_PI_TYPE1_PROTECTION = 1,
+ T10_PI_TYPE2_PROTECTION = 2,
+ T10_PI_TYPE3_PROTECTION = 3,
+};
+
+enum scsi_prot_flags {
+ SCSI_PROT_TRANSFER_PI = 1,
+ SCSI_PROT_GUARD_CHECK = 2,
+ SCSI_PROT_REF_CHECK = 4,
+ SCSI_PROT_REF_INCREMENT = 8,
+ SCSI_PROT_IP_CHECKSUM = 16,
+};
+
+enum {
+ SD_EXT_CDB_SIZE = 32,
+ SD_MEMPOOL_SIZE = 2,
+};
+
+enum {
+ SD_DEF_XFER_BLOCKS = 65535,
+ SD_MAX_XFER_BLOCKS = -1,
+ SD_MAX_WS10_BLOCKS = 65535,
+ SD_MAX_WS16_BLOCKS = 8388607,
+};
+
+enum {
+ SD_LBP_FULL = 0,
+ SD_LBP_UNMAP = 1,
+ SD_LBP_WS16 = 2,
+ SD_LBP_WS10 = 3,
+ SD_LBP_ZERO = 4,
+ SD_LBP_DISABLE = 5,
+};
+
+enum {
+ SD_ZERO_WRITE = 0,
+ SD_ZERO_WS = 1,
+ SD_ZERO_WS16_UNMAP = 2,
+ SD_ZERO_WS10_UNMAP = 3,
+};
+
+struct opal_dev;
+
+struct scsi_disk {
+ struct scsi_driver *driver;
+ struct scsi_device *device;
+ struct device dev;
+ struct gendisk *disk;
+ struct opal_dev *opal_dev;
+ atomic_t openers;
+ sector_t capacity;
+ u32 max_xfer_blocks;
+ u32 opt_xfer_blocks;
+ u32 max_ws_blocks;
+ u32 max_unmap_blocks;
+ u32 unmap_granularity;
+ u32 unmap_alignment;
+ u32 index;
+ unsigned int physical_block_size;
+ unsigned int max_medium_access_timeouts;
+ unsigned int medium_access_timed_out;
+ u8 media_present;
+ u8 write_prot;
+ u8 protection_type;
+ u8 provisioning_mode;
+ u8 zeroing_mode;
+ unsigned int ATO: 1;
+ unsigned int cache_override: 1;
+ unsigned int WCE: 1;
+ unsigned int RCD: 1;
+ unsigned int DPOFUA: 1;
+ unsigned int first_scan: 1;
+ unsigned int lbpme: 1;
+ unsigned int lbprz: 1;
+ unsigned int lbpu: 1;
+ unsigned int lbpws: 1;
+ unsigned int lbpws10: 1;
+ unsigned int lbpvpd: 1;
+ unsigned int ws10: 1;
+ unsigned int ws16: 1;
+ unsigned int rc_basis: 2;
+ unsigned int zoned: 2;
+ unsigned int urswrz: 1;
+ unsigned int security: 1;
+ unsigned int ignore_medium_access_errors: 1;
+};
+
+struct devprobe2 {
+ struct net_device * (*probe)(int);
+ int status;
+};
+
+enum {
+ NETIF_F_SG_BIT = 0,
+ NETIF_F_IP_CSUM_BIT = 1,
+ __UNUSED_NETIF_F_1 = 2,
+ NETIF_F_HW_CSUM_BIT = 3,
+ NETIF_F_IPV6_CSUM_BIT = 4,
+ NETIF_F_HIGHDMA_BIT = 5,
+ NETIF_F_FRAGLIST_BIT = 6,
+ NETIF_F_HW_VLAN_CTAG_TX_BIT = 7,
+ NETIF_F_HW_VLAN_CTAG_RX_BIT = 8,
+ NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9,
+ NETIF_F_VLAN_CHALLENGED_BIT = 10,
+ NETIF_F_GSO_BIT = 11,
+ NETIF_F_LLTX_BIT = 12,
+ NETIF_F_NETNS_LOCAL_BIT = 13,
+ NETIF_F_GRO_BIT = 14,
+ NETIF_F_LRO_BIT = 15,
+ NETIF_F_GSO_SHIFT = 16,
+ NETIF_F_TSO_BIT = 16,
+ NETIF_F_GSO_ROBUST_BIT = 17,
+ NETIF_F_TSO_ECN_BIT = 18,
+ NETIF_F_TSO_MANGLEID_BIT = 19,
+ NETIF_F_TSO6_BIT = 20,
+ NETIF_F_FSO_BIT = 21,
+ NETIF_F_GSO_GRE_BIT = 22,
+ NETIF_F_GSO_GRE_CSUM_BIT = 23,
+ NETIF_F_GSO_IPXIP4_BIT = 24,
+ NETIF_F_GSO_IPXIP6_BIT = 25,
+ NETIF_F_GSO_UDP_TUNNEL_BIT = 26,
+ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27,
+ NETIF_F_GSO_PARTIAL_BIT = 28,
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29,
+ NETIF_F_GSO_SCTP_BIT = 30,
+ NETIF_F_GSO_ESP_BIT = 31,
+ NETIF_F_GSO_UDP_BIT = 32,
+ NETIF_F_GSO_UDP_L4_BIT = 33,
+ NETIF_F_GSO_FRAGLIST_BIT = 34,
+ NETIF_F_GSO_LAST = 34,
+ NETIF_F_FCOE_CRC_BIT = 35,
+ NETIF_F_SCTP_CRC_BIT = 36,
+ NETIF_F_FCOE_MTU_BIT = 37,
+ NETIF_F_NTUPLE_BIT = 38,
+ NETIF_F_RXHASH_BIT = 39,
+ NETIF_F_RXCSUM_BIT = 40,
+ NETIF_F_NOCACHE_COPY_BIT = 41,
+ NETIF_F_LOOPBACK_BIT = 42,
+ NETIF_F_RXFCS_BIT = 43,
+ NETIF_F_RXALL_BIT = 44,
+ NETIF_F_HW_VLAN_STAG_TX_BIT = 45,
+ NETIF_F_HW_VLAN_STAG_RX_BIT = 46,
+ NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47,
+ NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48,
+ NETIF_F_HW_TC_BIT = 49,
+ NETIF_F_HW_ESP_BIT = 50,
+ NETIF_F_HW_ESP_TX_CSUM_BIT = 51,
+ NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52,
+ NETIF_F_HW_TLS_TX_BIT = 53,
+ NETIF_F_HW_TLS_RX_BIT = 54,
+ NETIF_F_GRO_HW_BIT = 55,
+ NETIF_F_HW_TLS_RECORD_BIT = 56,
+ NETIF_F_GRO_FRAGLIST_BIT = 57,
+ NETIF_F_HW_MACSEC_BIT = 58,
+ NETDEV_FEATURE_COUNT = 59,
+};
+
+enum {
+ SKBTX_HW_TSTAMP = 1,
+ SKBTX_SW_TSTAMP = 2,
+ SKBTX_IN_PROGRESS = 4,
+ SKBTX_DEV_ZEROCOPY = 8,
+ SKBTX_WIFI_STATUS = 16,
+ SKBTX_SHARED_FRAG = 32,
+ SKBTX_SCHED_TSTAMP = 64,
+};
+
+enum netdev_priv_flags {
+ IFF_802_1Q_VLAN = 1,
+ IFF_EBRIDGE = 2,
+ IFF_BONDING = 4,
+ IFF_ISATAP = 8,
+ IFF_WAN_HDLC = 16,
+ IFF_XMIT_DST_RELEASE = 32,
+ IFF_DONT_BRIDGE = 64,
+ IFF_DISABLE_NETPOLL = 128,
+ IFF_MACVLAN_PORT = 256,
+ IFF_BRIDGE_PORT = 512,
+ IFF_OVS_DATAPATH = 1024,
+ IFF_TX_SKB_SHARING = 2048,
+ IFF_UNICAST_FLT = 4096,
+ IFF_TEAM_PORT = 8192,
+ IFF_SUPP_NOFCS = 16384,
+ IFF_LIVE_ADDR_CHANGE = 32768,
+ IFF_MACVLAN = 65536,
+ IFF_XMIT_DST_RELEASE_PERM = 131072,
+ IFF_L3MDEV_MASTER = 262144,
+ IFF_NO_QUEUE = 524288,
+ IFF_OPENVSWITCH = 1048576,
+ IFF_L3MDEV_SLAVE = 2097152,
+ IFF_TEAM = 4194304,
+ IFF_RXFH_CONFIGURED = 8388608,
+ IFF_PHONY_HEADROOM = 16777216,
+ IFF_MACSEC = 33554432,
+ IFF_NO_RX_HANDLER = 67108864,
+ IFF_FAILOVER = 134217728,
+ IFF_FAILOVER_SLAVE = 268435456,
+ IFF_L3MDEV_RX_HANDLER = 536870912,
+ IFF_LIVE_RENAME_OK = 1073741824,
+};
+
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS = 1,
+ ETH_SS_PRIV_FLAGS = 2,
+ ETH_SS_NTUPLE_FILTERS = 3,
+ ETH_SS_FEATURES = 4,
+ ETH_SS_RSS_HASH_FUNCS = 5,
+ ETH_SS_TUNABLES = 6,
+ ETH_SS_PHY_STATS = 7,
+ ETH_SS_PHY_TUNABLES = 8,
+ ETH_SS_LINK_MODES = 9,
+ ETH_SS_MSG_CLASSES = 10,
+ ETH_SS_WOL_MODES = 11,
+ ETH_SS_SOF_TIMESTAMPING = 12,
+ ETH_SS_TS_TX_TYPES = 13,
+ ETH_SS_TS_RX_FILTERS = 14,
+ ETH_SS_COUNT = 15,
+};
+
+struct rt6key {
+ struct in6_addr addr;
+ int plen;
+};
+
+struct rtable;
+
+struct fnhe_hash_bucket;
+
+struct fib_nh_common {
+ struct net_device *nhc_dev;
+ int nhc_oif;
+ unsigned char nhc_scope;
+ u8 nhc_family;
+ u8 nhc_gw_family;
+ unsigned char nhc_flags;
+ struct lwtunnel_state *nhc_lwtstate;
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } nhc_gw;
+ int nhc_weight;
+ atomic_t nhc_upper_bound;
+ struct rtable **nhc_pcpu_rth_output;
+ struct rtable *nhc_rth_input;
+ struct fnhe_hash_bucket *nhc_exceptions;
+};
+
+struct rt6_exception_bucket;
+
+struct fib6_nh {
+ struct fib_nh_common nh_common;
+ struct rt6_info **rt6i_pcpu;
+ struct rt6_exception_bucket *rt6i_exception_bucket;
+};
+
+struct fib6_node;
+
+struct nexthop;
+
+struct fib6_info {
+ struct fib6_table *fib6_table;
+ struct fib6_info *fib6_next;
+ struct fib6_node *fib6_node;
+ union {
+ struct list_head fib6_siblings;
+ struct list_head nh_list;
+ };
+ unsigned int fib6_nsiblings;
+ refcount_t fib6_ref;
+ long unsigned int expires;
+ struct dst_metrics *fib6_metrics;
+ struct rt6key fib6_dst;
+ u32 fib6_flags;
+ struct rt6key fib6_src;
+ struct rt6key fib6_prefsrc;
+ u32 fib6_metric;
+ u8 fib6_protocol;
+ u8 fib6_type;
+ u8 should_flush: 1;
+ u8 dst_nocount: 1;
+ u8 dst_nopolicy: 1;
+ u8 fib6_destroying: 1;
+ u8 offload: 1;
+ u8 trap: 1;
+ u8 unused: 2;
+ struct callback_head rcu;
+ struct nexthop *nh;
+ struct fib6_nh fib6_nh[0];
+};
+
+struct uncached_list;
+
+struct rt6_info {
+ struct dst_entry dst;
+ struct fib6_info *from;
+ int sernum;
+ struct rt6key rt6i_dst;
+ struct rt6key rt6i_src;
+ struct in6_addr rt6i_gateway;
+ struct inet6_dev *rt6i_idev;
+ u32 rt6i_flags;
+ struct list_head rt6i_uncached;
+ struct uncached_list *rt6i_uncached_list;
+ short unsigned int rt6i_nfheader_len;
+};
+
+struct rt6_statistics {
+ __u32 fib_nodes;
+ __u32 fib_route_nodes;
+ __u32 fib_rt_entries;
+ __u32 fib_rt_cache;
+ __u32 fib_discarded_routes;
+ atomic_t fib_rt_alloc;
+ atomic_t fib_rt_uncache;
+};
+
+struct fib6_node {
+ struct fib6_node *parent;
+ struct fib6_node *left;
+ struct fib6_node *right;
+ struct fib6_info *leaf;
+ __u16 fn_bit;
+ __u16 fn_flags;
+ int fn_sernum;
+ struct fib6_info *rr_ptr;
+ struct callback_head rcu;
+};
+
+struct fib6_table {
+ struct hlist_node tb6_hlist;
+ u32 tb6_id;
+ spinlock_t tb6_lock;
+ struct fib6_node tb6_root;
+ struct inet_peer_base tb6_peers;
+ unsigned int flags;
+ unsigned int fib_seq;
+};
+
+typedef union {
+ __be32 a4;
+ __be32 a6[4];
+ struct in6_addr in6;
+} xfrm_address_t;
+
+struct xfrm_id {
+ xfrm_address_t daddr;
+ __be32 spi;
+ __u8 proto;
+};
+
+struct xfrm_sec_ctx {
+ __u8 ctx_doi;
+ __u8 ctx_alg;
+ __u16 ctx_len;
+ __u32 ctx_sid;
+ char ctx_str[0];
+};
+
+struct xfrm_selector {
+ xfrm_address_t daddr;
+ xfrm_address_t saddr;
+ __be16 dport;
+ __be16 dport_mask;
+ __be16 sport;
+ __be16 sport_mask;
+ __u16 family;
+ __u8 prefixlen_d;
+ __u8 prefixlen_s;
+ __u8 proto;
+ int ifindex;
+ __kernel_uid32_t user;
+};
+
+struct xfrm_lifetime_cfg {
+ __u64 soft_byte_limit;
+ __u64 hard_byte_limit;
+ __u64 soft_packet_limit;
+ __u64 hard_packet_limit;
+ __u64 soft_add_expires_seconds;
+ __u64 hard_add_expires_seconds;
+ __u64 soft_use_expires_seconds;
+ __u64 hard_use_expires_seconds;
+};
+
+struct xfrm_lifetime_cur {
+ __u64 bytes;
+ __u64 packets;
+ __u64 add_time;
+ __u64 use_time;
+};
+
+struct xfrm_replay_state {
+ __u32 oseq;
+ __u32 seq;
+ __u32 bitmap;
+};
+
+struct xfrm_replay_state_esn {
+ unsigned int bmp_len;
+ __u32 oseq;
+ __u32 seq;
+ __u32 oseq_hi;
+ __u32 seq_hi;
+ __u32 replay_window;
+ __u32 bmp[0];
+};
+
+struct xfrm_algo {
+ char alg_name[64];
+ unsigned int alg_key_len;
+ char alg_key[0];
+};
+
+struct xfrm_algo_auth {
+ char alg_name[64];
+ unsigned int alg_key_len;
+ unsigned int alg_trunc_len;
+ char alg_key[0];
+};
+
+struct xfrm_algo_aead {
+ char alg_name[64];
+ unsigned int alg_key_len;
+ unsigned int alg_icv_len;
+ char alg_key[0];
+};
+
+struct xfrm_stats {
+ __u32 replay_window;
+ __u32 replay;
+ __u32 integrity_failed;
+};
+
+enum {
+ XFRM_POLICY_TYPE_MAIN = 0,
+ XFRM_POLICY_TYPE_SUB = 1,
+ XFRM_POLICY_TYPE_MAX = 2,
+ XFRM_POLICY_TYPE_ANY = 255,
+};
+
+struct xfrm_encap_tmpl {
+ __u16 encap_type;
+ __be16 encap_sport;
+ __be16 encap_dport;
+ xfrm_address_t encap_oa;
+};
+
+struct xfrm_mark {
+ __u32 v;
+ __u32 m;
+};
+
+struct xfrm_address_filter {
+ xfrm_address_t saddr;
+ xfrm_address_t daddr;
+ __u16 family;
+ __u8 splen;
+ __u8 dplen;
+};
+
+enum {
+ IFLA_UNSPEC = 0,
+ IFLA_ADDRESS = 1,
+ IFLA_BROADCAST = 2,
+ IFLA_IFNAME = 3,
+ IFLA_MTU = 4,
+ IFLA_LINK = 5,
+ IFLA_QDISC = 6,
+ IFLA_STATS = 7,
+ IFLA_COST = 8,
+ IFLA_PRIORITY = 9,
+ IFLA_MASTER = 10,
+ IFLA_WIRELESS = 11,
+ IFLA_PROTINFO = 12,
+ IFLA_TXQLEN = 13,
+ IFLA_MAP = 14,
+ IFLA_WEIGHT = 15,
+ IFLA_OPERSTATE = 16,
+ IFLA_LINKMODE = 17,
+ IFLA_LINKINFO = 18,
+ IFLA_NET_NS_PID = 19,
+ IFLA_IFALIAS = 20,
+ IFLA_NUM_VF = 21,
+ IFLA_VFINFO_LIST = 22,
+ IFLA_STATS64 = 23,
+ IFLA_VF_PORTS = 24,
+ IFLA_PORT_SELF = 25,
+ IFLA_AF_SPEC = 26,
+ IFLA_GROUP = 27,
+ IFLA_NET_NS_FD = 28,
+ IFLA_EXT_MASK = 29,
+ IFLA_PROMISCUITY = 30,
+ IFLA_NUM_TX_QUEUES = 31,
+ IFLA_NUM_RX_QUEUES = 32,
+ IFLA_CARRIER = 33,
+ IFLA_PHYS_PORT_ID = 34,
+ IFLA_CARRIER_CHANGES = 35,
+ IFLA_PHYS_SWITCH_ID = 36,
+ IFLA_LINK_NETNSID = 37,
+ IFLA_PHYS_PORT_NAME = 38,
+ IFLA_PROTO_DOWN = 39,
+ IFLA_GSO_MAX_SEGS = 40,
+ IFLA_GSO_MAX_SIZE = 41,
+ IFLA_PAD = 42,
+ IFLA_XDP = 43,
+ IFLA_EVENT = 44,
+ IFLA_NEW_NETNSID = 45,
+ IFLA_IF_NETNSID = 46,
+ IFLA_TARGET_NETNSID = 46,
+ IFLA_CARRIER_UP_COUNT = 47,
+ IFLA_CARRIER_DOWN_COUNT = 48,
+ IFLA_NEW_IFINDEX = 49,
+ IFLA_MIN_MTU = 50,
+ IFLA_MAX_MTU = 51,
+ IFLA_PROP_LIST = 52,
+ IFLA_ALT_IFNAME = 53,
+ IFLA_PERM_ADDRESS = 54,
+ __IFLA_MAX = 55,
+};
+
+enum skb_free_reason {
+ SKB_REASON_CONSUMED = 0,
+ SKB_REASON_DROPPED = 1,
+};
+
+struct ifinfomsg {
+ unsigned char ifi_family;
+ unsigned char __ifi_pad;
+ short unsigned int ifi_type;
+ int ifi_index;
+ unsigned int ifi_flags;
+ unsigned int ifi_change;
+};
+
+struct xfrm_state_walk {
+ struct list_head all;
+ u8 state;
+ u8 dying;
+ u8 proto;
+ u32 seq;
+ struct xfrm_address_filter *filter;
+};
+
+struct xfrm_state_offload {
+ struct net_device *dev;
+ long unsigned int offload_handle;
+ unsigned int num_exthdrs;
+ u8 flags;
+};
+
+struct xfrm_mode {
+ u8 encap;
+ u8 family;
+ u8 flags;
+};
+
+struct xfrm_replay;
+
+struct xfrm_type;
+
+struct xfrm_type_offload;
+
+struct xfrm_state {
+ possible_net_t xs_net;
+ union {
+ struct hlist_node gclist;
+ struct hlist_node bydst;
+ };
+ struct hlist_node bysrc;
+ struct hlist_node byspi;
+ refcount_t refcnt;
+ spinlock_t lock;
+ struct xfrm_id id;
+ struct xfrm_selector sel;
+ struct xfrm_mark mark;
+ u32 if_id;
+ u32 tfcpad;
+ u32 genid;
+ struct xfrm_state_walk km;
+ struct {
+ u32 reqid;
+ u8 mode;
+ u8 replay_window;
+ u8 aalgo;
+ u8 ealgo;
+ u8 calgo;
+ u8 flags;
+ u16 family;
+ xfrm_address_t saddr;
+ int header_len;
+ int trailer_len;
+ u32 extra_flags;
+ struct xfrm_mark smark;
+ } props;
+ struct xfrm_lifetime_cfg lft;
+ struct xfrm_algo_auth *aalg;
+ struct xfrm_algo *ealg;
+ struct xfrm_algo *calg;
+ struct xfrm_algo_aead *aead;
+ const char *geniv;
+ struct xfrm_encap_tmpl *encap;
+ struct sock *encap_sk;
+ xfrm_address_t *coaddr;
+ struct xfrm_state *tunnel;
+ atomic_t tunnel_users;
+ struct xfrm_replay_state replay;
+ struct xfrm_replay_state_esn *replay_esn;
+ struct xfrm_replay_state preplay;
+ struct xfrm_replay_state_esn *preplay_esn;
+ const struct xfrm_replay *repl;
+ u32 xflags;
+ u32 replay_maxage;
+ u32 replay_maxdiff;
+ struct timer_list rtimer;
+ struct xfrm_stats stats;
+ struct xfrm_lifetime_cur curlft;
+ struct hrtimer mtimer;
+ struct xfrm_state_offload xso;
+ long int saved_tmo;
+ time64_t lastused;
+ struct page_frag xfrag;
+ const struct xfrm_type *type;
+ struct xfrm_mode inner_mode;
+ struct xfrm_mode inner_mode_iaf;
+ struct xfrm_mode outer_mode;
+ const struct xfrm_type_offload *type_offload;
+ struct xfrm_sec_ctx *security;
+ void *data;
+};
+
+struct xfrm_policy_walk_entry {
+ struct list_head all;
+ u8 dead;
+};
+
+struct xfrm_policy_queue {
+ struct sk_buff_head hold_queue;
+ struct timer_list hold_timer;
+ long unsigned int timeout;
+};
+
+struct xfrm_tmpl {
+ struct xfrm_id id;
+ xfrm_address_t saddr;
+ short unsigned int encap_family;
+ u32 reqid;
+ u8 mode;
+ u8 share;
+ u8 optional;
+ u8 allalgs;
+ u32 aalgos;
+ u32 ealgos;
+ u32 calgos;
+};
+
+struct xfrm_policy {
+ possible_net_t xp_net;
+ struct hlist_node bydst;
+ struct hlist_node byidx;
+ rwlock_t lock;
+ refcount_t refcnt;
+ u32 pos;
+ struct timer_list timer;
+ atomic_t genid;
+ u32 priority;
+ u32 index;
+ u32 if_id;
+ struct xfrm_mark mark;
+ struct xfrm_selector selector;
+ struct xfrm_lifetime_cfg lft;
+ struct xfrm_lifetime_cur curlft;
+ struct xfrm_policy_walk_entry walk;
+ struct xfrm_policy_queue polq;
+ bool bydst_reinsert;
+ u8 type;
+ u8 action;
+ u8 flags;
+ u8 xfrm_nr;
+ u16 family;
+ struct xfrm_sec_ctx *security;
+ struct xfrm_tmpl xfrm_vec[6];
+ struct hlist_node bydst_inexact_list;
+ struct callback_head rcu;
+};
+
+struct fib6_config {
+ u32 fc_table;
+ u32 fc_metric;
+ int fc_dst_len;
+ int fc_src_len;
+ int fc_ifindex;
+ u32 fc_flags;
+ u32 fc_protocol;
+ u16 fc_type;
+ u16 fc_delete_all_nh: 1;
+ u16 fc_ignore_dev_down: 1;
+ u16 __unused: 14;
+ u32 fc_nh_id;
+ struct in6_addr fc_dst;
+ struct in6_addr fc_src;
+ struct in6_addr fc_prefsrc;
+ struct in6_addr fc_gateway;
+ long unsigned int fc_expires;
+ struct nlattr *fc_mx;
+ int fc_mx_len;
+ int fc_mp_len;
+ struct nlattr *fc_mp;
+ struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+ bool fc_is_fdb;
+};
+
+struct fib_nh_exception {
+ struct fib_nh_exception *fnhe_next;
+ int fnhe_genid;
+ __be32 fnhe_daddr;
+ u32 fnhe_pmtu;
+ bool fnhe_mtu_locked;
+ __be32 fnhe_gw;
+ long unsigned int fnhe_expires;
+ struct rtable *fnhe_rth_input;
+ struct rtable *fnhe_rth_output;
+ long unsigned int fnhe_stamp;
+ struct callback_head rcu;
+};
+
+struct rtable {
+ struct dst_entry dst;
+ int rt_genid;
+ unsigned int rt_flags;
+ __u16 rt_type;
+ __u8 rt_is_input;
+ __u8 rt_uses_gateway;
+ int rt_iif;
+ u8 rt_gw_family;
+ union {
+ __be32 rt_gw4;
+ struct in6_addr rt_gw6;
+ };
+ u32 rt_mtu_locked: 1;
+ u32 rt_pmtu: 31;
+ struct list_head rt_uncached;
+ struct uncached_list *rt_uncached_list;
+};
+
+struct fnhe_hash_bucket {
+ struct fib_nh_exception *chain;
+};
+
+struct rt6_exception_bucket {
+ struct hlist_head chain;
+ int depth;
+};
+
+struct xfrm_replay {
+ void (*advance)(struct xfrm_state *, __be32);
+ int (*check)(struct xfrm_state *, struct sk_buff *, __be32);
+ int (*recheck)(struct xfrm_state *, struct sk_buff *, __be32);
+ void (*notify)(struct xfrm_state *, int);
+ int (*overflow)(struct xfrm_state *, struct sk_buff *);
+};
+
+struct xfrm_type {
+ char *description;
+ struct module *owner;
+ u8 proto;
+ u8 flags;
+ int (*init_state)(struct xfrm_state *);
+ void (*destructor)(struct xfrm_state *);
+ int (*input)(struct xfrm_state *, struct sk_buff *);
+ int (*output)(struct xfrm_state *, struct sk_buff *);
+ int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *);
+ int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
+};
+
+struct xfrm_type_offload {
+ char *description;
+ struct module *owner;
+ u8 proto;
+ void (*encap)(struct xfrm_state *, struct sk_buff *);
+ int (*input_tail)(struct xfrm_state *, struct sk_buff *);
+ int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t);
+};
+
+enum {
+ VETH_INFO_UNSPEC = 0,
+ VETH_INFO_PEER = 1,
+ __VETH_INFO_MAX = 2,
+};
+
+struct veth_stats {
+ u64 rx_drops;
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_redirect;
+ u64 xdp_drops;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+ u64 peer_tq_xdp_xmit;
+ u64 peer_tq_xdp_xmit_err;
+};
+
+struct veth_rq_stats {
+ struct veth_stats vs;
+ struct u64_stats_sync syncp;
+};
+
+struct veth_rq {
+ struct napi_struct xdp_napi;
+ struct net_device *dev;
+ struct bpf_prog *xdp_prog;
+ struct xdp_mem_info xdp_mem;
+ struct veth_rq_stats stats;
+ bool rx_notify_masked;
+ long: 56;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct ptr_ring xdp_ring;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+struct veth_priv {
+ struct net_device *peer;
+ atomic64_t dropped;
+ struct bpf_prog *_xdp_prog;
+ struct veth_rq *rq;
+ unsigned int requested_headroom;
+};
+
+struct veth_xdp_tx_bq {
+ struct xdp_frame *q[16];
+ unsigned int count;
+};
+
+struct veth_q_stat_desc {
+ char desc[32];
+ size_t offset;
+};
+
+struct flow_dissector_key_control {
+ u16 thoff;
+ u16 addr_type;
+ u32 flags;
+};
+
+struct flow_dissector_key_basic {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+};
+
+struct flow_keys_basic {
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+};
+
+struct nf_conntrack {
+ atomic_t use;
+};
+
+struct mmpin {
+ struct user_struct *user;
+ unsigned int num_pg;
+};
+
+struct ubuf_info {
+ void (*callback)(struct ubuf_info *, bool);
+ union {
+ struct {
+ long unsigned int desc;
+ void *ctx;
+ };
+ struct {
+ u32 id;
+ u16 len;
+ u16 zerocopy: 1;
+ u32 bytelen;
+ };
+ };
+ refcount_t refcnt;
+ struct mmpin mmp;
+};
+
+enum {
+ SKB_GSO_TCPV4 = 1,
+ SKB_GSO_DODGY = 2,
+ SKB_GSO_TCP_ECN = 4,
+ SKB_GSO_TCP_FIXEDID = 8,
+ SKB_GSO_TCPV6 = 16,
+ SKB_GSO_FCOE = 32,
+ SKB_GSO_GRE = 64,
+ SKB_GSO_GRE_CSUM = 128,
+ SKB_GSO_IPXIP4 = 256,
+ SKB_GSO_IPXIP6 = 512,
+ SKB_GSO_UDP_TUNNEL = 1024,
+ SKB_GSO_UDP_TUNNEL_CSUM = 2048,
+ SKB_GSO_PARTIAL = 4096,
+ SKB_GSO_TUNNEL_REMCSUM = 8192,
+ SKB_GSO_SCTP = 16384,
+ SKB_GSO_ESP = 32768,
+ SKB_GSO_UDP = 65536,
+ SKB_GSO_UDP_L4 = 131072,
+ SKB_GSO_FRAGLIST = 262144,
+};
+
+struct netdev_hw_addr {
+ struct list_head list;
+ unsigned char addr[32];
+ unsigned char type;
+ bool global_use;
+ int sync_cnt;
+ int refcount;
+ int synced;
+ struct callback_head callback_head;
+};
+
+enum netdev_state_t {
+ __LINK_STATE_START = 0,
+ __LINK_STATE_PRESENT = 1,
+ __LINK_STATE_NOCARRIER = 2,
+ __LINK_STATE_LINKWATCH_PENDING = 3,
+ __LINK_STATE_DORMANT = 4,
+ __LINK_STATE_TESTING = 5,
+};
+
+enum netdev_queue_state_t {
+ __QUEUE_STATE_DRV_XOFF = 0,
+ __QUEUE_STATE_STACK_XOFF = 1,
+ __QUEUE_STATE_FROZEN = 2,
+};
+
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *, char *);
+ ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t);
+};
+
+struct virtio_net_config {
+ __u8 mac[6];
+ __u16 status;
+ __u16 max_virtqueue_pairs;
+ __u16 mtu;
+ __u32 speed;
+ __u8 duplex;
+ __u8 rss_max_key_size;
+ __le16 rss_max_indirection_table_length;
+ __le32 supported_hash_types;
+};
+
+struct virtio_net_hdr {
+ __u8 flags;
+ __u8 gso_type;
+ __virtio16 hdr_len;
+ __virtio16 gso_size;
+ __virtio16 csum_start;
+ __virtio16 csum_offset;
+};
+
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ __virtio16 num_buffers;
+};
+
+struct virtio_net_ctrl_hdr {
+ __u8 class;
+ __u8 cmd;
+};
+
+typedef __u8 virtio_net_ctrl_ack;
+
+struct virtio_net_ctrl_mac {
+ __virtio32 entries;
+ __u8 macs[0];
+};
+
+struct virtio_net_ctrl_mq {
+ __virtio16 virtqueue_pairs;
+};
+
+struct failover_ops {
+ int (*slave_pre_register)(struct net_device *, struct net_device *);
+ int (*slave_register)(struct net_device *, struct net_device *);
+ int (*slave_pre_unregister)(struct net_device *, struct net_device *);
+ int (*slave_unregister)(struct net_device *, struct net_device *);
+ int (*slave_link_change)(struct net_device *, struct net_device *);
+ int (*slave_name_change)(struct net_device *, struct net_device *);
+ rx_handler_result_t (*slave_handle_frame)(struct sk_buff **);
+};
+
+struct failover {
+ struct list_head list;
+ struct net_device *failover_dev;
+ struct failover_ops *ops;
+};
+
+struct ewma_pkt_len {
+ long unsigned int internal;
+};
+
+struct virtnet_stat_desc {
+ char desc[32];
+ size_t offset;
+};
+
+struct virtnet_sq_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+ u64 xdp_tx;
+ u64 xdp_tx_drops;
+ u64 kicks;
+};
+
+struct virtnet_rq_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 xdp_packets;
+ u64 xdp_tx;
+ u64 xdp_redirects;
+ u64 xdp_drops;
+ u64 kicks;
+};
+
+struct send_queue {
+ struct virtqueue *vq;
+ struct scatterlist sg[19];
+ char name[40];
+ struct virtnet_sq_stats stats;
+ struct napi_struct napi;
+};
+
+struct receive_queue {
+ struct virtqueue *vq;
+ struct napi_struct napi;
+ struct bpf_prog *xdp_prog;
+ struct virtnet_rq_stats stats;
+ struct page *pages;
+ struct ewma_pkt_len mrg_avg_pkt_len;
+ struct page_frag alloc_frag;
+ struct scatterlist sg[19];
+ unsigned int min_buf_len;
+ char name[40];
+ struct xdp_rxq_info xdp_rxq;
+};
+
+struct control_buf {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ struct virtio_net_ctrl_mq mq;
+ u8 promisc;
+ u8 allmulti;
+ __virtio16 vid;
+ __virtio64 offloads;
+};
+
+struct virtnet_info {
+ struct virtio_device *vdev;
+ struct virtqueue *cvq;
+ struct net_device *dev;
+ struct send_queue *sq;
+ struct receive_queue *rq;
+ unsigned int status;
+ u16 max_queue_pairs;
+ u16 curr_queue_pairs;
+ u16 xdp_queue_pairs;
+ bool big_packets;
+ bool mergeable_rx_bufs;
+ bool has_cvq;
+ bool any_header_sg;
+ u8 hdr_len;
+ struct delayed_work refill;
+ struct work_struct config_work;
+ bool affinity_hint_set;
+ struct hlist_node node;
+ struct hlist_node node_dead;
+ struct control_buf *ctrl;
+ u8 duplex;
+ u32 speed;
+ long unsigned int guest_offloads;
+ long unsigned int guest_offloads_capable;
+ struct failover *failover;
+};
+
+struct netdev_lag_lower_state_info {
+ u8 link_up: 1;
+ u8 tx_enabled: 1;
+};
+
+struct net_failover_info {
+ struct net_device *primary_dev;
+ struct net_device *standby_dev;
+ struct rtnl_link_stats64 primary_stats;
+ struct rtnl_link_stats64 standby_stats;
+ struct rtnl_link_stats64 failover_stats;
+ spinlock_t stats_lock;
+};
+
+struct input_mt_slot {
+ int abs[14];
+ unsigned int frame;
+ unsigned int key;
+};
+
+struct input_mt {
+ int trkid;
+ int num_slots;
+ int slot;
+ unsigned int flags;
+ unsigned int frame;
+ int *red;
+ struct input_mt_slot slots[0];
+};
+
+union input_seq_state {
+ struct {
+ short unsigned int pos;
+ bool mutex_acquired;
+ };
+ void *p;
+};
+
+struct input_devres {
+ struct input_dev *input;
+};
+
+struct input_event {
+ __kernel_ulong_t __sec;
+ __kernel_ulong_t __usec;
+ __u16 type;
+ __u16 code;
+ __s32 value;
+};
+
+struct input_mt_pos {
+ s16 x;
+ s16 y;
+};
+
+struct input_dev_poller {
+ void (*poll)(struct input_dev *);
+ unsigned int poll_interval;
+ unsigned int poll_interval_max;
+ unsigned int poll_interval_min;
+ struct input_dev *input;
+ struct delayed_work work;
+};
+
+struct thermal_attr {
+ struct device_attribute attr;
+ char name[20];
+};
+
+struct trace_event_raw_thermal_temperature {
+ struct trace_entry ent;
+ u32 __data_loc_thermal_zone;
+ int id;
+ int temp_prev;
+ int temp;
+ char __data[0];
+};
+
+struct trace_event_raw_cdev_update {
+ struct trace_entry ent;
+ u32 __data_loc_type;
+ long unsigned int target;
+ char __data[0];
+};
+
+struct trace_event_raw_thermal_zone_trip {
+ struct trace_entry ent;
+ u32 __data_loc_thermal_zone;
+ int id;
+ int trip;
+ enum thermal_trip_type trip_type;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_thermal_temperature {
+ u32 thermal_zone;
+};
+
+struct trace_event_data_offsets_cdev_update {
+ u32 type;
+};
+
+struct trace_event_data_offsets_thermal_zone_trip {
+ u32 thermal_zone;
+};
+
+typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *);
+
+typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int);
+
+typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type);
+
+struct thermal_instance {
+ int id;
+ char name[20];
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+ int trip;
+ bool initialized;
+ long unsigned int upper;
+ long unsigned int lower;
+ long unsigned int target;
+ char attr_name[20];
+ struct device_attribute attr;
+ char weight_attr_name[20];
+ struct device_attribute weight_attr;
+ struct list_head tz_node;
+ struct list_head cdev_node;
+ unsigned int weight;
+};
+
+struct cpufreq_policy_data {
+ struct cpufreq_cpuinfo cpuinfo;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int cpu;
+ unsigned int min;
+ unsigned int max;
+};
+
+struct cpufreq_driver {
+ char name[16];
+ u8 flags;
+ void *driver_data;
+ int (*init)(struct cpufreq_policy *);
+ int (*verify)(struct cpufreq_policy_data *);
+ int (*setpolicy)(struct cpufreq_policy *);
+ int (*target)(struct cpufreq_policy *, unsigned int, unsigned int);
+ int (*target_index)(struct cpufreq_policy *, unsigned int);
+ unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int);
+ unsigned int (*resolve_freq)(struct cpufreq_policy *, unsigned int);
+ unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int);
+ int (*target_intermediate)(struct cpufreq_policy *, unsigned int);
+ unsigned int (*get)(unsigned int);
+ void (*update_limits)(unsigned int);
+ int (*bios_limit)(int, unsigned int *);
+ int (*online)(struct cpufreq_policy *);
+ int (*offline)(struct cpufreq_policy *);
+ int (*exit)(struct cpufreq_policy *);
+ void (*stop_cpu)(struct cpufreq_policy *);
+ int (*suspend)(struct cpufreq_policy *);
+ int (*resume)(struct cpufreq_policy *);
+ void (*ready)(struct cpufreq_policy *);
+ struct freq_attr **attr;
+ bool boost_enabled;
+ int (*set_boost)(struct cpufreq_policy *, int);
+};
+
+enum acpi_preferred_pm_profiles {
+ PM_UNSPECIFIED = 0,
+ PM_DESKTOP = 1,
+ PM_MOBILE = 2,
+ PM_WORKSTATION = 3,
+ PM_ENTERPRISE_SERVER = 4,
+ PM_SOHO_SERVER = 5,
+ PM_APPLIANCE_PC = 6,
+ PM_PERFORMANCE_SERVER = 7,
+ PM_TABLET = 8,
+};
+
+struct sample {
+ int32_t core_avg_perf;
+ int32_t busy_scaled;
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+ u64 time;
+};
+
+struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
+ int max_pstate_physical;
+ int scaling;
+ int turbo_pstate;
+ unsigned int max_freq;
+ unsigned int turbo_freq;
+};
+
+struct vid_data {
+ int min;
+ int max;
+ int turbo;
+ int32_t ratio;
+};
+
+struct global_params {
+ bool no_turbo;
+ bool turbo_disabled;
+ bool turbo_disabled_mf;
+ int max_perf_pct;
+ int min_perf_pct;
+};
+
+struct cpudata {
+ int cpu;
+ unsigned int policy;
+ struct update_util_data update_util;
+ bool update_util_set;
+ struct pstate_data pstate;
+ struct vid_data vid;
+ u64 last_update;
+ u64 last_sample_time;
+ u64 aperf_mperf_shift;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ u64 prev_tsc;
+ u64 prev_cummulative_iowait;
+ struct sample sample;
+ int32_t min_perf_ratio;
+ int32_t max_perf_ratio;
+ struct acpi_processor_performance acpi_perf_data;
+ bool valid_pss_table;
+ unsigned int iowait_boost;
+ s16 epp_powersave;
+ s16 epp_policy;
+ s16 epp_default;
+ s16 epp_saved;
+ u64 hwp_req_cached;
+ u64 hwp_cap_cached;
+ u64 last_io_update;
+ unsigned int sched_flags;
+ u32 hwp_boost_min;
+};
+
+struct pstate_funcs {
+ int (*get_max)();
+ int (*get_max_physical)();
+ int (*get_min)();
+ int (*get_turbo)();
+ int (*get_scaling)();
+ int (*get_aperf_mperf_shift)();
+ u64 (*get_val)(struct cpudata *, int);
+ void (*get_vid)(struct cpudata *);
+};
+
+enum {
+ PSS = 0,
+ PPC = 1,
+};
+
+struct cpuidle_governor {
+ char name[16];
+ struct list_head governor_list;
+ unsigned int rating;
+ int (*enable)(struct cpuidle_driver___2 *, struct cpuidle_device *);
+ void (*disable)(struct cpuidle_driver___2 *, struct cpuidle_device *);
+ int (*select)(struct cpuidle_driver___2 *, struct cpuidle_device *, bool *);
+ void (*reflect)(struct cpuidle_device *, int);
+};
+
+struct cpuidle_state_kobj {
+ struct cpuidle_state *state;
+ struct cpuidle_state_usage *state_usage;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+ struct cpuidle_device *device;
+};
+
+struct cpuidle_device_kobj {
+ struct cpuidle_device *dev;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
+struct cpuidle_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_device *, char *);
+ ssize_t (*store)(struct cpuidle_device *, const char *, size_t);
+};
+
+struct cpuidle_state_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *);
+ ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t);
+};
+
+struct menu_device {
+ int needs_update;
+ int tick_wakeup;
+ u64 next_timer_ns;
+ unsigned int bucket;
+ unsigned int correction_factor[12];
+ unsigned int intervals[8];
+ int interval_ptr;
+};
+
+enum dmi_entry_type {
+ DMI_ENTRY_BIOS = 0,
+ DMI_ENTRY_SYSTEM = 1,
+ DMI_ENTRY_BASEBOARD = 2,
+ DMI_ENTRY_CHASSIS = 3,
+ DMI_ENTRY_PROCESSOR = 4,
+ DMI_ENTRY_MEM_CONTROLLER = 5,
+ DMI_ENTRY_MEM_MODULE = 6,
+ DMI_ENTRY_CACHE = 7,
+ DMI_ENTRY_PORT_CONNECTOR = 8,
+ DMI_ENTRY_SYSTEM_SLOT = 9,
+ DMI_ENTRY_ONBOARD_DEVICE = 10,
+ DMI_ENTRY_OEMSTRINGS = 11,
+ DMI_ENTRY_SYSCONF = 12,
+ DMI_ENTRY_BIOS_LANG = 13,
+ DMI_ENTRY_GROUP_ASSOC = 14,
+ DMI_ENTRY_SYSTEM_EVENT_LOG = 15,
+ DMI_ENTRY_PHYS_MEM_ARRAY = 16,
+ DMI_ENTRY_MEM_DEVICE = 17,
+ DMI_ENTRY_32_MEM_ERROR = 18,
+ DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19,
+ DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20,
+ DMI_ENTRY_BUILTIN_POINTING_DEV = 21,
+ DMI_ENTRY_PORTABLE_BATTERY = 22,
+ DMI_ENTRY_SYSTEM_RESET = 23,
+ DMI_ENTRY_HW_SECURITY = 24,
+ DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25,
+ DMI_ENTRY_VOLTAGE_PROBE = 26,
+ DMI_ENTRY_COOLING_DEV = 27,
+ DMI_ENTRY_TEMP_PROBE = 28,
+ DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29,
+ DMI_ENTRY_OOB_REMOTE_ACCESS = 30,
+ DMI_ENTRY_BIS_ENTRY = 31,
+ DMI_ENTRY_SYSTEM_BOOT = 32,
+ DMI_ENTRY_MGMT_DEV = 33,
+ DMI_ENTRY_MGMT_DEV_COMPONENT = 34,
+ DMI_ENTRY_MGMT_DEV_THRES = 35,
+ DMI_ENTRY_MEM_CHANNEL = 36,
+ DMI_ENTRY_IPMI_DEV = 37,
+ DMI_ENTRY_SYS_POWER_SUPPLY = 38,
+ DMI_ENTRY_ADDITIONAL = 39,
+ DMI_ENTRY_ONBOARD_DEV_EXT = 40,
+ DMI_ENTRY_MGMT_CONTROLLER_HOST = 41,
+ DMI_ENTRY_INACTIVE = 126,
+ DMI_ENTRY_END_OF_TABLE = 127,
+};
+
+struct dmi_header {
+ u8 type;
+ u8 length;
+ u16 handle;
+};
+
+struct dmi_memdev_info {
+ const char *device;
+ const char *bank;
+ u64 size;
+ u16 handle;
+ u8 type;
+};
+
+struct firmware_map_entry {
+ u64 start;
+ u64 end;
+ const char *type;
+ struct list_head list;
+ struct kobject kobj;
+};
+
+struct memmap_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct firmware_map_entry *, char *);
+};
+
+typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool);
+
+typedef struct {
+ efi_guid_t guid;
+ u32 table;
+} efi_config_table_32_t;
+
+typedef union {
+ struct {
+ efi_guid_t guid;
+ void *table;
+ };
+ efi_config_table_32_t mixed_mode;
+} efi_config_table_t;
+
+typedef struct {
+ u16 version;
+ u16 length;
+ u32 runtime_services_supported;
+} efi_rt_properties_table_t;
+
+struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_t *set_variable_nonblocking;
+ efi_query_variable_store_t *query_variable_store;
+};
+
+struct efivars {
+ struct kset *kset;
+ struct kobject *kobject;
+ const struct efivar_operations *ops;
+};
+
+struct efi_variable {
+ efi_char16_t VariableName[512];
+ efi_guid_t VendorGuid;
+ long unsigned int DataSize;
+ __u8 Data[1024];
+ efi_status_t Status;
+ __u32 Attributes;
+} __attribute__((packed));
+
+struct efivar_entry {
+ struct efi_variable var;
+ struct list_head list;
+ struct kobject kobj;
+ bool scanning;
+ bool deleting;
+};
+
+struct linux_efi_random_seed {
+ u32 size;
+ u8 bits[0];
+};
+
+struct linux_efi_memreserve {
+ int size;
+ atomic_t count;
+ phys_addr_t next;
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[0];
+};
+
+struct efi_generic_dev_path {
+ u8 type;
+ u8 sub_type;
+ u16 length;
+};
+
+struct variable_validate {
+ efi_guid_t vendor;
+ char *name;
+ bool (*validate)(efi_char16_t *, int, u8 *, long unsigned int);
+};
+
+typedef struct {
+ u32 version;
+ u32 num_entries;
+ u32 desc_size;
+ u32 reserved;
+ efi_memory_desc_t entry[0];
+} efi_memory_attributes_table_t;
+
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u32 final_events_preboot_size;
+ u8 version;
+ u8 log[0];
+};
+
+struct efi_tcg2_final_events_table {
+ u64 version;
+ u64 nr_events;
+ u8 events[0];
+};
+
+enum hash_algo {
+ HASH_ALGO_MD4 = 0,
+ HASH_ALGO_MD5 = 1,
+ HASH_ALGO_SHA1 = 2,
+ HASH_ALGO_RIPE_MD_160 = 3,
+ HASH_ALGO_SHA256 = 4,
+ HASH_ALGO_SHA384 = 5,
+ HASH_ALGO_SHA512 = 6,
+ HASH_ALGO_SHA224 = 7,
+ HASH_ALGO_RIPE_MD_128 = 8,
+ HASH_ALGO_RIPE_MD_256 = 9,
+ HASH_ALGO_RIPE_MD_320 = 10,
+ HASH_ALGO_WP_256 = 11,
+ HASH_ALGO_WP_384 = 12,
+ HASH_ALGO_WP_512 = 13,
+ HASH_ALGO_TGR_128 = 14,
+ HASH_ALGO_TGR_160 = 15,
+ HASH_ALGO_TGR_192 = 16,
+ HASH_ALGO_SM3_256 = 17,
+ HASH_ALGO_STREEBOG_256 = 18,
+ HASH_ALGO_STREEBOG_512 = 19,
+ HASH_ALGO__LAST = 20,
+};
+
+struct tpm_digest {
+ u16 alg_id;
+ u8 digest[64];
+};
+
+enum tpm_duration {
+ TPM_SHORT = 0,
+ TPM_MEDIUM = 1,
+ TPM_LONG = 2,
+ TPM_LONG_LONG = 3,
+ TPM_UNDEFINED = 4,
+ TPM_NUM_DURATIONS = 4,
+};
+
+struct tcg_efi_specid_event_algs {
+ u16 alg_id;
+ u16 digest_size;
+};
+
+struct tcg_efi_specid_event_head {
+ u8 signature[16];
+ u32 platform_class;
+ u8 spec_version_minor;
+ u8 spec_version_major;
+ u8 spec_errata;
+ u8 uintnsize;
+ u32 num_algs;
+ struct tcg_efi_specid_event_algs digest_sizes[0];
+};
+
+struct tcg_pcr_event {
+ u32 pcr_idx;
+ u32 event_type;
+ u8 digest[20];
+ u32 event_size;
+ u8 event[0];
+};
+
+struct tcg_event_field {
+ u32 event_size;
+ u8 event[0];
+};
+
+struct tcg_pcr_event2_head {
+ u32 pcr_idx;
+ u32 event_type;
+ u32 count;
+ struct tpm_digest digests[0];
+};
+
+typedef u64 efi_physical_addr_t;
+
+typedef struct {
+ u64 length;
+ u64 data;
+} efi_capsule_block_desc_t;
+
+struct efi_system_resource_entry_v1 {
+ efi_guid_t fw_class;
+ u32 fw_type;
+ u32 fw_version;
+ u32 lowest_supported_fw_version;
+ u32 capsule_flags;
+ u32 last_attempt_version;
+ u32 last_attempt_status;
+};
+
+struct efi_system_resource_table {
+ u32 fw_resource_count;
+ u32 fw_resource_count_max;
+ u64 fw_resource_version;
+ u8 entries[0];
+};
+
+struct esre_entry {
+ union {
+ struct efi_system_resource_entry_v1 *esre1;
+ } esre;
+ struct kobject kobj;
+ struct list_head list;
+};
+
+struct esre_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct esre_entry *, char *);
+ ssize_t (*store)(struct esre_entry *, const char *, size_t);
+};
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct pmc_reg_map {
+ const struct pmc_bit_map *d3_sts_0;
+ const struct pmc_bit_map *d3_sts_1;
+ const struct pmc_bit_map *func_dis;
+ const struct pmc_bit_map *func_dis_2;
+ const struct pmc_bit_map *pss;
+};
+
+struct pmc_data {
+ const struct pmc_reg_map *map;
+ const struct pmc_clk *clks;
+};
+
+struct pmc_dev {
+ u32 base_addr;
+ void *regmap;
+ const struct pmc_reg_map *map;
+ struct dentry *dbgfs_dir;
+ bool init;
+};
+
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+};
+
+struct nvmem_cell_lookup {
+ const char *nvmem_name;
+ const char *cell_name;
+ const char *dev_id;
+ const char *con_id;
+ struct list_head node;
+};
+
+enum {
+ NVMEM_ADD = 1,
+ NVMEM_REMOVE = 2,
+ NVMEM_CELL_ADD = 3,
+ NVMEM_CELL_REMOVE = 4,
+};
+
+typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t);
+
+typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t);
+
+enum nvmem_type {
+ NVMEM_TYPE_UNKNOWN = 0,
+ NVMEM_TYPE_EEPROM = 1,
+ NVMEM_TYPE_OTP = 2,
+ NVMEM_TYPE_BATTERY_BACKED = 3,
+};
+
+struct nvmem_config {
+ struct device *dev;
+ const char *name;
+ int id;
+ struct module *owner;
+ struct gpio_desc *wp_gpio;
+ const struct nvmem_cell_info *cells;
+ int ncells;
+ enum nvmem_type type;
+ bool read_only;
+ bool root_only;
+ bool no_of_node;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ int size;
+ int word_size;
+ int stride;
+ void *priv;
+ bool compat;
+ struct device *base_dev;
+};
+
+struct nvmem_cell_table {
+ const char *nvmem_name;
+ const struct nvmem_cell_info *cells;
+ size_t ncells;
+ struct list_head node;
+};
+
+struct nvmem_device {
+ struct module *owner;
+ struct device dev;
+ int stride;
+ int word_size;
+ int id;
+ struct kref refcnt;
+ size_t size;
+ bool read_only;
+ bool root_only;
+ int flags;
+ enum nvmem_type type;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
+ struct list_head cells;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ struct gpio_desc *wp_gpio;
+ void *priv;
+};
+
+struct nvmem_cell {
+ const char *name;
+ int offset;
+ int bytes;
+ int bit_offset;
+ int nbits;
+ struct device_node *np;
+ struct nvmem_device *nvmem;
+ struct list_head node;
+};
+
+struct net_device_devres {
+ struct net_device *ndev;
+};
+
+typedef u16 u_int16_t;
+
+struct __kernel_old_timespec {
+ __kernel_old_time_t tv_sec;
+ long int tv_nsec;
+};
+
+struct __kernel_sock_timeval {
+ __s64 tv_sec;
+ __s64 tv_usec;
+};
+
+struct mmsghdr {
+ struct user_msghdr msg_hdr;
+ unsigned int msg_len;
+};
+
+struct scm_timestamping_internal {
+ struct timespec64 ts[3];
+};
+
+enum sock_shutdown_cmd {
+ SHUT_RD = 0,
+ SHUT_WR = 1,
+ SHUT_RDWR = 2,
+};
+
+struct ifconf {
+ int ifc_len;
+ union {
+ char *ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+
+enum ip_conntrack_info {
+ IP_CT_ESTABLISHED = 0,
+ IP_CT_RELATED = 1,
+ IP_CT_NEW = 2,
+ IP_CT_IS_REPLY = 3,
+ IP_CT_ESTABLISHED_REPLY = 3,
+ IP_CT_RELATED_REPLY = 4,
+ IP_CT_NUMBER = 5,
+ IP_CT_UNTRACKED = 7,
+};
+
+struct nf_hook_state;
+
+typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *);
+
+struct nf_hook_entry {
+ nf_hookfn *hook;
+ void *priv;
+};
+
+struct nf_hook_entries {
+ u16 num_hook_entries;
+ struct nf_hook_entry hooks[0];
+};
+
+enum {
+ SOF_TIMESTAMPING_TX_HARDWARE = 1,
+ SOF_TIMESTAMPING_TX_SOFTWARE = 2,
+ SOF_TIMESTAMPING_RX_HARDWARE = 4,
+ SOF_TIMESTAMPING_RX_SOFTWARE = 8,
+ SOF_TIMESTAMPING_SOFTWARE = 16,
+ SOF_TIMESTAMPING_SYS_HARDWARE = 32,
+ SOF_TIMESTAMPING_RAW_HARDWARE = 64,
+ SOF_TIMESTAMPING_OPT_ID = 128,
+ SOF_TIMESTAMPING_TX_SCHED = 256,
+ SOF_TIMESTAMPING_TX_ACK = 512,
+ SOF_TIMESTAMPING_OPT_CMSG = 1024,
+ SOF_TIMESTAMPING_OPT_TSONLY = 2048,
+ SOF_TIMESTAMPING_OPT_STATS = 4096,
+ SOF_TIMESTAMPING_OPT_PKTINFO = 8192,
+ SOF_TIMESTAMPING_OPT_TX_SWHW = 16384,
+ SOF_TIMESTAMPING_LAST = 16384,
+ SOF_TIMESTAMPING_MASK = 32767,
+};
+
+struct scm_ts_pktinfo {
+ __u32 if_index;
+ __u32 pkt_length;
+ __u32 reserved[2];
+};
+
+struct socket_alloc {
+ struct socket socket;
+ struct inode vfs_inode;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct sock_skb_cb {
+ u32 dropcount;
+};
+
+struct nf_hook_state {
+ unsigned int hook;
+ u_int8_t pf;
+ struct net_device *in;
+ struct net_device *out;
+ struct sock *sk;
+ struct net *net;
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *);
+};
+
+enum nf_nat_manip_type {
+ NF_NAT_MANIP_SRC = 0,
+ NF_NAT_MANIP_DST = 1,
+};
+
+struct nf_conn;
+
+struct nf_nat_hook {
+ int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *);
+ void (*decode_session)(struct sk_buff *, struct flowi *);
+ unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn *, enum nf_nat_manip_type, enum ip_conntrack_dir);
+};
+
+struct nf_conntrack_zone {
+ u16 id;
+ u8 flags;
+ u8 dir;
+};
+
+union nf_inet_addr {
+ __u32 all[4];
+ __be32 ip;
+ __be32 ip6[4];
+ struct in_addr in;
+ struct in6_addr in6;
+};
+
+union nf_conntrack_man_proto {
+ __be16 all;
+ struct {
+ __be16 port;
+ } tcp;
+ struct {
+ __be16 port;
+ } udp;
+ struct {
+ __be16 id;
+ } icmp;
+ struct {
+ __be16 port;
+ } dccp;
+ struct {
+ __be16 port;
+ } sctp;
+ struct {
+ __be16 key;
+ } gre;
+};
+
+struct nf_conntrack_man {
+ union nf_inet_addr u3;
+ union nf_conntrack_man_proto u;
+ u_int16_t l3num;
+};
+
+struct nf_conntrack_tuple {
+ struct nf_conntrack_man src;
+ struct {
+ union nf_inet_addr u3;
+ union {
+ __be16 all;
+ struct {
+ __be16 port;
+ } tcp;
+ struct {
+ __be16 port;
+ } udp;
+ struct {
+ u_int8_t type;
+ u_int8_t code;
+ } icmp;
+ struct {
+ __be16 port;
+ } dccp;
+ struct {
+ __be16 port;
+ } sctp;
+ struct {
+ __be16 key;
+ } gre;
+ } u;
+ u_int8_t protonum;
+ u_int8_t dir;
+ } dst;
+};
+
+struct nf_conntrack_tuple_hash {
+ struct hlist_nulls_node hnnode;
+ struct nf_conntrack_tuple tuple;
+};
+
+typedef u32 u_int32_t;
+
+typedef u64 u_int64_t;
+
+struct nf_ct_dccp {
+ u_int8_t role[2];
+ u_int8_t state;
+ u_int8_t last_pkt;
+ u_int8_t last_dir;
+ u_int64_t handshake_seq;
+};
+
+struct ip_ct_sctp {
+ enum sctp_conntrack state;
+ __be32 vtag[2];
+};
+
+struct ip_ct_tcp_state {
+ u_int32_t td_end;
+ u_int32_t td_maxend;
+ u_int32_t td_maxwin;
+ u_int32_t td_maxack;
+ u_int8_t td_scale;
+ u_int8_t flags;
+};
+
+struct ip_ct_tcp {
+ struct ip_ct_tcp_state seen[2];
+ u_int8_t state;
+ u_int8_t last_dir;
+ u_int8_t retrans;
+ u_int8_t last_index;
+ u_int32_t last_seq;
+ u_int32_t last_ack;
+ u_int32_t last_end;
+ u_int16_t last_win;
+ u_int8_t last_wscale;
+ u_int8_t last_flags;
+};
+
+struct nf_ct_udp {
+ long unsigned int stream_ts;
+};
+
+struct nf_ct_gre {
+ unsigned int stream_timeout;
+ unsigned int timeout;
+};
+
+union nf_conntrack_proto {
+ struct nf_ct_dccp dccp;
+ struct ip_ct_sctp sctp;
+ struct ip_ct_tcp tcp;
+ struct nf_ct_udp udp;
+ struct nf_ct_gre gre;
+ unsigned int tmpl_padto;
+};
+
+struct nf_ct_ext;
+
+struct nf_conn {
+ struct nf_conntrack ct_general;
+ spinlock_t lock;
+ u32 timeout;
+ struct nf_conntrack_zone zone;
+ struct nf_conntrack_tuple_hash tuplehash[2];
+ long unsigned int status;
+ u16 cpu;
+ possible_net_t ct_net;
+ struct hlist_node nat_bysource;
+ struct { } __nfct_init_offset;
+ struct nf_conn *master;
+ u_int32_t mark;
+ struct nf_ct_ext *ext;
+ union nf_conntrack_proto proto;
+};
+
+struct nf_ct_hook {
+ int (*update)(struct net *, struct sk_buff *);
+ void (*destroy)(struct nf_conntrack *);
+ bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *);
+};
+
+struct nfnl_ct_hook {
+ struct nf_conn * (*get_ct)(const struct sk_buff *, enum ip_conntrack_info *);
+ size_t (*build_size)(const struct nf_conn *);
+ int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t);
+ int (*parse)(const struct nlattr *, struct nf_conn *);
+ int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32);
+ void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32);
+};
+
+struct inet6_skb_parm {
+ int iif;
+ __be16 ra;
+ __u16 dst0;
+ __u16 srcrt;
+ __u16 dst1;
+ __u16 lastopt;
+ __u16 nhoff;
+ __u16 flags;
+ __u16 frag_max_size;
+};
+
+struct inet_skb_parm {
+ int iif;
+ struct ip_options opt;
+ u16 flags;
+ u16 frag_max_size;
+};
+
+struct sock_extended_err {
+ __u32 ee_errno;
+ __u8 ee_origin;
+ __u8 ee_type;
+ __u8 ee_code;
+ __u8 ee_pad;
+ __u32 ee_info;
+ __u32 ee_data;
+};
+
+struct sock_exterr_skb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+ struct sock_extended_err ee;
+ u16 addr_offset;
+ __be16 port;
+ u8 opt_stats: 1;
+ u8 unused: 7;
+};
+
+struct used_address {
+ struct __kernel_sockaddr_storage name;
+ unsigned int name_len;
+};
+
+struct linger {
+ int l_onoff;
+ int l_linger;
+};
+
+struct ucred {
+ __u32 pid;
+ __u32 uid;
+ __u32 gid;
+};
+
+struct prot_inuse {
+ int val[64];
+};
+
+struct offload_callbacks {
+ struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t);
+ struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *);
+ int (*gro_complete)(struct sk_buff *, int);
+};
+
+enum txtime_flags {
+ SOF_TXTIME_DEADLINE_MODE = 1,
+ SOF_TXTIME_REPORT_ERRORS = 2,
+ SOF_TXTIME_FLAGS_LAST = 2,
+ SOF_TXTIME_FLAGS_MASK = 3,
+};
+
+struct sock_txtime {
+ __kernel_clockid_t clockid;
+ __u32 flags;
+};
+
+enum sk_pacing {
+ SK_PACING_NONE = 0,
+ SK_PACING_NEEDED = 1,
+ SK_PACING_FQ = 2,
+};
+
+struct sockcm_cookie {
+ u64 transmit_time;
+ u32 mark;
+ u16 tsflags;
+};
+
+struct fastopen_queue {
+ struct request_sock *rskq_rst_head;
+ struct request_sock *rskq_rst_tail;
+ spinlock_t lock;
+ int qlen;
+ int max_qlen;
+ struct tcp_fastopen_context *ctx;
+};
+
+struct request_sock_queue {
+ spinlock_t rskq_lock;
+ u8 rskq_defer_accept;
+ u32 synflood_warned;
+ atomic_t qlen;
+ atomic_t young;
+ struct request_sock *rskq_accept_head;
+ struct request_sock *rskq_accept_tail;
+ struct fastopen_queue fastopenq;
+};
+
+struct inet_connection_sock_af_ops {
+ int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *);
+ void (*send_check)(struct sock *, struct sk_buff *);
+ int (*rebuild_header)(struct sock *);
+ void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *);
+ int (*conn_request)(struct sock *, struct sk_buff *);
+ struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *);
+ u16 net_header_len;
+ u16 net_frag_header_len;
+ u16 sockaddr_len;
+ int (*setsockopt)(struct sock *, int, int, char *, unsigned int);
+ int (*getsockopt)(struct sock *, int, int, char *, int *);
+ void (*addr2sockaddr)(struct sock *, struct sockaddr *);
+ void (*mtu_reduced)(struct sock *);
+};
+
+struct inet_bind_bucket;
+
+struct tcp_ulp_ops;
+
+struct inet_connection_sock {
+ struct inet_sock icsk_inet;
+ struct request_sock_queue icsk_accept_queue;
+ struct inet_bind_bucket *icsk_bind_hash;
+ long unsigned int icsk_timeout;
+ struct timer_list icsk_retransmit_timer;
+ struct timer_list icsk_delack_timer;
+ __u32 icsk_rto;
+ __u32 icsk_pmtu_cookie;
+ const struct tcp_congestion_ops *icsk_ca_ops;
+ const struct inet_connection_sock_af_ops *icsk_af_ops;
+ const struct tcp_ulp_ops *icsk_ulp_ops;
+ void *icsk_ulp_data;
+ void (*icsk_clean_acked)(struct sock *, u32);
+ struct hlist_node icsk_listen_portaddr_node;
+ unsigned int (*icsk_sync_mss)(struct sock *, u32);
+ __u8 icsk_ca_state: 6;
+ __u8 icsk_ca_setsockopt: 1;
+ __u8 icsk_ca_dst_locked: 1;
+ __u8 icsk_retransmits;
+ __u8 icsk_pending;
+ __u8 icsk_backoff;
+ __u8 icsk_syn_retries;
+ __u8 icsk_probes_out;
+ __u16 icsk_ext_hdr_len;
+ struct {
+ __u8 pending;
+ __u8 quick;
+ __u8 pingpong;
+ __u8 blocked;
+ __u32 ato;
+ long unsigned int timeout;
+ __u32 lrcvtime;
+ __u16 last_seg_size;
+ __u16 rcv_mss;
+ } icsk_ack;
+ struct {
+ int enabled;
+ int search_high;
+ int search_low;
+ int probe_size;
+ u32 probe_timestamp;
+ } icsk_mtup;
+ u32 icsk_user_timeout;
+ u64 icsk_ca_priv[13];
+};
+
+struct inet_bind_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ short unsigned int port;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
+ struct in6_addr fast_v6_rcv_saddr;
+ __be32 fast_rcv_saddr;
+ short unsigned int fast_sk_family;
+ bool fast_ipv6_only;
+ struct hlist_node node;
+ struct hlist_head owners;
+};
+
+struct tcp_ulp_ops {
+ struct list_head list;
+ int (*init)(struct sock *);
+ void (*update)(struct sock *, struct proto *, void (*)(struct sock *));
+ void (*release)(struct sock *);
+ int (*get_info)(const struct sock *, struct sk_buff *);
+ size_t (*get_info_size)(const struct sock *);
+ void (*clone)(const struct request_sock *, struct sock *, const gfp_t);
+ char name[16];
+ struct module *owner;
+};
+
+struct tcp_sack_block {
+ u32 start_seq;
+ u32 end_seq;
+};
+
+struct tcp_options_received {
+ int ts_recent_stamp;
+ u32 ts_recent;
+ u32 rcv_tsval;
+ u32 rcv_tsecr;
+ u16 saw_tstamp: 1;
+ u16 tstamp_ok: 1;
+ u16 dsack: 1;
+ u16 wscale_ok: 1;
+ u16 sack_ok: 3;
+ u16 smc_ok: 1;
+ u16 snd_wscale: 4;
+ u16 rcv_wscale: 4;
+ u8 num_sacks;
+ u16 user_mss;
+ u16 mss_clamp;
+};
+
+struct tcp_rack {
+ u64 mstamp;
+ u32 rtt_us;
+ u32 end_seq;
+ u32 last_delivered;
+ u8 reo_wnd_steps;
+ u8 reo_wnd_persist: 5;
+ u8 dsack_seen: 1;
+ u8 advanced: 1;
+};
+
+struct tcp_sock_af_ops;
+
+struct tcp_md5sig_info;
+
+struct tcp_fastopen_request;
+
+struct tcp_sock {
+ struct inet_connection_sock inet_conn;
+ u16 tcp_header_len;
+ u16 gso_segs;
+ __be32 pred_flags;
+ u64 bytes_received;
+ u32 segs_in;
+ u32 data_segs_in;
+ u32 rcv_nxt;
+ u32 copied_seq;
+ u32 rcv_wup;
+ u32 snd_nxt;
+ u32 segs_out;
+ u32 data_segs_out;
+ u64 bytes_sent;
+ u64 bytes_acked;
+ u32 dsack_dups;
+ u32 snd_una;
+ u32 snd_sml;
+ u32 rcv_tstamp;
+ u32 lsndtime;
+ u32 last_oow_ack_time;
+ u32 compressed_ack_rcv_nxt;
+ u32 tsoffset;
+ struct list_head tsq_node;
+ struct list_head tsorted_sent_queue;
+ u32 snd_wl1;
+ u32 snd_wnd;
+ u32 max_window;
+ u32 mss_cache;
+ u32 window_clamp;
+ u32 rcv_ssthresh;
+ struct tcp_rack rack;
+ u16 advmss;
+ u8 compressed_ack;
+ u8 dup_ack_counter;
+ u32 chrono_start;
+ u32 chrono_stat[3];
+ u8 chrono_type: 2;
+ u8 rate_app_limited: 1;
+ u8 fastopen_connect: 1;
+ u8 fastopen_no_cookie: 1;
+ u8 is_sack_reneg: 1;
+ u8 fastopen_client_fail: 2;
+ u8 nonagle: 4;
+ u8 thin_lto: 1;
+ u8 recvmsg_inq: 1;
+ u8 repair: 1;
+ u8 frto: 1;
+ u8 repair_queue;
+ u8 syn_data: 1;
+ u8 syn_fastopen: 1;
+ u8 syn_fastopen_exp: 1;
+ u8 syn_fastopen_ch: 1;
+ u8 syn_data_acked: 1;
+ u8 save_syn: 1;
+ u8 is_cwnd_limited: 1;
+ u8 syn_smc: 1;
+ u32 tlp_high_seq;
+ u32 tcp_tx_delay;
+ u64 tcp_wstamp_ns;
+ u64 tcp_clock_cache;
+ u64 tcp_mstamp;
+ u32 srtt_us;
+ u32 mdev_us;
+ u32 mdev_max_us;
+ u32 rttvar_us;
+ u32 rtt_seq;
+ struct minmax rtt_min;
+ u32 packets_out;
+ u32 retrans_out;
+ u32 max_packets_out;
+ u32 max_packets_seq;
+ u16 urg_data;
+ u8 ecn_flags;
+ u8 keepalive_probes;
+ u32 reordering;
+ u32 reord_seen;
+ u32 snd_up;
+ struct tcp_options_received rx_opt;
+ u32 snd_ssthresh;
+ u32 snd_cwnd;
+ u32 snd_cwnd_cnt;
+ u32 snd_cwnd_clamp;
+ u32 snd_cwnd_used;
+ u32 snd_cwnd_stamp;
+ u32 prior_cwnd;
+ u32 prr_delivered;
+ u32 prr_out;
+ u32 delivered;
+ u32 delivered_ce;
+ u32 lost;
+ u32 app_limited;
+ u64 first_tx_mstamp;
+ u64 delivered_mstamp;
+ u32 rate_delivered;
+ u32 rate_interval_us;
+ u32 rcv_wnd;
+ u32 write_seq;
+ u32 notsent_lowat;
+ u32 pushed_seq;
+ u32 lost_out;
+ u32 sacked_out;
+ struct hrtimer pacing_timer;
+ struct hrtimer compressed_ack_timer;
+ struct sk_buff *lost_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb;
+ struct tcp_sack_block duplicate_sack[1];
+ struct tcp_sack_block selective_acks[4];
+ struct tcp_sack_block recv_sack_cache[4];
+ struct sk_buff *highest_sack;
+ int lost_cnt_hint;
+ u32 prior_ssthresh;
+ u32 high_seq;
+ u32 retrans_stamp;
+ u32 undo_marker;
+ int undo_retrans;
+ u64 bytes_retrans;
+ u32 total_retrans;
+ u32 urg_seq;
+ unsigned int keepalive_time;
+ unsigned int keepalive_intvl;
+ int linger2;
+ u8 bpf_sock_ops_cb_flags;
+ u16 timeout_rehash;
+ u32 rcv_ooopack;
+ u32 rcv_rtt_last_tsecr;
+ struct {
+ u32 rtt_us;
+ u32 seq;
+ u64 time;
+ } rcv_rtt_est;
+ struct {
+ u32 space;
+ u32 seq;
+ u64 time;
+ } rcvq_space;
+ struct {
+ u32 probe_seq_start;
+ u32 probe_seq_end;
+ } mtu_probe;
+ u32 mtu_info;
+ const struct tcp_sock_af_ops *af_specific;
+ struct tcp_md5sig_info *md5sig_info;
+ struct tcp_fastopen_request *fastopen_req;
+ struct request_sock *fastopen_rsk;
+ u32 *saved_syn;
+};
+
+struct tcp_sock_af_ops {
+ struct tcp_md5sig_key * (*md5_lookup)(const struct sock *, const struct sock *);
+ int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *);
+ int (*md5_parse)(struct sock *, int, char *, int);
+};
+
+struct tcp_md5sig_info {
+ struct hlist_head head;
+ struct callback_head rcu;
+};
+
+struct tcp_fastopen_request {
+ struct tcp_fastopen_cookie cookie;
+ struct msghdr *data;
+ size_t size;
+ int copied;
+ struct ubuf_info *uarg;
+};
+
+struct net_protocol {
+ int (*early_demux)(struct sk_buff *);
+ int (*early_demux_handler)(struct sk_buff *);
+ int (*handler)(struct sk_buff *);
+ int (*err_handler)(struct sk_buff *, u32);
+ unsigned int no_policy: 1;
+ unsigned int netns_ok: 1;
+ unsigned int icmp_strict_tag_validation: 1;
+};
+
+struct inet6_protocol {
+ void (*early_demux)(struct sk_buff *);
+ void (*early_demux_handler)(struct sk_buff *);
+ int (*handler)(struct sk_buff *);
+ int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32);
+ unsigned int flags;
+};
+
+struct net_offload {
+ struct offload_callbacks callbacks;
+ unsigned int flags;
+};
+
+struct cgroup_cls_state {
+ struct cgroup_subsys_state css;
+ u32 classid;
+};
+
+enum {
+ SK_MEMINFO_RMEM_ALLOC = 0,
+ SK_MEMINFO_RCVBUF = 1,
+ SK_MEMINFO_WMEM_ALLOC = 2,
+ SK_MEMINFO_SNDBUF = 3,
+ SK_MEMINFO_FWD_ALLOC = 4,
+ SK_MEMINFO_WMEM_QUEUED = 5,
+ SK_MEMINFO_OPTMEM = 6,
+ SK_MEMINFO_BACKLOG = 7,
+ SK_MEMINFO_DROPS = 8,
+ SK_MEMINFO_VARS = 9,
+};
+
+enum sknetlink_groups {
+ SKNLGRP_NONE = 0,
+ SKNLGRP_INET_TCP_DESTROY = 1,
+ SKNLGRP_INET_UDP_DESTROY = 2,
+ SKNLGRP_INET6_TCP_DESTROY = 3,
+ SKNLGRP_INET6_UDP_DESTROY = 4,
+ __SKNLGRP_MAX = 5,
+};
+
+struct inet_request_sock {
+ struct request_sock req;
+ u16 snd_wscale: 4;
+ u16 rcv_wscale: 4;
+ u16 tstamp_ok: 1;
+ u16 sack_ok: 1;
+ u16 wscale_ok: 1;
+ u16 ecn_ok: 1;
+ u16 acked: 1;
+ u16 no_srccheck: 1;
+ u16 smc_ok: 1;
+ u32 ir_mark;
+ union {
+ struct ip_options_rcu *ireq_opt;
+ struct {
+ struct ipv6_txoptions *ipv6_opt;
+ struct sk_buff *pktopts;
+ };
+ };
+};
+
+struct tcp_request_sock {
+ struct inet_request_sock req;
+ const struct tcp_request_sock_ops *af_specific;
+ u64 snt_synack;
+ bool tfo_listener;
+ bool is_mptcp;
+ u32 txhash;
+ u32 rcv_isn;
+ u32 snt_isn;
+ u32 ts_off;
+ u32 last_oow_ack_time;
+ u32 rcv_nxt;
+};
+
+enum {
+ SKB_FCLONE_UNAVAILABLE = 0,
+ SKB_FCLONE_ORIG = 1,
+ SKB_FCLONE_CLONE = 2,
+};
+
+struct sk_buff_fclones {
+ struct sk_buff skb1;
+ struct sk_buff skb2;
+ refcount_t fclone_ref;
+};
+
+struct skb_seq_state {
+ __u32 lower_offset;
+ __u32 upper_offset;
+ __u32 frag_idx;
+ __u32 stepped_offset;
+ struct sk_buff *root_skb;
+ struct sk_buff *cur_skb;
+ __u8 *frag_data;
+};
+
+struct skb_gso_cb {
+ union {
+ int mac_offset;
+ int data_offset;
+ };
+ int encap_level;
+ __wsum csum;
+ __u16 csum_start;
+};
+
+struct napi_gro_cb {
+ void *frag0;
+ unsigned int frag0_len;
+ int data_offset;
+ u16 flush;
+ u16 flush_id;
+ u16 count;
+ u16 gro_remcsum_start;
+ long unsigned int age;
+ u16 proto;
+ u8 same_flow: 1;
+ u8 encap_mark: 1;
+ u8 csum_valid: 1;
+ u8 csum_cnt: 3;
+ u8 free: 2;
+ u8 is_ipv6: 1;
+ u8 is_fou: 1;
+ u8 is_atomic: 1;
+ u8 recursion_counter: 4;
+ u8 is_flist: 1;
+ __wsum csum;
+ struct sk_buff *last;
+};
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+struct vlan_ethhdr {
+ unsigned char h_dest[6];
+ unsigned char h_source[6];
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+enum sctp_msg_flags {
+ MSG_NOTIFICATION = 32768,
+};
+
+struct ip_auth_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __be16 reserved;
+ __be32 spi;
+ __be32 seq_no;
+ __u8 auth_data[0];
+};
+
+struct frag_hdr {
+ __u8 nexthdr;
+ __u8 reserved;
+ __be16 frag_off;
+ __be32 identification;
+};
+
+enum {
+ SCM_TSTAMP_SND = 0,
+ SCM_TSTAMP_SCHED = 1,
+ SCM_TSTAMP_ACK = 2,
+};
+
+struct xfrm_offload {
+ struct {
+ __u32 low;
+ __u32 hi;
+ } seq;
+ __u32 flags;
+ __u32 status;
+ __u8 proto;
+};
+
+struct sec_path {
+ int len;
+ int olen;
+ struct xfrm_state *xvec[6];
+ struct xfrm_offload ovec[1];
+};
+
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
+struct napi_alloc_cache {
+ struct page_frag_cache page;
+ unsigned int skb_count;
+ void *skb_cache[64];
+};
+
+struct scm_cookie {
+ struct pid *pid;
+ struct scm_fp_list *fp;
+ struct scm_creds creds;
+};
+
+struct scm_timestamping {
+ struct __kernel_old_timespec ts[3];
+};
+
+struct scm_timestamping64 {
+ struct __kernel_timespec ts[3];
+};
+
+enum {
+ TCA_STATS_UNSPEC = 0,
+ TCA_STATS_BASIC = 1,
+ TCA_STATS_RATE_EST = 2,
+ TCA_STATS_QUEUE = 3,
+ TCA_STATS_APP = 4,
+ TCA_STATS_RATE_EST64 = 5,
+ TCA_STATS_PAD = 6,
+ TCA_STATS_BASIC_HW = 7,
+ TCA_STATS_PKT64 = 8,
+ __TCA_STATS_MAX = 9,
+};
+
+struct gnet_stats_basic {
+ __u64 bytes;
+ __u32 packets;
+};
+
+struct gnet_stats_rate_est {
+ __u32 bps;
+ __u32 pps;
+};
+
+struct gnet_stats_rate_est64 {
+ __u64 bps;
+ __u64 pps;
+};
+
+struct gnet_estimator {
+ signed char interval;
+ unsigned char ewma_log;
+};
+
+struct net_rate_estimator {
+ struct gnet_stats_basic_packed *bstats;
+ spinlock_t *stats_lock;
+ seqcount_t *running;
+ struct gnet_stats_basic_cpu *cpu_bstats;
+ u8 ewma_log;
+ u8 intvl_log;
+ seqcount_t seq;
+ u64 last_packets;
+ u64 last_bytes;
+ u64 avpps;
+ u64 avbps;
+ long unsigned int next_jiffies;
+ struct timer_list timer;
+ struct callback_head rcu;
+};
+
+enum {
+ RTM_BASE = 16,
+ RTM_NEWLINK = 16,
+ RTM_DELLINK = 17,
+ RTM_GETLINK = 18,
+ RTM_SETLINK = 19,
+ RTM_NEWADDR = 20,
+ RTM_DELADDR = 21,
+ RTM_GETADDR = 22,
+ RTM_NEWROUTE = 24,
+ RTM_DELROUTE = 25,
+ RTM_GETROUTE = 26,
+ RTM_NEWNEIGH = 28,
+ RTM_DELNEIGH = 29,
+ RTM_GETNEIGH = 30,
+ RTM_NEWRULE = 32,
+ RTM_DELRULE = 33,
+ RTM_GETRULE = 34,
+ RTM_NEWQDISC = 36,
+ RTM_DELQDISC = 37,
+ RTM_GETQDISC = 38,
+ RTM_NEWTCLASS = 40,
+ RTM_DELTCLASS = 41,
+ RTM_GETTCLASS = 42,
+ RTM_NEWTFILTER = 44,
+ RTM_DELTFILTER = 45,
+ RTM_GETTFILTER = 46,
+ RTM_NEWACTION = 48,
+ RTM_DELACTION = 49,
+ RTM_GETACTION = 50,
+ RTM_NEWPREFIX = 52,
+ RTM_GETMULTICAST = 58,
+ RTM_GETANYCAST = 62,
+ RTM_NEWNEIGHTBL = 64,
+ RTM_GETNEIGHTBL = 66,
+ RTM_SETNEIGHTBL = 67,
+ RTM_NEWNDUSEROPT = 68,
+ RTM_NEWADDRLABEL = 72,
+ RTM_DELADDRLABEL = 73,
+ RTM_GETADDRLABEL = 74,
+ RTM_GETDCB = 78,
+ RTM_SETDCB = 79,
+ RTM_NEWNETCONF = 80,
+ RTM_DELNETCONF = 81,
+ RTM_GETNETCONF = 82,
+ RTM_NEWMDB = 84,
+ RTM_DELMDB = 85,
+ RTM_GETMDB = 86,
+ RTM_NEWNSID = 88,
+ RTM_DELNSID = 89,
+ RTM_GETNSID = 90,
+ RTM_NEWSTATS = 92,
+ RTM_GETSTATS = 94,
+ RTM_NEWCACHEREPORT = 96,
+ RTM_NEWCHAIN = 100,
+ RTM_DELCHAIN = 101,
+ RTM_GETCHAIN = 102,
+ RTM_NEWNEXTHOP = 104,
+ RTM_DELNEXTHOP = 105,
+ RTM_GETNEXTHOP = 106,
+ RTM_NEWLINKPROP = 108,
+ RTM_DELLINKPROP = 109,
+ RTM_GETLINKPROP = 110,
+ RTM_NEWVLAN = 112,
+ RTM_DELVLAN = 113,
+ RTM_GETVLAN = 114,
+ __RTM_MAX = 115,
+};
+
+struct rtgenmsg {
+ unsigned char rtgen_family;
+};
+
+enum rtnetlink_groups {
+ RTNLGRP_NONE = 0,
+ RTNLGRP_LINK = 1,
+ RTNLGRP_NOTIFY = 2,
+ RTNLGRP_NEIGH = 3,
+ RTNLGRP_TC = 4,
+ RTNLGRP_IPV4_IFADDR = 5,
+ RTNLGRP_IPV4_MROUTE = 6,
+ RTNLGRP_IPV4_ROUTE = 7,
+ RTNLGRP_IPV4_RULE = 8,
+ RTNLGRP_IPV6_IFADDR = 9,
+ RTNLGRP_IPV6_MROUTE = 10,
+ RTNLGRP_IPV6_ROUTE = 11,
+ RTNLGRP_IPV6_IFINFO = 12,
+ RTNLGRP_DECnet_IFADDR = 13,
+ RTNLGRP_NOP2 = 14,
+ RTNLGRP_DECnet_ROUTE = 15,
+ RTNLGRP_DECnet_RULE = 16,
+ RTNLGRP_NOP4 = 17,
+ RTNLGRP_IPV6_PREFIX = 18,
+ RTNLGRP_IPV6_RULE = 19,
+ RTNLGRP_ND_USEROPT = 20,
+ RTNLGRP_PHONET_IFADDR = 21,
+ RTNLGRP_PHONET_ROUTE = 22,
+ RTNLGRP_DCB = 23,
+ RTNLGRP_IPV4_NETCONF = 24,
+ RTNLGRP_IPV6_NETCONF = 25,
+ RTNLGRP_MDB = 26,
+ RTNLGRP_MPLS_ROUTE = 27,
+ RTNLGRP_NSID = 28,
+ RTNLGRP_MPLS_NETCONF = 29,
+ RTNLGRP_IPV4_MROUTE_R = 30,
+ RTNLGRP_IPV6_MROUTE_R = 31,
+ RTNLGRP_NEXTHOP = 32,
+ RTNLGRP_BRVLAN = 33,
+ __RTNLGRP_MAX = 34,
+};
+
+enum {
+ NETNSA_NONE = 0,
+ NETNSA_NSID = 1,
+ NETNSA_PID = 2,
+ NETNSA_FD = 3,
+ NETNSA_TARGET_NSID = 4,
+ NETNSA_CURRENT_NSID = 5,
+ __NETNSA_MAX = 6,
+};
+
+enum rtnl_link_flags {
+ RTNL_FLAG_DOIT_UNLOCKED = 1,
+};
+
+struct net_fill_args {
+ u32 portid;
+ u32 seq;
+ int flags;
+ int cmd;
+ int nsid;
+ bool add_ref;
+ int ref_nsid;
+};
+
+struct rtnl_net_dump_cb {
+ struct net *tgt_net;
+ struct net *ref_net;
+ struct sk_buff *skb;
+ struct net_fill_args fillargs;
+ int idx;
+ int s_idx;
+};
+
+enum flow_dissect_ret {
+ FLOW_DISSECT_RET_OUT_GOOD = 0,
+ FLOW_DISSECT_RET_OUT_BAD = 1,
+ FLOW_DISSECT_RET_PROTO_AGAIN = 2,
+ FLOW_DISSECT_RET_IPPROTO_AGAIN = 3,
+ FLOW_DISSECT_RET_CONTINUE = 4,
+};
+
+struct flow_dissector_key_tags {
+ u32 flow_label;
+};
+
+struct flow_dissector_key_vlan {
+ union {
+ struct {
+ u16 vlan_id: 12;
+ u16 vlan_dei: 1;
+ u16 vlan_priority: 3;
+ };
+ __be16 vlan_tci;
+ };
+ __be16 vlan_tpid;
+};
+
+struct flow_dissector_mpls_lse {
+ u32 mpls_ttl: 8;
+ u32 mpls_bos: 1;
+ u32 mpls_tc: 3;
+ u32 mpls_label: 20;
+};
+
+struct flow_dissector_key_mpls {
+ struct flow_dissector_mpls_lse ls[7];
+ u8 used_lses;
+};
+
+struct flow_dissector_key_enc_opts {
+ u8 data[255];
+ u8 len;
+ __be16 dst_opt_type;
+};
+
+struct flow_dissector_key_keyid {
+ __be32 keyid;
+};
+
+struct flow_dissector_key_ipv4_addrs {
+ __be32 src;
+ __be32 dst;
+};
+
+struct flow_dissector_key_ipv6_addrs {
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
+struct flow_dissector_key_tipc {
+ __be32 key;
+};
+
+struct flow_dissector_key_addrs {
+ union {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_tipc tipckey;
+ };
+};
+
+struct flow_dissector_key_arp {
+ __u32 sip;
+ __u32 tip;
+ __u8 op;
+ unsigned char sha[6];
+ unsigned char tha[6];
+};
+
+struct flow_dissector_key_ports {
+ union {
+ __be32 ports;
+ struct {
+ __be16 src;
+ __be16 dst;
+ };
+ };
+};
+
+struct flow_dissector_key_icmp {
+ struct {
+ u8 type;
+ u8 code;
+ };
+ u16 id;
+};
+
+struct flow_dissector_key_eth_addrs {
+ unsigned char dst[6];
+ unsigned char src[6];
+};
+
+struct flow_dissector_key_tcp {
+ __be16 flags;
+};
+
+struct flow_dissector_key_ip {
+ __u8 tos;
+ __u8 ttl;
+};
+
+struct flow_dissector_key_meta {
+ int ingress_ifindex;
+ u16 ingress_iftype;
+};
+
+struct flow_dissector_key_ct {
+ u16 ct_state;
+ u16 ct_zone;
+ u32 ct_mark;
+ u32 ct_labels[4];
+};
+
+struct flow_dissector_key {
+ enum flow_dissector_key_id key_id;
+ size_t offset;
+};
+
+struct flow_keys {
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
+ struct flow_dissector_key_keyid keyid;
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_icmp icmp;
+ struct flow_dissector_key_addrs addrs;
+ int: 32;
+};
+
+struct flow_keys_digest {
+ u8 data[16];
+};
+
+struct xt_table_info;
+
+struct xt_table {
+ struct list_head list;
+ unsigned int valid_hooks;
+ struct xt_table_info *private;
+ struct module *me;
+ u_int8_t af;
+ int priority;
+ int (*table_init)(struct net *);
+ const char name[32];
+};
+
+struct nf_ct_event;
+
+struct nf_ct_event_notifier {
+ int (*fcn)(unsigned int, struct nf_ct_event *);
+};
+
+struct nf_exp_event;
+
+struct nf_exp_event_notifier {
+ int (*fcn)(unsigned int, struct nf_exp_event *);
+};
+
+enum bpf_ret_code {
+ BPF_OK = 0,
+ BPF_DROP = 2,
+ BPF_REDIRECT = 7,
+ BPF_LWT_REROUTE = 128,
+};
+
+enum {
+ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1,
+ BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2,
+ BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4,
+};
+
+enum devlink_port_type {
+ DEVLINK_PORT_TYPE_NOTSET = 0,
+ DEVLINK_PORT_TYPE_AUTO = 1,
+ DEVLINK_PORT_TYPE_ETH = 2,
+ DEVLINK_PORT_TYPE_IB = 3,
+};
+
+enum devlink_port_flavour {
+ DEVLINK_PORT_FLAVOUR_PHYSICAL = 0,
+ DEVLINK_PORT_FLAVOUR_CPU = 1,
+ DEVLINK_PORT_FLAVOUR_DSA = 2,
+ DEVLINK_PORT_FLAVOUR_PCI_PF = 3,
+ DEVLINK_PORT_FLAVOUR_PCI_VF = 4,
+ DEVLINK_PORT_FLAVOUR_VIRTUAL = 5,
+};
+
+struct devlink_port_phys_attrs {
+ u32 port_number;
+ u32 split_subport_number;
+};
+
+struct devlink_port_pci_pf_attrs {
+ u16 pf;
+};
+
+struct devlink_port_pci_vf_attrs {
+ u16 pf;
+ u16 vf;
+};
+
+struct devlink_port_attrs {
+ u8 set: 1;
+ u8 split: 1;
+ u8 switch_port: 1;
+ enum devlink_port_flavour flavour;
+ struct netdev_phys_item_id switch_id;
+ union {
+ struct devlink_port_phys_attrs phys;
+ struct devlink_port_pci_pf_attrs pci_pf;
+ struct devlink_port_pci_vf_attrs pci_vf;
+ };
+};
+
+struct devlink;
+
+struct devlink_port {
+ struct list_head list;
+ struct list_head param_list;
+ struct devlink *devlink;
+ unsigned int index;
+ bool registered;
+ spinlock_t type_lock;
+ enum devlink_port_type type;
+ enum devlink_port_type desired_type;
+ void *type_dev;
+ struct devlink_port_attrs attrs;
+ struct delayed_work type_warn_dw;
+};
+
+struct ip_tunnel_parm {
+ char name[16];
+ int link;
+ __be16 i_flags;
+ __be16 o_flags;
+ __be32 i_key;
+ __be32 o_key;
+ struct iphdr iph;
+};
+
+struct ip_tunnel_key {
+ __be64 tun_id;
+ union {
+ struct {
+ __be32 src;
+ __be32 dst;
+ } ipv4;
+ struct {
+ struct in6_addr src;
+ struct in6_addr dst;
+ } ipv6;
+ } u;
+ __be16 tun_flags;
+ u8 tos;
+ u8 ttl;
+ __be32 label;
+ __be16 tp_src;
+ __be16 tp_dst;
+};
+
+struct dst_cache_pcpu;
+
+struct dst_cache {
+ struct dst_cache_pcpu *cache;
+ long unsigned int reset_ts;
+};
+
+struct ip_tunnel_info {
+ struct ip_tunnel_key key;
+ struct dst_cache dst_cache;
+ u8 options_len;
+ u8 mode;
+};
+
+struct lwtunnel_state {
+ __u16 type;
+ __u16 flags;
+ __u16 headroom;
+ atomic_t refcnt;
+ int (*orig_output)(struct net *, struct sock *, struct sk_buff *);
+ int (*orig_input)(struct sk_buff *);
+ struct callback_head rcu;
+ __u8 data[0];
+};
+
+union tcp_word_hdr {
+ struct tcphdr hdr;
+ __be32 words[5];
+};
+
+enum devlink_sb_pool_type {
+ DEVLINK_SB_POOL_TYPE_INGRESS = 0,
+ DEVLINK_SB_POOL_TYPE_EGRESS = 1,
+};
+
+enum devlink_sb_threshold_type {
+ DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0,
+ DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 1,
+};
+
+enum devlink_eswitch_encap_mode {
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0,
+ DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1,
+};
+
+enum devlink_trap_action {
+ DEVLINK_TRAP_ACTION_DROP = 0,
+ DEVLINK_TRAP_ACTION_TRAP = 1,
+ DEVLINK_TRAP_ACTION_MIRROR = 2,
+};
+
+enum devlink_trap_type {
+ DEVLINK_TRAP_TYPE_DROP = 0,
+ DEVLINK_TRAP_TYPE_EXCEPTION = 1,
+ DEVLINK_TRAP_TYPE_CONTROL = 2,
+};
+
+enum devlink_dpipe_field_mapping_type {
+ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0,
+ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1,
+};
+
+struct devlink_dpipe_headers;
+
+struct devlink_ops;
+
+struct devlink {
+ struct list_head list;
+ struct list_head port_list;
+ struct list_head sb_list;
+ struct list_head dpipe_table_list;
+ struct list_head resource_list;
+ struct list_head param_list;
+ struct list_head region_list;
+ struct list_head reporter_list;
+ struct mutex reporters_lock;
+ struct devlink_dpipe_headers *dpipe_headers;
+ struct list_head trap_list;
+ struct list_head trap_group_list;
+ struct list_head trap_policer_list;
+ const struct devlink_ops *ops;
+ struct xarray snapshot_ids;
+ struct device *dev;
+ possible_net_t _net;
+ struct mutex lock;
+ u8 reload_failed: 1;
+ u8 reload_enabled: 1;
+ u8 registered: 1;
+ long: 61;
+ long: 64;
+ long: 64;
+ long: 64;
+ char priv[0];
+};
+
+struct devlink_dpipe_header;
+
+struct devlink_dpipe_headers {
+ struct devlink_dpipe_header **headers;
+ unsigned int headers_count;
+};
+
+struct devlink_info_req;
+
+struct devlink_sb_pool_info;
+
+struct devlink_trap;
+
+struct devlink_trap_group;
+
+struct devlink_trap_policer;
+
+struct devlink_ops {
+ int (*reload_down)(struct devlink *, bool, struct netlink_ext_ack *);
+ int (*reload_up)(struct devlink *, struct netlink_ext_ack *);
+ int (*port_type_set)(struct devlink_port *, enum devlink_port_type);
+ int (*port_split)(struct devlink *, unsigned int, unsigned int, struct netlink_ext_ack *);
+ int (*port_unsplit)(struct devlink *, unsigned int, struct netlink_ext_ack *);
+ int (*sb_pool_get)(struct devlink *, unsigned int, u16, struct devlink_sb_pool_info *);
+ int (*sb_pool_set)(struct devlink *, unsigned int, u16, u32, enum devlink_sb_threshold_type, struct netlink_ext_ack *);
+ int (*sb_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *);
+ int (*sb_port_pool_set)(struct devlink_port *, unsigned int, u16, u32, struct netlink_ext_ack *);
+ int (*sb_tc_pool_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *);
+ int (*sb_tc_pool_bind_set)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16, u32, struct netlink_ext_ack *);
+ int (*sb_occ_snapshot)(struct devlink *, unsigned int);
+ int (*sb_occ_max_clear)(struct devlink *, unsigned int);
+ int (*sb_occ_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *, u32 *);
+ int (*sb_occ_tc_port_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u32 *, u32 *);
+ int (*eswitch_mode_get)(struct devlink *, u16 *);
+ int (*eswitch_mode_set)(struct devlink *, u16, struct netlink_ext_ack *);
+ int (*eswitch_inline_mode_get)(struct devlink *, u8 *);
+ int (*eswitch_inline_mode_set)(struct devlink *, u8, struct netlink_ext_ack *);
+ int (*eswitch_encap_mode_get)(struct devlink *, enum devlink_eswitch_encap_mode *);
+ int (*eswitch_encap_mode_set)(struct devlink *, enum devlink_eswitch_encap_mode, struct netlink_ext_ack *);
+ int (*info_get)(struct devlink *, struct devlink_info_req *, struct netlink_ext_ack *);
+ int (*flash_update)(struct devlink *, const char *, const char *, struct netlink_ext_ack *);
+ int (*trap_init)(struct devlink *, const struct devlink_trap *, void *);
+ void (*trap_fini)(struct devlink *, const struct devlink_trap *, void *);
+ int (*trap_action_set)(struct devlink *, const struct devlink_trap *, enum devlink_trap_action);
+ int (*trap_group_init)(struct devlink *, const struct devlink_trap_group *);
+ int (*trap_group_set)(struct devlink *, const struct devlink_trap_group *, const struct devlink_trap_policer *);
+ int (*trap_policer_init)(struct devlink *, const struct devlink_trap_policer *);
+ void (*trap_policer_fini)(struct devlink *, const struct devlink_trap_policer *);
+ int (*trap_policer_set)(struct devlink *, const struct devlink_trap_policer *, u64, u64, struct netlink_ext_ack *);
+ int (*trap_policer_counter_get)(struct devlink *, const struct devlink_trap_policer *, u64 *);
+};
+
+struct devlink_sb_pool_info {
+ enum devlink_sb_pool_type pool_type;
+ u32 size;
+ enum devlink_sb_threshold_type threshold_type;
+ u32 cell_size;
+};
+
+struct devlink_dpipe_field {
+ const char *name;
+ unsigned int id;
+ unsigned int bitwidth;
+ enum devlink_dpipe_field_mapping_type mapping_type;
+};
+
+struct devlink_dpipe_header {
+ const char *name;
+ unsigned int id;
+ struct devlink_dpipe_field *fields;
+ unsigned int fields_count;
+ bool global;
+};
+
+struct devlink_trap_policer {
+ u32 id;
+ u64 init_rate;
+ u64 init_burst;
+ u64 max_rate;
+ u64 min_rate;
+ u64 max_burst;
+ u64 min_burst;
+};
+
+struct devlink_trap_group {
+ const char *name;
+ u16 id;
+ bool generic;
+ u32 init_policer_id;
+};
+
+struct devlink_trap {
+ enum devlink_trap_type type;
+ enum devlink_trap_action init_action;
+ bool generic;
+ u16 id;
+ const char *name;
+ u16 init_group_id;
+ u32 metadata_cap;
+};
+
+struct arphdr {
+ __be16 ar_hrd;
+ __be16 ar_pro;
+ unsigned char ar_hln;
+ unsigned char ar_pln;
+ __be16 ar_op;
+};
+
+struct fib_info;
+
+struct fib_nh {
+ struct fib_nh_common nh_common;
+ struct hlist_node nh_hash;
+ struct fib_info *nh_parent;
+ __u32 nh_tclassid;
+ __be32 nh_saddr;
+ int nh_saddr_genid;
+};
+
+struct fib_info {
+ struct hlist_node fib_hash;
+ struct hlist_node fib_lhash;
+ struct list_head nh_list;
+ struct net *fib_net;
+ int fib_treeref;
+ refcount_t fib_clntref;
+ unsigned int fib_flags;
+ unsigned char fib_dead;
+ unsigned char fib_protocol;
+ unsigned char fib_scope;
+ unsigned char fib_type;
+ __be32 fib_prefsrc;
+ u32 fib_tb_id;
+ u32 fib_priority;
+ struct dst_metrics *fib_metrics;
+ int fib_nhs;
+ bool fib_nh_is_v6;
+ bool nh_updated;
+ struct nexthop *nh;
+ struct callback_head rcu;
+ struct fib_nh fib_nh[0];
+};
+
+struct nh_info;
+
+struct nh_group;
+
+struct nexthop {
+ struct rb_node rb_node;
+ struct list_head fi_list;
+ struct list_head f6i_list;
+ struct list_head fdb_list;
+ struct list_head grp_list;
+ struct net *net;
+ u32 id;
+ u8 protocol;
+ u8 nh_flags;
+ bool is_group;
+ refcount_t refcnt;
+ struct callback_head rcu;
+ union {
+ struct nh_info *nh_info;
+ struct nh_group *nh_grp;
+ };
+};
+
+struct nh_info {
+ struct hlist_node dev_hash;
+ struct nexthop *nh_parent;
+ u8 family;
+ bool reject_nh;
+ bool fdb_nh;
+ union {
+ struct fib_nh_common fib_nhc;
+ struct fib_nh fib_nh;
+ struct fib6_nh fib6_nh;
+ };
+};
+
+struct nh_grp_entry {
+ struct nexthop *nh;
+ u8 weight;
+ atomic_t upper_bound;
+ struct list_head nh_list;
+ struct nexthop *nh_parent;
+};
+
+struct nh_group {
+ struct nh_group *spare;
+ u16 num_nh;
+ bool mpath;
+ bool fdb_nh;
+ bool has_v4;
+ struct nh_grp_entry nh_entries[0];
+};
+
+struct ip_tunnel_encap {
+ u16 type;
+ u16 flags;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct ip_tunnel_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *);
+ int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *);
+ int (*err_handler)(struct sk_buff *, u32);
+};
+
+enum metadata_type {
+ METADATA_IP_TUNNEL = 0,
+ METADATA_HW_PORT_MUX = 1,
+};
+
+struct hw_port_info {
+ struct net_device *lower_dev;
+ u32 port_id;
+};
+
+struct metadata_dst {
+ struct dst_entry dst;
+ enum metadata_type type;
+ union {
+ struct ip_tunnel_info tun_info;
+ struct hw_port_info port_info;
+ } u;
+};
+
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
+struct gre_full_hdr {
+ struct gre_base_hdr fixed_header;
+ __be16 csum;
+ __be16 reserved1;
+ __be32 key;
+ __be32 seq;
+};
+
+struct pptp_gre_header {
+ struct gre_base_hdr gre_hd;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
+};
+
+struct tipc_basic_hdr {
+ __be32 w[4];
+};
+
+struct icmphdr {
+ __u8 type;
+ __u8 code;
+ __sum16 checksum;
+ union {
+ struct {
+ __be16 id;
+ __be16 sequence;
+ } echo;
+ __be32 gateway;
+ struct {
+ __be16 __unused;
+ __be16 mtu;
+ } frag;
+ __u8 reserved[4];
+ } un;
+};
+
+enum dccp_state {
+ DCCP_OPEN = 1,
+ DCCP_REQUESTING = 2,
+ DCCP_LISTEN = 10,
+ DCCP_RESPOND = 3,
+ DCCP_ACTIVE_CLOSEREQ = 4,
+ DCCP_PASSIVE_CLOSE = 8,
+ DCCP_CLOSING = 11,
+ DCCP_TIME_WAIT = 6,
+ DCCP_CLOSED = 7,
+ DCCP_NEW_SYN_RECV = 12,
+ DCCP_PARTOPEN = 13,
+ DCCP_PASSIVE_CLOSEREQ = 14,
+ DCCP_MAX_STATES = 15,
+};
+
+enum l2tp_debug_flags {
+ L2TP_MSG_DEBUG = 1,
+ L2TP_MSG_CONTROL = 2,
+ L2TP_MSG_SEQ = 4,
+ L2TP_MSG_DATA = 8,
+};
+
+struct pppoe_tag {
+ __be16 tag_type;
+ __be16 tag_len;
+ char tag_data[0];
+};
+
+struct pppoe_hdr {
+ __u8 type: 4;
+ __u8 ver: 4;
+ __u8 code;
+ __be16 sid;
+ __be16 length;
+ struct pppoe_tag tag[0];
+};
+
+struct mpls_label {
+ __be32 entry;
+};
+
+enum batadv_packettype {
+ BATADV_IV_OGM = 0,
+ BATADV_BCAST = 1,
+ BATADV_CODED = 2,
+ BATADV_ELP = 3,
+ BATADV_OGM2 = 4,
+ BATADV_UNICAST = 64,
+ BATADV_UNICAST_FRAG = 65,
+ BATADV_UNICAST_4ADDR = 66,
+ BATADV_ICMP = 67,
+ BATADV_UNICAST_TVLV = 68,
+};
+
+struct batadv_unicast_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 ttvn;
+ __u8 dest[6];
+};
+
+struct xt_table_info {
+ unsigned int size;
+ unsigned int number;
+ unsigned int initial_entries;
+ unsigned int hook_entry[5];
+ unsigned int underflow[5];
+ unsigned int stacksize;
+ void ***jumpstack;
+ unsigned char entries[0];
+};
+
+struct nf_conntrack_tuple_mask {
+ struct {
+ union nf_inet_addr u3;
+ union nf_conntrack_man_proto u;
+ } src;
+};
+
+struct nf_conntrack_l4proto {
+ u_int8_t l4proto;
+ bool allow_clash;
+ u16 nlattr_size;
+ bool (*can_early_drop)(const struct nf_conn *);
+ int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *);
+ int (*from_nlattr)(struct nlattr **, struct nf_conn *);
+ int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *);
+ unsigned int (*nlattr_tuple_size)();
+ int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t);
+ const struct nla_policy *nla_policy;
+ struct {
+ int (*nlattr_to_obj)(struct nlattr **, struct net *, void *);
+ int (*obj_to_nlattr)(struct sk_buff *, const void *);
+ u16 obj_size;
+ u16 nlattr_max;
+ const struct nla_policy *nla_policy;
+ } ctnl_timeout;
+};
+
+struct nf_ct_ext {
+ u8 offset[9];
+ u8 len;
+ char data[0];
+};
+
+struct nf_conntrack_helper;
+
+struct nf_conntrack_expect {
+ struct hlist_node lnode;
+ struct hlist_node hnode;
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple_mask mask;
+ void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *);
+ struct nf_conntrack_helper *helper;
+ struct nf_conn *master;
+ struct timer_list timeout;
+ refcount_t use;
+ unsigned int flags;
+ unsigned int class;
+ union nf_inet_addr saved_addr;
+ union nf_conntrack_man_proto saved_proto;
+ enum ip_conntrack_dir dir;
+ struct callback_head rcu;
+};
+
+enum nf_ct_ext_id {
+ NF_CT_EXT_HELPER = 0,
+ NF_CT_EXT_NAT = 1,
+ NF_CT_EXT_SEQADJ = 2,
+ NF_CT_EXT_ACCT = 3,
+ NF_CT_EXT_ECACHE = 4,
+ NF_CT_EXT_TSTAMP = 5,
+ NF_CT_EXT_TIMEOUT = 6,
+ NF_CT_EXT_LABELS = 7,
+ NF_CT_EXT_SYNPROXY = 8,
+ NF_CT_EXT_NUM = 9,
+};
+
+struct nf_ct_event {
+ struct nf_conn *ct;
+ u32 portid;
+ int report;
+};
+
+struct nf_exp_event {
+ struct nf_conntrack_expect *exp;
+ u32 portid;
+ int report;
+};
+
+struct nf_conn_labels {
+ long unsigned int bits[2];
+};
+
+struct _flow_keys_digest_data {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+ __be32 ports;
+ __be32 src;
+ __be32 dst;
+};
+
+struct qdisc_walker {
+ int stop;
+ int skip;
+ int count;
+ int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *);
+};
+
+enum {
+ IF_OPER_UNKNOWN = 0,
+ IF_OPER_NOTPRESENT = 1,
+ IF_OPER_DOWN = 2,
+ IF_OPER_LOWERLAYERDOWN = 3,
+ IF_OPER_TESTING = 4,
+ IF_OPER_DORMANT = 5,
+ IF_OPER_UP = 6,
+};
+
+struct ipv4_devconf {
+ void *sysctl;
+ int data[32];
+ long unsigned int state[1];
+};
+
+enum nf_dev_hooks {
+ NF_NETDEV_INGRESS = 0,
+ NF_NETDEV_NUMHOOKS = 1,
+};
+
+struct ifbond {
+ __s32 bond_mode;
+ __s32 num_slaves;
+ __s32 miimon;
+};
+
+typedef struct ifbond ifbond;
+
+struct ifslave {
+ __s32 slave_id;
+ char slave_name[16];
+ __s8 link;
+ __s8 state;
+ __u32 link_failure_count;
+};
+
+typedef struct ifslave ifslave;
+
+struct netdev_boot_setup {
+ char name[16];
+ struct ifmap map;
+};
+
+enum {
+ NAPIF_STATE_SCHED = 1,
+ NAPIF_STATE_MISSED = 2,
+ NAPIF_STATE_DISABLE = 4,
+ NAPIF_STATE_NPSVC = 8,
+ NAPIF_STATE_HASHED = 16,
+ NAPIF_STATE_NO_BUSY_POLL = 32,
+ NAPIF_STATE_IN_BUSY_POLL = 64,
+};
+
+enum gro_result {
+ GRO_MERGED = 0,
+ GRO_MERGED_FREE = 1,
+ GRO_HELD = 2,
+ GRO_NORMAL = 3,
+ GRO_DROP = 4,
+ GRO_CONSUMED = 5,
+};
+
+typedef enum gro_result gro_result_t;
+
+struct netdev_net_notifier {
+ struct list_head list;
+ struct notifier_block *nb;
+};
+
+struct udp_tunnel_info {
+ short unsigned int type;
+ sa_family_t sa_family;
+ __be16 port;
+};
+
+struct in_ifaddr;
+
+struct ip_mc_list;
+
+struct in_device {
+ struct net_device *dev;
+ refcount_t refcnt;
+ int dead;
+ struct in_ifaddr *ifa_list;
+ struct ip_mc_list *mc_list;
+ struct ip_mc_list **mc_hash;
+ int mc_count;
+ spinlock_t mc_tomb_lock;
+ struct ip_mc_list *mc_tomb;
+ long unsigned int mr_v1_seen;
+ long unsigned int mr_v2_seen;
+ long unsigned int mr_maxdelay;
+ long unsigned int mr_qi;
+ long unsigned int mr_qri;
+ unsigned char mr_qrv;
+ unsigned char mr_gq_running;
+ unsigned char mr_ifc_count;
+ struct timer_list mr_gq_timer;
+ struct timer_list mr_ifc_timer;
+ struct neigh_parms *arp_parms;
+ struct ipv4_devconf cnf;
+ struct callback_head callback_head;
+};
+
+struct packet_type {
+ __be16 type;
+ bool ignore_outgoing;
+ struct net_device *dev;
+ int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+ void (*list_func)(struct list_head *, struct packet_type *, struct net_device *);
+ bool (*id_match)(struct packet_type *, struct sock *);
+ void *af_packet_priv;
+ struct list_head list;
+};
+
+struct packet_offload {
+ __be16 type;
+ u16 priority;
+ struct offload_callbacks callbacks;
+ struct list_head list;
+};
+
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info;
+ union {
+ u32 mtu;
+ } ext;
+};
+
+struct netdev_notifier_change_info {
+ struct netdev_notifier_info info;
+ unsigned int flags_changed;
+};
+
+struct netdev_notifier_changeupper_info {
+ struct netdev_notifier_info info;
+ struct net_device *upper_dev;
+ bool master;
+ bool linking;
+ void *upper_info;
+};
+
+struct netdev_notifier_changelowerstate_info {
+ struct netdev_notifier_info info;
+ void *lower_state_info;
+};
+
+struct netdev_notifier_pre_changeaddr_info {
+ struct netdev_notifier_info info;
+ const unsigned char *dev_addr;
+};
+
+typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *);
+
+struct netdev_bonding_info {
+ ifslave slave;
+ ifbond master;
+};
+
+struct netdev_notifier_bonding_info {
+ struct netdev_notifier_info info;
+ struct netdev_bonding_info bonding_info;
+};
+
+enum qdisc_state_t {
+ __QDISC_STATE_SCHED = 0,
+ __QDISC_STATE_DEACTIVATED = 1,
+};
+
+struct tcf_walker {
+ int stop;
+ int skip;
+ int count;
+ bool nonempty;
+ long unsigned int cookie;
+ int (*fn)(struct tcf_proto *, void *, struct tcf_walker *);
+};
+
+struct udp_hslot;
+
+struct udp_table {
+ struct udp_hslot *hash;
+ struct udp_hslot *hash2;
+ unsigned int mask;
+ unsigned int log;
+};
+
+enum {
+ IPV4_DEVCONF_FORWARDING = 1,
+ IPV4_DEVCONF_MC_FORWARDING = 2,
+ IPV4_DEVCONF_PROXY_ARP = 3,
+ IPV4_DEVCONF_ACCEPT_REDIRECTS = 4,
+ IPV4_DEVCONF_SECURE_REDIRECTS = 5,
+ IPV4_DEVCONF_SEND_REDIRECTS = 6,
+ IPV4_DEVCONF_SHARED_MEDIA = 7,
+ IPV4_DEVCONF_RP_FILTER = 8,
+ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9,
+ IPV4_DEVCONF_BOOTP_RELAY = 10,
+ IPV4_DEVCONF_LOG_MARTIANS = 11,
+ IPV4_DEVCONF_TAG = 12,
+ IPV4_DEVCONF_ARPFILTER = 13,
+ IPV4_DEVCONF_MEDIUM_ID = 14,
+ IPV4_DEVCONF_NOXFRM = 15,
+ IPV4_DEVCONF_NOPOLICY = 16,
+ IPV4_DEVCONF_FORCE_IGMP_VERSION = 17,
+ IPV4_DEVCONF_ARP_ANNOUNCE = 18,
+ IPV4_DEVCONF_ARP_IGNORE = 19,
+ IPV4_DEVCONF_PROMOTE_SECONDARIES = 20,
+ IPV4_DEVCONF_ARP_ACCEPT = 21,
+ IPV4_DEVCONF_ARP_NOTIFY = 22,
+ IPV4_DEVCONF_ACCEPT_LOCAL = 23,
+ IPV4_DEVCONF_SRC_VMARK = 24,
+ IPV4_DEVCONF_PROXY_ARP_PVLAN = 25,
+ IPV4_DEVCONF_ROUTE_LOCALNET = 26,
+ IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27,
+ IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28,
+ IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29,
+ IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30,
+ IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31,
+ IPV4_DEVCONF_BC_FORWARDING = 32,
+ __IPV4_DEVCONF_MAX = 33,
+};
+
+struct in_ifaddr {
+ struct hlist_node hash;
+ struct in_ifaddr *ifa_next;
+ struct in_device *ifa_dev;
+ struct callback_head callback_head;
+ __be32 ifa_local;
+ __be32 ifa_address;
+ __be32 ifa_mask;
+ __u32 ifa_rt_priority;
+ __be32 ifa_broadcast;
+ unsigned char ifa_scope;
+ unsigned char ifa_prefixlen;
+ __u32 ifa_flags;
+ char ifa_label[16];
+ __u32 ifa_valid_lft;
+ __u32 ifa_preferred_lft;
+ long unsigned int ifa_cstamp;
+ long unsigned int ifa_tstamp;
+};
+
+struct udp_hslot {
+ struct hlist_head head;
+ int count;
+ spinlock_t lock;
+};
+
+struct dev_kfree_skb_cb {
+ enum skb_free_reason reason;
+};
+
+struct netdev_adjacent {
+ struct net_device *dev;
+ bool master;
+ bool ignore;
+ u16 ref_nr;
+ void *private;
+ struct list_head list;
+ struct callback_head rcu;
+};
+
+typedef struct sk_buff *pto_T_____30;
+
+typedef u16 pao_T_____7;
+
+struct xfrm_dst {
+ union {
+ struct dst_entry dst;
+ struct rtable rt;
+ struct rt6_info rt6;
+ } u;
+ struct dst_entry *route;
+ struct dst_entry *child;
+ struct dst_entry *path;
+ struct xfrm_policy *pols[2];
+ int num_pols;
+ int num_xfrms;
+ u32 xfrm_genid;
+ u32 policy_genid;
+ u32 route_mtu_cached;
+ u32 child_mtu_cached;
+ u32 route_cookie;
+ u32 path_cookie;
+};
+
+enum {
+ NDA_UNSPEC = 0,
+ NDA_DST = 1,
+ NDA_LLADDR = 2,
+ NDA_CACHEINFO = 3,
+ NDA_PROBES = 4,
+ NDA_VLAN = 5,
+ NDA_PORT = 6,
+ NDA_VNI = 7,
+ NDA_IFINDEX = 8,
+ NDA_MASTER = 9,
+ NDA_LINK_NETNSID = 10,
+ NDA_SRC_VNI = 11,
+ NDA_PROTOCOL = 12,
+ NDA_NH_ID = 13,
+ __NDA_MAX = 14,
+};
+
+struct nda_cacheinfo {
+ __u32 ndm_confirmed;
+ __u32 ndm_used;
+ __u32 ndm_updated;
+ __u32 ndm_refcnt;
+};
+
+struct ndt_stats {
+ __u64 ndts_allocs;
+ __u64 ndts_destroys;
+ __u64 ndts_hash_grows;
+ __u64 ndts_res_failed;
+ __u64 ndts_lookups;
+ __u64 ndts_hits;
+ __u64 ndts_rcv_probes_mcast;
+ __u64 ndts_rcv_probes_ucast;
+ __u64 ndts_periodic_gc_runs;
+ __u64 ndts_forced_gc_runs;
+ __u64 ndts_table_fulls;
+};
+
+enum {
+ NDTPA_UNSPEC = 0,
+ NDTPA_IFINDEX = 1,
+ NDTPA_REFCNT = 2,
+ NDTPA_REACHABLE_TIME = 3,
+ NDTPA_BASE_REACHABLE_TIME = 4,
+ NDTPA_RETRANS_TIME = 5,
+ NDTPA_GC_STALETIME = 6,
+ NDTPA_DELAY_PROBE_TIME = 7,
+ NDTPA_QUEUE_LEN = 8,
+ NDTPA_APP_PROBES = 9,
+ NDTPA_UCAST_PROBES = 10,
+ NDTPA_MCAST_PROBES = 11,
+ NDTPA_ANYCAST_DELAY = 12,
+ NDTPA_PROXY_DELAY = 13,
+ NDTPA_PROXY_QLEN = 14,
+ NDTPA_LOCKTIME = 15,
+ NDTPA_QUEUE_LENBYTES = 16,
+ NDTPA_MCAST_REPROBES = 17,
+ NDTPA_PAD = 18,
+ __NDTPA_MAX = 19,
+};
+
+struct ndtmsg {
+ __u8 ndtm_family;
+ __u8 ndtm_pad1;
+ __u16 ndtm_pad2;
+};
+
+struct ndt_config {
+ __u16 ndtc_key_len;
+ __u16 ndtc_entry_size;
+ __u32 ndtc_entries;
+ __u32 ndtc_last_flush;
+ __u32 ndtc_last_rand;
+ __u32 ndtc_hash_rnd;
+ __u32 ndtc_hash_mask;
+ __u32 ndtc_hash_chain_gc;
+ __u32 ndtc_proxy_qlen;
+};
+
+enum {
+ NDTA_UNSPEC = 0,
+ NDTA_NAME = 1,
+ NDTA_THRESH1 = 2,
+ NDTA_THRESH2 = 3,
+ NDTA_THRESH3 = 4,
+ NDTA_CONFIG = 5,
+ NDTA_PARMS = 6,
+ NDTA_STATS = 7,
+ NDTA_GC_INTERVAL = 8,
+ NDTA_PAD = 9,
+ __NDTA_MAX = 10,
+};
+
+enum {
+ RTN_UNSPEC = 0,
+ RTN_UNICAST = 1,
+ RTN_LOCAL = 2,
+ RTN_BROADCAST = 3,
+ RTN_ANYCAST = 4,
+ RTN_MULTICAST = 5,
+ RTN_BLACKHOLE = 6,
+ RTN_UNREACHABLE = 7,
+ RTN_PROHIBIT = 8,
+ RTN_THROW = 9,
+ RTN_NAT = 10,
+ RTN_XRESOLVE = 11,
+ __RTN_MAX = 12,
+};
+
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_DN_TABLE = 2,
+ NEIGH_NR_TABLES = 3,
+ NEIGH_LINK_TABLE = 3,
+};
+
+struct neigh_seq_state {
+ struct seq_net_private p;
+ struct neigh_table *tbl;
+ struct neigh_hash_table *nht;
+ void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *);
+ unsigned int bucket;
+ unsigned int flags;
+};
+
+struct neighbour_cb {
+ long unsigned int sched_next;
+ unsigned int flags;
+};
+
+enum netevent_notif_type {
+ NETEVENT_NEIGH_UPDATE = 1,
+ NETEVENT_REDIRECT = 2,
+ NETEVENT_DELAY_PROBE_TIME_UPDATE = 3,
+ NETEVENT_IPV4_MPATH_HASH_UPDATE = 4,
+ NETEVENT_IPV6_MPATH_HASH_UPDATE = 5,
+ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6,
+};
+
+struct neigh_dump_filter {
+ int master_idx;
+ int dev_idx;
+};
+
+struct neigh_sysctl_table {
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table neigh_vars[21];
+};
+
+struct netlink_dump_control {
+ int (*start)(struct netlink_callback *);
+ int (*dump)(struct sk_buff *, struct netlink_callback *);
+ int (*done)(struct netlink_callback *);
+ void *data;
+ struct module *module;
+ u16 min_dump_alloc;
+};
+
+struct rtnl_link_stats {
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
+ __u32 collisions;
+ __u32 rx_length_errors;
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
+ __u32 tx_aborted_errors;
+ __u32 tx_carrier_errors;
+ __u32 tx_fifo_errors;
+ __u32 tx_heartbeat_errors;
+ __u32 tx_window_errors;
+ __u32 rx_compressed;
+ __u32 tx_compressed;
+ __u32 rx_nohandler;
+};
+
+struct rtnl_link_ifmap {
+ __u64 mem_start;
+ __u64 mem_end;
+ __u64 base_addr;
+ __u16 irq;
+ __u8 dma;
+ __u8 port;
+};
+
+enum {
+ IFLA_BRPORT_UNSPEC = 0,
+ IFLA_BRPORT_STATE = 1,
+ IFLA_BRPORT_PRIORITY = 2,
+ IFLA_BRPORT_COST = 3,
+ IFLA_BRPORT_MODE = 4,
+ IFLA_BRPORT_GUARD = 5,
+ IFLA_BRPORT_PROTECT = 6,
+ IFLA_BRPORT_FAST_LEAVE = 7,
+ IFLA_BRPORT_LEARNING = 8,
+ IFLA_BRPORT_UNICAST_FLOOD = 9,
+ IFLA_BRPORT_PROXYARP = 10,
+ IFLA_BRPORT_LEARNING_SYNC = 11,
+ IFLA_BRPORT_PROXYARP_WIFI = 12,
+ IFLA_BRPORT_ROOT_ID = 13,
+ IFLA_BRPORT_BRIDGE_ID = 14,
+ IFLA_BRPORT_DESIGNATED_PORT = 15,
+ IFLA_BRPORT_DESIGNATED_COST = 16,
+ IFLA_BRPORT_ID = 17,
+ IFLA_BRPORT_NO = 18,
+ IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19,
+ IFLA_BRPORT_CONFIG_PENDING = 20,
+ IFLA_BRPORT_MESSAGE_AGE_TIMER = 21,
+ IFLA_BRPORT_FORWARD_DELAY_TIMER = 22,
+ IFLA_BRPORT_HOLD_TIMER = 23,
+ IFLA_BRPORT_FLUSH = 24,
+ IFLA_BRPORT_MULTICAST_ROUTER = 25,
+ IFLA_BRPORT_PAD = 26,
+ IFLA_BRPORT_MCAST_FLOOD = 27,
+ IFLA_BRPORT_MCAST_TO_UCAST = 28,
+ IFLA_BRPORT_VLAN_TUNNEL = 29,
+ IFLA_BRPORT_BCAST_FLOOD = 30,
+ IFLA_BRPORT_GROUP_FWD_MASK = 31,
+ IFLA_BRPORT_NEIGH_SUPPRESS = 32,
+ IFLA_BRPORT_ISOLATED = 33,
+ IFLA_BRPORT_BACKUP_PORT = 34,
+ IFLA_BRPORT_MRP_RING_OPEN = 35,
+ __IFLA_BRPORT_MAX = 36,
+};
+
+enum {
+ IFLA_INFO_UNSPEC = 0,
+ IFLA_INFO_KIND = 1,
+ IFLA_INFO_DATA = 2,
+ IFLA_INFO_XSTATS = 3,
+ IFLA_INFO_SLAVE_KIND = 4,
+ IFLA_INFO_SLAVE_DATA = 5,
+ __IFLA_INFO_MAX = 6,
+};
+
+enum {
+ IFLA_VF_INFO_UNSPEC = 0,
+ IFLA_VF_INFO = 1,
+ __IFLA_VF_INFO_MAX = 2,
+};
+
+enum {
+ IFLA_VF_UNSPEC = 0,
+ IFLA_VF_MAC = 1,
+ IFLA_VF_VLAN = 2,
+ IFLA_VF_TX_RATE = 3,
+ IFLA_VF_SPOOFCHK = 4,
+ IFLA_VF_LINK_STATE = 5,
+ IFLA_VF_RATE = 6,
+ IFLA_VF_RSS_QUERY_EN = 7,
+ IFLA_VF_STATS = 8,
+ IFLA_VF_TRUST = 9,
+ IFLA_VF_IB_NODE_GUID = 10,
+ IFLA_VF_IB_PORT_GUID = 11,
+ IFLA_VF_VLAN_LIST = 12,
+ IFLA_VF_BROADCAST = 13,
+ __IFLA_VF_MAX = 14,
+};
+
+struct ifla_vf_mac {
+ __u32 vf;
+ __u8 mac[32];
+};
+
+struct ifla_vf_broadcast {
+ __u8 broadcast[32];
+};
+
+struct ifla_vf_vlan {
+ __u32 vf;
+ __u32 vlan;
+ __u32 qos;
+};
+
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC = 0,
+ IFLA_VF_VLAN_INFO = 1,
+ __IFLA_VF_VLAN_INFO_MAX = 2,
+};
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan;
+ __u32 qos;
+ __be16 vlan_proto;
+};
+
+struct ifla_vf_tx_rate {
+ __u32 vf;
+ __u32 rate;
+};
+
+struct ifla_vf_rate {
+ __u32 vf;
+ __u32 min_tx_rate;
+ __u32 max_tx_rate;
+};
+
+struct ifla_vf_spoofchk {
+ __u32 vf;
+ __u32 setting;
+};
+
+struct ifla_vf_link_state {
+ __u32 vf;
+ __u32 link_state;
+};
+
+struct ifla_vf_rss_query_en {
+ __u32 vf;
+ __u32 setting;
+};
+
+enum {
+ IFLA_VF_STATS_RX_PACKETS = 0,
+ IFLA_VF_STATS_TX_PACKETS = 1,
+ IFLA_VF_STATS_RX_BYTES = 2,
+ IFLA_VF_STATS_TX_BYTES = 3,
+ IFLA_VF_STATS_BROADCAST = 4,
+ IFLA_VF_STATS_MULTICAST = 5,
+ IFLA_VF_STATS_PAD = 6,
+ IFLA_VF_STATS_RX_DROPPED = 7,
+ IFLA_VF_STATS_TX_DROPPED = 8,
+ __IFLA_VF_STATS_MAX = 9,
+};
+
+struct ifla_vf_trust {
+ __u32 vf;
+ __u32 setting;
+};
+
+enum {
+ IFLA_VF_PORT_UNSPEC = 0,
+ IFLA_VF_PORT = 1,
+ __IFLA_VF_PORT_MAX = 2,
+};
+
+enum {
+ IFLA_PORT_UNSPEC = 0,
+ IFLA_PORT_VF = 1,
+ IFLA_PORT_PROFILE = 2,
+ IFLA_PORT_VSI_TYPE = 3,
+ IFLA_PORT_INSTANCE_UUID = 4,
+ IFLA_PORT_HOST_UUID = 5,
+ IFLA_PORT_REQUEST = 6,
+ IFLA_PORT_RESPONSE = 7,
+ __IFLA_PORT_MAX = 8,
+};
+
+struct if_stats_msg {
+ __u8 family;
+ __u8 pad1;
+ __u16 pad2;
+ __u32 ifindex;
+ __u32 filter_mask;
+};
+
+enum {
+ IFLA_STATS_UNSPEC = 0,
+ IFLA_STATS_LINK_64 = 1,
+ IFLA_STATS_LINK_XSTATS = 2,
+ IFLA_STATS_LINK_XSTATS_SLAVE = 3,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS = 4,
+ IFLA_STATS_AF_SPEC = 5,
+ __IFLA_STATS_MAX = 6,
+};
+
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC = 0,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT = 1,
+ __IFLA_OFFLOAD_XSTATS_MAX = 2,
+};
+
+enum {
+ XDP_ATTACHED_NONE = 0,
+ XDP_ATTACHED_DRV = 1,
+ XDP_ATTACHED_SKB = 2,
+ XDP_ATTACHED_HW = 3,
+ XDP_ATTACHED_MULTI = 4,
+};
+
+enum {
+ IFLA_XDP_UNSPEC = 0,
+ IFLA_XDP_FD = 1,
+ IFLA_XDP_ATTACHED = 2,
+ IFLA_XDP_FLAGS = 3,
+ IFLA_XDP_PROG_ID = 4,
+ IFLA_XDP_DRV_PROG_ID = 5,
+ IFLA_XDP_SKB_PROG_ID = 6,
+ IFLA_XDP_HW_PROG_ID = 7,
+ IFLA_XDP_EXPECTED_FD = 8,
+ __IFLA_XDP_MAX = 9,
+};
+
+enum {
+ IFLA_EVENT_NONE = 0,
+ IFLA_EVENT_REBOOT = 1,
+ IFLA_EVENT_FEATURES = 2,
+ IFLA_EVENT_BONDING_FAILOVER = 3,
+ IFLA_EVENT_NOTIFY_PEERS = 4,
+ IFLA_EVENT_IGMP_RESEND = 5,
+ IFLA_EVENT_BONDING_OPTIONS = 6,
+};
+
+enum {
+ IFLA_BRIDGE_FLAGS = 0,
+ IFLA_BRIDGE_MODE = 1,
+ IFLA_BRIDGE_VLAN_INFO = 2,
+ IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3,
+ IFLA_BRIDGE_MRP = 4,
+ __IFLA_BRIDGE_MAX = 5,
+};
+
+enum {
+ BR_MCAST_DIR_RX = 0,
+ BR_MCAST_DIR_TX = 1,
+ BR_MCAST_DIR_SIZE = 2,
+};
+
+enum rtattr_type_t {
+ RTA_UNSPEC = 0,
+ RTA_DST = 1,
+ RTA_SRC = 2,
+ RTA_IIF = 3,
+ RTA_OIF = 4,
+ RTA_GATEWAY = 5,
+ RTA_PRIORITY = 6,
+ RTA_PREFSRC = 7,
+ RTA_METRICS = 8,
+ RTA_MULTIPATH = 9,
+ RTA_PROTOINFO = 10,
+ RTA_FLOW = 11,
+ RTA_CACHEINFO = 12,
+ RTA_SESSION = 13,
+ RTA_MP_ALGO = 14,
+ RTA_TABLE = 15,
+ RTA_MARK = 16,
+ RTA_MFC_STATS = 17,
+ RTA_VIA = 18,
+ RTA_NEWDST = 19,
+ RTA_PREF = 20,
+ RTA_ENCAP_TYPE = 21,
+ RTA_ENCAP = 22,
+ RTA_EXPIRES = 23,
+ RTA_PAD = 24,
+ RTA_UID = 25,
+ RTA_TTL_PROPAGATE = 26,
+ RTA_IP_PROTO = 27,
+ RTA_SPORT = 28,
+ RTA_DPORT = 29,
+ RTA_NH_ID = 30,
+ __RTA_MAX = 31,
+};
+
+struct rta_cacheinfo {
+ __u32 rta_clntref;
+ __u32 rta_lastuse;
+ __s32 rta_expires;
+ __u32 rta_error;
+ __u32 rta_used;
+ __u32 rta_id;
+ __u32 rta_ts;
+ __u32 rta_tsage;
+};
+
+typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *);
+
+typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+
+struct rtnl_af_ops {
+ struct list_head list;
+ int family;
+ int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32);
+ size_t (*get_link_af_size)(const struct net_device *, u32);
+ int (*validate_link_af)(const struct net_device *, const struct nlattr *);
+ int (*set_link_af)(struct net_device *, const struct nlattr *);
+ int (*fill_stats_af)(struct sk_buff *, const struct net_device *);
+ size_t (*get_stats_af_size)(const struct net_device *);
+};
+
+struct rtnl_link {
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+ struct module *owner;
+ unsigned int flags;
+ struct callback_head rcu;
+};
+
+enum {
+ IF_LINK_MODE_DEFAULT = 0,
+ IF_LINK_MODE_DORMANT = 1,
+ IF_LINK_MODE_TESTING = 2,
+};
+
+enum lw_bits {
+ LW_URGENT = 0,
+};
+
+struct seg6_pernet_data {
+ struct mutex lock;
+ struct in6_addr *tun_src;
+};
+
+enum {
+ BPF_F_RECOMPUTE_CSUM = 1,
+ BPF_F_INVALIDATE_HASH = 2,
+};
+
+enum {
+ BPF_F_HDR_FIELD_MASK = 15,
+};
+
+enum {
+ BPF_F_PSEUDO_HDR = 16,
+ BPF_F_MARK_MANGLED_0 = 32,
+ BPF_F_MARK_ENFORCE = 64,
+};
+
+enum {
+ BPF_F_INGRESS = 1,
+};
+
+enum {
+ BPF_F_TUNINFO_IPV6 = 1,
+};
+
+enum {
+ BPF_F_ZERO_CSUM_TX = 2,
+ BPF_F_DONT_FRAGMENT = 4,
+ BPF_F_SEQ_NUMBER = 8,
+};
+
+enum {
+ BPF_CSUM_LEVEL_QUERY = 0,
+ BPF_CSUM_LEVEL_INC = 1,
+ BPF_CSUM_LEVEL_DEC = 2,
+ BPF_CSUM_LEVEL_RESET = 3,
+};
+
+enum {
+ BPF_F_ADJ_ROOM_FIXED_GSO = 1,
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2,
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4,
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8,
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16,
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32,
+};
+
+enum {
+ BPF_ADJ_ROOM_ENCAP_L2_MASK = 255,
+ BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
+};
+
+enum bpf_adj_room_mode {
+ BPF_ADJ_ROOM_NET = 0,
+ BPF_ADJ_ROOM_MAC = 1,
+};
+
+enum bpf_hdr_start_off {
+ BPF_HDR_START_MAC = 0,
+ BPF_HDR_START_NET = 1,
+};
+
+struct bpf_tunnel_key {
+ __u32 tunnel_id;
+ union {
+ __u32 remote_ipv4;
+ __u32 remote_ipv6[4];
+ };
+ __u8 tunnel_tos;
+ __u8 tunnel_ttl;
+ __u16 tunnel_ext;
+ __u32 tunnel_label;
+};
+
+struct bpf_xfrm_state {
+ __u32 reqid;
+ __u32 spi;
+ __u16 family;
+ __u16 ext;
+ union {
+ __u32 remote_ipv4;
+ __u32 remote_ipv6[4];
+ };
+};
+
+struct bpf_tcp_sock {
+ __u32 snd_cwnd;
+ __u32 srtt_us;
+ __u32 rtt_min;
+ __u32 snd_ssthresh;
+ __u32 rcv_nxt;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 mss_cache;
+ __u32 ecn_flags;
+ __u32 rate_delivered;
+ __u32 rate_interval_us;
+ __u32 packets_out;
+ __u32 retrans_out;
+ __u32 total_retrans;
+ __u32 segs_in;
+ __u32 data_segs_in;
+ __u32 segs_out;
+ __u32 data_segs_out;
+ __u32 lost_out;
+ __u32 sacked_out;
+ __u64 bytes_received;
+ __u64 bytes_acked;
+ __u32 dsack_dups;
+ __u32 delivered;
+ __u32 delivered_ce;
+ __u32 icsk_retransmits;
+};
+
+struct bpf_sock_tuple {
+ union {
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ __be16 sport;
+ __be16 dport;
+ } ipv4;
+ struct {
+ __be32 saddr[4];
+ __be32 daddr[4];
+ __be16 sport;
+ __be16 dport;
+ } ipv6;
+ };
+};
+
+struct bpf_xdp_sock {
+ __u32 queue_id;
+};
+
+enum sk_action {
+ SK_DROP = 0,
+ SK_PASS = 1,
+};
+
+enum {
+ BPF_SOCK_OPS_RTO_CB_FLAG = 1,
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 2,
+ BPF_SOCK_OPS_STATE_CB_FLAG = 4,
+ BPF_SOCK_OPS_RTT_CB_FLAG = 8,
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 15,
+};
+
+enum {
+ BPF_SOCK_OPS_VOID = 0,
+ BPF_SOCK_OPS_TIMEOUT_INIT = 1,
+ BPF_SOCK_OPS_RWND_INIT = 2,
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 3,
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4,
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5,
+ BPF_SOCK_OPS_NEEDS_ECN = 6,
+ BPF_SOCK_OPS_BASE_RTT = 7,
+ BPF_SOCK_OPS_RTO_CB = 8,
+ BPF_SOCK_OPS_RETRANS_CB = 9,
+ BPF_SOCK_OPS_STATE_CB = 10,
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 11,
+ BPF_SOCK_OPS_RTT_CB = 12,
+};
+
+enum {
+ TCP_BPF_IW = 1001,
+ TCP_BPF_SNDCWND_CLAMP = 1002,
+};
+
+enum {
+ BPF_FIB_LOOKUP_DIRECT = 1,
+ BPF_FIB_LOOKUP_OUTPUT = 2,
+};
+
+enum {
+ BPF_FIB_LKUP_RET_SUCCESS = 0,
+ BPF_FIB_LKUP_RET_BLACKHOLE = 1,
+ BPF_FIB_LKUP_RET_UNREACHABLE = 2,
+ BPF_FIB_LKUP_RET_PROHIBIT = 3,
+ BPF_FIB_LKUP_RET_NOT_FWDED = 4,
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 5,
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 6,
+ BPF_FIB_LKUP_RET_NO_NEIGH = 7,
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 8,
+};
+
+struct bpf_fib_lookup {
+ __u8 family;
+ __u8 l4_protocol;
+ __be16 sport;
+ __be16 dport;
+ __u16 tot_len;
+ __u32 ifindex;
+ union {
+ __u8 tos;
+ __be32 flowinfo;
+ __u32 rt_metric;
+ };
+ union {
+ __be32 ipv4_src;
+ __u32 ipv6_src[4];
+ };
+ union {
+ __be32 ipv4_dst;
+ __u32 ipv6_dst[4];
+ };
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __u8 smac[6];
+ __u8 dmac[6];
+};
+
+struct xsk_queue;
+
+struct xsk_buff_pool;
+
+struct xdp_umem {
+ struct xsk_queue *fq;
+ struct xsk_queue *cq;
+ struct xsk_buff_pool *pool;
+ u64 size;
+ u32 headroom;
+ u32 chunk_size;
+ struct user_struct *user;
+ refcount_t users;
+ struct work_struct work;
+ struct page **pgs;
+ u32 npgs;
+ u16 queue_id;
+ u8 need_wakeup;
+ u8 flags;
+ int id;
+ struct net_device *dev;
+ bool zc;
+ spinlock_t xsk_tx_list_lock;
+ struct list_head xsk_tx_list;
+};
+
+enum rt_scope_t {
+ RT_SCOPE_UNIVERSE = 0,
+ RT_SCOPE_SITE = 200,
+ RT_SCOPE_LINK = 253,
+ RT_SCOPE_HOST = 254,
+ RT_SCOPE_NOWHERE = 255,
+};
+
+enum rt_class_t {
+ RT_TABLE_UNSPEC = 0,
+ RT_TABLE_COMPAT = 252,
+ RT_TABLE_DEFAULT = 253,
+ RT_TABLE_MAIN = 254,
+ RT_TABLE_LOCAL = 255,
+ RT_TABLE_MAX = -1,
+};
+
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int);
+
+struct fib_table;
+
+struct fib_result {
+ __be32 prefix;
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+ u32 tclassid;
+ struct fib_nh_common *nhc;
+ struct fib_info *fi;
+ struct fib_table *table;
+ struct hlist_head *fa_head;
+};
+
+struct fib_table {
+ struct hlist_node tb_hlist;
+ u32 tb_id;
+ int tb_num_default;
+ struct callback_head rcu;
+ long unsigned int *tb_data;
+ long unsigned int __data[0];
+};
+
+enum {
+ INET_ECN_NOT_ECT = 0,
+ INET_ECN_ECT_1 = 1,
+ INET_ECN_ECT_0 = 2,
+ INET_ECN_CE = 3,
+ INET_ECN_MASK = 3,
+};
+
+struct tcp_skb_cb {
+ __u32 seq;
+ __u32 end_seq;
+ union {
+ __u32 tcp_tw_isn;
+ struct {
+ u16 tcp_gso_segs;
+ u16 tcp_gso_size;
+ };
+ };
+ __u8 tcp_flags;
+ __u8 sacked;
+ __u8 ip_dsfield;
+ __u8 txstamp_ack: 1;
+ __u8 eor: 1;
+ __u8 has_rxtstamp: 1;
+ __u8 unused: 5;
+ __u32 ack_seq;
+ union {
+ struct {
+ __u32 in_flight: 30;
+ __u32 is_app_limited: 1;
+ __u32 unused: 1;
+ __u32 delivered;
+ u64 first_tx_mstamp;
+ u64 delivered_mstamp;
+ } tx;
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+ struct {
+ __u32 flags;
+ struct sock *sk_redir;
+ void *data_end;
+ } bpf;
+ };
+};
+
+struct xdp_sock;
+
+struct xsk_map {
+ struct bpf_map map;
+ spinlock_t lock;
+ struct xdp_sock *xsk_map[0];
+};
+
+struct xdp_sock {
+ struct sock sk;
+ struct xsk_queue *rx;
+ struct net_device *dev;
+ struct xdp_umem *umem;
+ struct list_head flush_node;
+ u16 queue_id;
+ bool zc;
+ enum {
+ XSK_READY = 0,
+ XSK_BOUND = 1,
+ XSK_UNBOUND = 2,
+ } state;
+ struct mutex mutex;
+ long: 64;
+ long: 64;
+ struct xsk_queue *tx;
+ struct list_head list;
+ spinlock_t tx_completion_lock;
+ spinlock_t rx_lock;
+ u64 rx_dropped;
+ struct list_head map_list;
+ spinlock_t map_list_lock;
+ long: 64;
+ long: 64;
+};
+
+struct ipv6_sr_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 type;
+ __u8 segments_left;
+ __u8 first_segment;
+ __u8 flags;
+ __u16 tag;
+ struct in6_addr segments[0];
+};
+
+struct seg6_bpf_srh_state {
+ struct ipv6_sr_hdr *srh;
+ u16 hdrlen;
+ bool valid;
+};
+
+typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32);
+
+typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32);
+
+typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int);
+
+typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int);
+
+typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int);
+
+typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int);
+
+typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int);
+
+typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int);
+
+struct bpf_scratchpad {
+ union {
+ __be32 diff[128];
+ u8 buff[512];
+ };
+};
+
+typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64);
+
+typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32);
+
+typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32);
+
+typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32);
+
+typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32);
+
+typedef u64 (*btf_bpf_sk_fullsock)(struct sock *);
+
+typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32);
+
+typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64);
+
+typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64);
+
+typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum);
+
+typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum);
+
+typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64);
+
+typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64);
+
+typedef u64 (*btf_bpf_redirect)(u32, u64);
+
+typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32);
+
+typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32);
+
+typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64);
+
+typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64);
+
+typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64);
+
+typedef u64 (*btf_bpf_get_cgroup_classid_curr)();
+
+typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *);
+
+typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *);
+
+typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32);
+
+typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16);
+
+typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64);
+
+typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32);
+
+typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64);
+
+typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64);
+
+typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64);
+
+typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64);
+
+typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64);
+
+typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int);
+
+typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int);
+
+typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int);
+
+typedef u64 (*btf_bpf_xdp_redirect)(u32, u64);
+
+typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u32, u64);
+
+typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64);
+
+typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64);
+
+typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32);
+
+typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64);
+
+typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32);
+
+typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32);
+
+typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *);
+
+typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int);
+
+typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *);
+
+typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int);
+
+typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64);
+
+typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *);
+
+typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *);
+
+typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *);
+
+typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *);
+
+typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *);
+
+typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int);
+
+typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int);
+
+typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int);
+
+typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int);
+
+typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int);
+
+typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int);
+
+typedef u64 (*btf_bpf_skb_get_xfrm_state)(struct sk_buff *, u32, struct bpf_xfrm_state *, u32, u64);
+
+typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32);
+
+typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32);
+
+typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32);
+
+typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32);
+
+typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64);
+
+typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64);
+
+typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64);
+
+typedef u64 (*btf_bpf_sk_release)(struct sock *);
+
+typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64);
+
+typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64);
+
+typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64);
+
+typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64);
+
+typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64);
+
+typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64);
+
+typedef u64 (*btf_bpf_tcp_sock)(struct sock *);
+
+typedef u64 (*btf_bpf_get_listener_sock)(struct sock *);
+
+typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *);
+
+typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32);
+
+typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32);
+
+typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64);
+
+typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32);
+
+typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32);
+
+typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32);
+
+struct bpf_dtab_netdev___2;
+
+enum {
+ INET_DIAG_REQ_NONE = 0,
+ INET_DIAG_REQ_BYTECODE = 1,
+ INET_DIAG_REQ_SK_BPF_STORAGES = 2,
+ __INET_DIAG_REQ_MAX = 3,
+};
+
+struct sock_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+};
+
+struct sock_diag_handler {
+ __u8 family;
+ int (*dump)(struct sk_buff *, struct nlmsghdr *);
+ int (*get_info)(struct sk_buff *, struct sock *);
+ int (*destroy)(struct sk_buff *, struct nlmsghdr *);
+};
+
+struct broadcast_sk {
+ struct sock *sk;
+ struct work_struct work;
+};
+
+typedef int gifconf_func_t(struct net_device *, char *, int, int);
+
+struct hwtstamp_config {
+ int flags;
+ int tx_type;
+ int rx_filter;
+};
+
+enum hwtstamp_tx_types {
+ HWTSTAMP_TX_OFF = 0,
+ HWTSTAMP_TX_ON = 1,
+ HWTSTAMP_TX_ONESTEP_SYNC = 2,
+ HWTSTAMP_TX_ONESTEP_P2P = 3,
+ __HWTSTAMP_TX_CNT = 4,
+};
+
+enum hwtstamp_rx_filters {
+ HWTSTAMP_FILTER_NONE = 0,
+ HWTSTAMP_FILTER_ALL = 1,
+ HWTSTAMP_FILTER_SOME = 2,
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3,
+ HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4,
+ HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5,
+ HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6,
+ HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7,
+ HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8,
+ HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9,
+ HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10,
+ HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11,
+ HWTSTAMP_FILTER_PTP_V2_EVENT = 12,
+ HWTSTAMP_FILTER_PTP_V2_SYNC = 13,
+ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14,
+ HWTSTAMP_FILTER_NTP_ALL = 15,
+ __HWTSTAMP_FILTER_CNT = 16,
+};
+
+struct tso_t {
+ int next_frag_idx;
+ void *data;
+ size_t size;
+ u16 ip_id;
+ bool ipv6;
+ u32 tcp_seq;
+};
+
+struct fib_notifier_info {
+ int family;
+ struct netlink_ext_ack *extack;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_REPLACE = 0,
+ FIB_EVENT_ENTRY_APPEND = 1,
+ FIB_EVENT_ENTRY_ADD = 2,
+ FIB_EVENT_ENTRY_DEL = 3,
+ FIB_EVENT_RULE_ADD = 4,
+ FIB_EVENT_RULE_DEL = 5,
+ FIB_EVENT_NH_ADD = 6,
+ FIB_EVENT_NH_DEL = 7,
+ FIB_EVENT_VIF_ADD = 8,
+ FIB_EVENT_VIF_DEL = 9,
+};
+
+struct fib_notifier_net {
+ struct list_head fib_notifier_ops;
+ struct atomic_notifier_head fib_chain;
+};
+
+struct xdp_attachment_info {
+ struct bpf_prog *prog;
+ u32 flags;
+};
+
+struct pp_alloc_cache {
+ u32 count;
+ void *cache[128];
+};
+
+struct page_pool_params {
+ unsigned int flags;
+ unsigned int order;
+ unsigned int pool_size;
+ int nid;
+ struct device *dev;
+ enum dma_data_direction dma_dir;
+ unsigned int max_len;
+ unsigned int offset;
+};
+
+struct page_pool {
+ struct page_pool_params p;
+ struct delayed_work release_dw;
+ void (*disconnect)(void *);
+ long unsigned int defer_start;
+ long unsigned int defer_warn;
+ u32 pages_state_hold_cnt;
+ long: 32;
+ long: 64;
+ long: 64;
+ struct pp_alloc_cache alloc;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct ptr_ring ring;
+ atomic_t pages_state_release_cnt;
+ refcount_t user_cnt;
+ u64 destroy_cnt;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xdp_buff_xsk;
+
+struct xsk_buff_pool {
+ struct xsk_queue *fq;
+ struct list_head free_list;
+ dma_addr_t *dma_pages;
+ struct xdp_buff_xsk *heads;
+ u64 chunk_mask;
+ u64 addrs_cnt;
+ u32 free_list_cnt;
+ u32 dma_pages_cnt;
+ u32 heads_cnt;
+ u32 free_heads_cnt;
+ u32 headroom;
+ u32 chunk_size;
+ u32 frame_len;
+ bool cheap_dma;
+ bool unaligned;
+ void *addrs;
+ struct device *dev;
+ struct xdp_buff_xsk *free_heads[0];
+};
+
+struct xdp_buff_xsk {
+ struct xdp_buff xdp;
+ dma_addr_t dma;
+ dma_addr_t frame_dma;
+ struct xsk_buff_pool *pool;
+ bool unaligned;
+ u64 orig_addr;
+ struct list_head free_list_node;
+};
+
+struct flow_match {
+ struct flow_dissector *dissector;
+ void *mask;
+ void *key;
+};
+
+struct flow_match_meta {
+ struct flow_dissector_key_meta *key;
+ struct flow_dissector_key_meta *mask;
+};
+
+struct flow_match_basic {
+ struct flow_dissector_key_basic *key;
+ struct flow_dissector_key_basic *mask;
+};
+
+struct flow_match_control {
+ struct flow_dissector_key_control *key;
+ struct flow_dissector_key_control *mask;
+};
+
+struct flow_match_eth_addrs {
+ struct flow_dissector_key_eth_addrs *key;
+ struct flow_dissector_key_eth_addrs *mask;
+};
+
+struct flow_match_vlan {
+ struct flow_dissector_key_vlan *key;
+ struct flow_dissector_key_vlan *mask;
+};
+
+struct flow_match_ipv4_addrs {
+ struct flow_dissector_key_ipv4_addrs *key;
+ struct flow_dissector_key_ipv4_addrs *mask;
+};
+
+struct flow_match_ipv6_addrs {
+ struct flow_dissector_key_ipv6_addrs *key;
+ struct flow_dissector_key_ipv6_addrs *mask;
+};
+
+struct flow_match_ip {
+ struct flow_dissector_key_ip *key;
+ struct flow_dissector_key_ip *mask;
+};
+
+struct flow_match_ports {
+ struct flow_dissector_key_ports *key;
+ struct flow_dissector_key_ports *mask;
+};
+
+struct flow_match_icmp {
+ struct flow_dissector_key_icmp *key;
+ struct flow_dissector_key_icmp *mask;
+};
+
+struct flow_match_tcp {
+ struct flow_dissector_key_tcp *key;
+ struct flow_dissector_key_tcp *mask;
+};
+
+struct flow_match_mpls {
+ struct flow_dissector_key_mpls *key;
+ struct flow_dissector_key_mpls *mask;
+};
+
+struct flow_match_enc_keyid {
+ struct flow_dissector_key_keyid *key;
+ struct flow_dissector_key_keyid *mask;
+};
+
+struct flow_match_enc_opts {
+ struct flow_dissector_key_enc_opts *key;
+ struct flow_dissector_key_enc_opts *mask;
+};
+
+struct flow_match_ct {
+ struct flow_dissector_key_ct *key;
+ struct flow_dissector_key_ct *mask;
+};
+
+enum flow_action_id {
+ FLOW_ACTION_ACCEPT = 0,
+ FLOW_ACTION_DROP = 1,
+ FLOW_ACTION_TRAP = 2,
+ FLOW_ACTION_GOTO = 3,
+ FLOW_ACTION_REDIRECT = 4,
+ FLOW_ACTION_MIRRED = 5,
+ FLOW_ACTION_REDIRECT_INGRESS = 6,
+ FLOW_ACTION_MIRRED_INGRESS = 7,
+ FLOW_ACTION_VLAN_PUSH = 8,
+ FLOW_ACTION_VLAN_POP = 9,
+ FLOW_ACTION_VLAN_MANGLE = 10,
+ FLOW_ACTION_TUNNEL_ENCAP = 11,
+ FLOW_ACTION_TUNNEL_DECAP = 12,
+ FLOW_ACTION_MANGLE = 13,
+ FLOW_ACTION_ADD = 14,
+ FLOW_ACTION_CSUM = 15,
+ FLOW_ACTION_MARK = 16,
+ FLOW_ACTION_PTYPE = 17,
+ FLOW_ACTION_PRIORITY = 18,
+ FLOW_ACTION_WAKE = 19,
+ FLOW_ACTION_QUEUE = 20,
+ FLOW_ACTION_SAMPLE = 21,
+ FLOW_ACTION_POLICE = 22,
+ FLOW_ACTION_CT = 23,
+ FLOW_ACTION_CT_METADATA = 24,
+ FLOW_ACTION_MPLS_PUSH = 25,
+ FLOW_ACTION_MPLS_POP = 26,
+ FLOW_ACTION_MPLS_MANGLE = 27,
+ FLOW_ACTION_GATE = 28,
+ NUM_FLOW_ACTIONS = 29,
+};
+
+enum flow_action_mangle_base {
+ FLOW_ACT_MANGLE_UNSPEC = 0,
+ FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3,
+ FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4,
+ FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5,
+};
+
+enum flow_action_hw_stats {
+ FLOW_ACTION_HW_STATS_IMMEDIATE = 1,
+ FLOW_ACTION_HW_STATS_DELAYED = 2,
+ FLOW_ACTION_HW_STATS_ANY = 3,
+ FLOW_ACTION_HW_STATS_DISABLED = 4,
+ FLOW_ACTION_HW_STATS_DONT_CARE = 7,
+};
+
+typedef void (*action_destr)(void *);
+
+struct flow_action_cookie {
+ u32 cookie_len;
+ u8 cookie[0];
+};
+
+struct nf_flowtable;
+
+struct psample_group;
+
+struct action_gate_entry;
+
+struct flow_action_entry {
+ enum flow_action_id id;
+ enum flow_action_hw_stats hw_stats;
+ action_destr destructor;
+ void *destructor_priv;
+ union {
+ u32 chain_index;
+ struct net_device *dev;
+ struct {
+ u16 vid;
+ __be16 proto;
+ u8 prio;
+ } vlan;
+ struct {
+ enum flow_action_mangle_base htype;
+ u32 offset;
+ u32 mask;
+ u32 val;
+ } mangle;
+ struct ip_tunnel_info *tunnel;
+ u32 csum_flags;
+ u32 mark;
+ u16 ptype;
+ u32 priority;
+ struct {
+ u32 ctx;
+ u32 index;
+ u8 vf;
+ } queue;
+ struct {
+ struct psample_group *psample_group;
+ u32 rate;
+ u32 trunc_size;
+ bool truncate;
+ } sample;
+ struct {
+ s64 burst;
+ u64 rate_bytes_ps;
+ } police;
+ struct {
+ int action;
+ u16 zone;
+ struct nf_flowtable *flow_table;
+ } ct;
+ struct {
+ long unsigned int cookie;
+ u32 mark;
+ u32 labels[4];
+ } ct_metadata;
+ struct {
+ u32 label;
+ __be16 proto;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+ } mpls_push;
+ struct {
+ __be16 proto;
+ } mpls_pop;
+ struct {
+ u32 label;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+ } mpls_mangle;
+ struct {
+ u32 index;
+ s32 prio;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimeext;
+ u32 num_entries;
+ struct action_gate_entry *entries;
+ } gate;
+ };
+ struct flow_action_cookie *cookie;
+};
+
+struct flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry entries[0];
+};
+
+struct flow_rule {
+ struct flow_match match;
+ struct flow_action action;
+};
+
+enum flow_block_command {
+ FLOW_BLOCK_BIND = 0,
+ FLOW_BLOCK_UNBIND = 1,
+};
+
+enum flow_block_binder_type {
+ FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0,
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1,
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2,
+};
+
+struct flow_block_offload {
+ enum flow_block_command command;
+ enum flow_block_binder_type binder_type;
+ bool block_shared;
+ bool unlocked_driver_cb;
+ struct net *net;
+ struct flow_block *block;
+ struct list_head cb_list;
+ struct list_head *driver_block_list;
+ struct netlink_ext_ack *extack;
+};
+
+struct flow_block_cb;
+
+struct flow_block_indr {
+ struct list_head list;
+ struct net_device *dev;
+ enum flow_block_binder_type binder_type;
+ void *data;
+ void (*cleanup)(struct flow_block_cb *);
+};
+
+struct flow_block_cb {
+ struct list_head driver_list;
+ struct list_head list;
+ flow_setup_cb_t *cb;
+ void *cb_ident;
+ void *cb_priv;
+ void (*release)(void *);
+ struct flow_block_indr indr;
+ unsigned int refcnt;
+};
+
+typedef int flow_indr_block_bind_cb_t(struct net_device *, void *, enum tc_setup_type, void *);
+
+struct flow_indr_dev {
+ struct list_head list;
+ flow_indr_block_bind_cb_t *cb;
+ void *cb_priv;
+ refcount_t refcnt;
+ struct callback_head rcu;
+};
+
+struct netdev_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_queue *, char *);
+ ssize_t (*store)(struct netdev_queue *, const char *, size_t);
+};
+
+struct strp_stats {
+ long long unsigned int msgs;
+ long long unsigned int bytes;
+ unsigned int mem_fail;
+ unsigned int need_more_hdr;
+ unsigned int msg_too_big;
+ unsigned int msg_timeouts;
+ unsigned int bad_hdr_len;
+};
+
+struct strparser;
+
+struct strp_callbacks {
+ int (*parse_msg)(struct strparser *, struct sk_buff *);
+ void (*rcv_msg)(struct strparser *, struct sk_buff *);
+ int (*read_sock_done)(struct strparser *, int);
+ void (*abort_parser)(struct strparser *, int);
+ void (*lock)(struct strparser *);
+ void (*unlock)(struct strparser *);
+};
+
+struct strparser {
+ struct sock *sk;
+ u32 stopped: 1;
+ u32 paused: 1;
+ u32 aborted: 1;
+ u32 interrupted: 1;
+ u32 unrecov_intr: 1;
+ struct sk_buff **skb_nextp;
+ struct sk_buff *skb_head;
+ unsigned int need_bytes;
+ struct delayed_work msg_timer_work;
+ struct work_struct work;
+ struct strp_stats stats;
+ struct strp_callbacks cb;
+};
+
+enum __sk_action {
+ __SK_DROP = 0,
+ __SK_PASS = 1,
+ __SK_REDIRECT = 2,
+ __SK_NONE = 3,
+};
+
+struct sk_psock_progs {
+ struct bpf_prog *msg_parser;
+ struct bpf_prog *skb_parser;
+ struct bpf_prog *skb_verdict;
+};
+
+enum sk_psock_state_bits {
+ SK_PSOCK_TX_ENABLED = 0,
+};
+
+struct sk_psock_link {
+ struct list_head list;
+ struct bpf_map *map;
+ void *link_raw;
+};
+
+struct sk_psock_parser {
+ struct strparser strp;
+ bool enabled;
+ void (*saved_data_ready)(struct sock *);
+};
+
+struct sk_psock_work_state {
+ struct sk_buff *skb;
+ u32 len;
+ u32 off;
+};
+
+struct sk_psock {
+ struct sock *sk;
+ struct sock *sk_redir;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 eval;
+ struct sk_msg *cork;
+ struct sk_psock_progs progs;
+ struct sk_psock_parser parser;
+ struct sk_buff_head ingress_skb;
+ struct list_head ingress_msg;
+ long unsigned int state;
+ struct list_head link;
+ spinlock_t link_lock;
+ refcount_t refcnt;
+ void (*saved_unhash)(struct sock *);
+ void (*saved_close)(struct sock *, long int);
+ void (*saved_write_space)(struct sock *);
+ struct proto *sk_proto;
+ struct sk_psock_work_state work_state;
+ struct work_struct work;
+ union {
+ struct callback_head rcu;
+ struct work_struct gc;
+ };
+};
+
+struct tls_crypto_info {
+ __u16 version;
+ __u16 cipher_type;
+};
+
+struct tls12_crypto_info_aes_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[8];
+ unsigned char key[16];
+ unsigned char salt[4];
+ unsigned char rec_seq[8];
+};
+
+struct tls12_crypto_info_aes_gcm_256 {
+ struct tls_crypto_info info;
+ unsigned char iv[8];
+ unsigned char key[32];
+ unsigned char salt[4];
+ unsigned char rec_seq[8];
+};
+
+struct tls_sw_context_rx {
+ struct crypto_aead *aead_recv;
+ struct crypto_wait async_wait;
+ struct strparser strp;
+ struct sk_buff_head rx_list;
+ void (*saved_data_ready)(struct sock *);
+ struct sk_buff *recv_pkt;
+ u8 control;
+ u8 async_capable: 1;
+ u8 decrypted: 1;
+ atomic_t decrypt_pending;
+ spinlock_t decrypt_compl_lock;
+ bool async_notify;
+};
+
+struct cipher_context {
+ char *iv;
+ char *rec_seq;
+};
+
+union tls_crypto_context {
+ struct tls_crypto_info info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ };
+};
+
+struct tls_prot_info {
+ u16 version;
+ u16 cipher_type;
+ u16 prepend_size;
+ u16 tag_size;
+ u16 overhead_size;
+ u16 iv_size;
+ u16 salt_size;
+ u16 rec_seq_size;
+ u16 aad_size;
+ u16 tail_size;
+};
+
+struct tls_context {
+ struct tls_prot_info prot_info;
+ u8 tx_conf: 3;
+ u8 rx_conf: 3;
+ int (*push_pending_record)(struct sock *, int);
+ void (*sk_write_space)(struct sock *);
+ void *priv_ctx_tx;
+ void *priv_ctx_rx;
+ struct net_device *netdev;
+ struct cipher_context tx;
+ struct cipher_context rx;
+ struct scatterlist *partially_sent_record;
+ u16 partially_sent_offset;
+ bool in_tcp_sendpages;
+ bool pending_open_record_frags;
+ struct mutex tx_lock;
+ long unsigned int flags;
+ struct proto *sk_proto;
+ void (*sk_destruct)(struct sock *);
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
+ struct list_head list;
+ refcount_t refcount;
+ struct callback_head rcu;
+};
+
+struct trace_event_raw_kfree_skb {
+ struct trace_entry ent;
+ void *skbaddr;
+ void *location;
+ short unsigned int protocol;
+ char __data[0];
+};
+
+struct trace_event_raw_consume_skb {
+ struct trace_entry ent;
+ void *skbaddr;
+ char __data[0];
+};
+
+struct trace_event_raw_skb_copy_datagram_iovec {
+ struct trace_entry ent;
+ const void *skbaddr;
+ int len;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_kfree_skb {};
+
+struct trace_event_data_offsets_consume_skb {};
+
+struct trace_event_data_offsets_skb_copy_datagram_iovec {};
+
+typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *);
+
+typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *);
+
+typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int);
+
+struct trace_event_raw_net_dev_start_xmit {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ u16 queue_mapping;
+ const void *skbaddr;
+ bool vlan_tagged;
+ u16 vlan_proto;
+ u16 vlan_tci;
+ u16 protocol;
+ u8 ip_summed;
+ unsigned int len;
+ unsigned int data_len;
+ int network_offset;
+ bool transport_offset_valid;
+ int transport_offset;
+ u8 tx_flags;
+ u16 gso_size;
+ u16 gso_segs;
+ u16 gso_type;
+ char __data[0];
+};
+
+struct trace_event_raw_net_dev_xmit {
+ struct trace_entry ent;
+ void *skbaddr;
+ unsigned int len;
+ int rc;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_net_dev_xmit_timeout {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ u32 __data_loc_driver;
+ int queue_index;
+ char __data[0];
+};
+
+struct trace_event_raw_net_dev_template {
+ struct trace_entry ent;
+ void *skbaddr;
+ unsigned int len;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_raw_net_dev_rx_verbose_template {
+ struct trace_entry ent;
+ u32 __data_loc_name;
+ unsigned int napi_id;
+ u16 queue_mapping;
+ const void *skbaddr;
+ bool vlan_tagged;
+ u16 vlan_proto;
+ u16 vlan_tci;
+ u16 protocol;
+ u8 ip_summed;
+ u32 hash;
+ bool l4_hash;
+ unsigned int len;
+ unsigned int data_len;
+ unsigned int truesize;
+ bool mac_header_valid;
+ int mac_header;
+ unsigned char nr_frags;
+ u16 gso_size;
+ u16 gso_type;
+ char __data[0];
+};
+
+struct trace_event_raw_net_dev_rx_exit_template {
+ struct trace_entry ent;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_net_dev_start_xmit {
+ u32 name;
+};
+
+struct trace_event_data_offsets_net_dev_xmit {
+ u32 name;
+};
+
+struct trace_event_data_offsets_net_dev_xmit_timeout {
+ u32 name;
+ u32 driver;
+};
+
+struct trace_event_data_offsets_net_dev_template {
+ u32 name;
+};
+
+struct trace_event_data_offsets_net_dev_rx_verbose_template {
+ u32 name;
+};
+
+struct trace_event_data_offsets_net_dev_rx_exit_template {};
+
+typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *);
+
+typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int);
+
+typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int);
+
+typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *);
+
+typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *);
+
+typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *);
+
+typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *);
+
+typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *);
+
+typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *);
+
+typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *);
+
+typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *);
+
+typedef void (*btf_trace_netif_rx_ni_entry)(void *, const struct sk_buff *);
+
+typedef void (*btf_trace_napi_gro_frags_exit)(void *, int);
+
+typedef void (*btf_trace_napi_gro_receive_exit)(void *, int);
+
+typedef void (*btf_trace_netif_receive_skb_exit)(void *, int);
+
+typedef void (*btf_trace_netif_rx_exit)(void *, int);
+
+typedef void (*btf_trace_netif_rx_ni_exit)(void *, int);
+
+typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int);
+
+struct trace_event_raw_napi_poll {
+ struct trace_entry ent;
+ struct napi_struct *napi;
+ u32 __data_loc_dev_name;
+ int work;
+ int budget;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_napi_poll {
+ u32 dev_name;
+};
+
+typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int);
+
+enum tcp_ca_state {
+ TCP_CA_Open = 0,
+ TCP_CA_Disorder = 1,
+ TCP_CA_CWR = 2,
+ TCP_CA_Recovery = 3,
+ TCP_CA_Loss = 4,
+};
+
+struct trace_event_raw_sock_rcvqueue_full {
+ struct trace_entry ent;
+ int rmem_alloc;
+ unsigned int truesize;
+ int sk_rcvbuf;
+ char __data[0];
+};
+
+struct trace_event_raw_sock_exceed_buf_limit {
+ struct trace_entry ent;
+ char name[32];
+ long int *sysctl_mem;
+ long int allocated;
+ int sysctl_rmem;
+ int rmem_alloc;
+ int sysctl_wmem;
+ int wmem_alloc;
+ int wmem_queued;
+ int kind;
+ char __data[0];
+};
+
+struct trace_event_raw_inet_sock_set_state {
+ struct trace_entry ent;
+ const void *skaddr;
+ int oldstate;
+ int newstate;
+ __u16 sport;
+ __u16 dport;
+ __u16 family;
+ __u16 protocol;
+ __u8 saddr[4];
+ __u8 daddr[4];
+ __u8 saddr_v6[16];
+ __u8 daddr_v6[16];
+ char __data[0];
+};
+
+struct trace_event_data_offsets_sock_rcvqueue_full {};
+
+struct trace_event_data_offsets_sock_exceed_buf_limit {};
+
+struct trace_event_data_offsets_inet_sock_set_state {};
+
+typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *);
+
+typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int);
+
+typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int);
+
+struct trace_event_raw_udp_fail_queue_rcv_skb {
+ struct trace_entry ent;
+ int rc;
+ __u16 lport;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_udp_fail_queue_rcv_skb {};
+
+typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *);
+
+struct trace_event_raw_tcp_event_sk_skb {
+ struct trace_entry ent;
+ const void *skbaddr;
+ const void *skaddr;
+ int state;
+ __u16 sport;
+ __u16 dport;
+ __u8 saddr[4];
+ __u8 daddr[4];
+ __u8 saddr_v6[16];
+ __u8 daddr_v6[16];
+ char __data[0];
+};
+
+struct trace_event_raw_tcp_event_sk {
+ struct trace_entry ent;
+ const void *skaddr;
+ __u16 sport;
+ __u16 dport;
+ __u8 saddr[4];
+ __u8 daddr[4];
+ __u8 saddr_v6[16];
+ __u8 daddr_v6[16];
+ __u64 sock_cookie;
+ char __data[0];
+};
+
+struct trace_event_raw_tcp_retransmit_synack {
+ struct trace_entry ent;
+ const void *skaddr;
+ const void *req;
+ __u16 sport;
+ __u16 dport;
+ __u8 saddr[4];
+ __u8 daddr[4];
+ __u8 saddr_v6[16];
+ __u8 daddr_v6[16];
+ char __data[0];
+};
+
+struct trace_event_raw_tcp_probe {
+ struct trace_entry ent;
+ __u8 saddr[28];
+ __u8 daddr[28];
+ __u16 sport;
+ __u16 dport;
+ __u32 mark;
+ __u16 data_len;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 snd_cwnd;
+ __u32 ssthresh;
+ __u32 snd_wnd;
+ __u32 srtt;
+ __u32 rcv_wnd;
+ __u64 sock_cookie;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_tcp_event_sk_skb {};
+
+struct trace_event_data_offsets_tcp_event_sk {};
+
+struct trace_event_data_offsets_tcp_retransmit_synack {};
+
+struct trace_event_data_offsets_tcp_probe {};
+
+typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *);
+
+typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *);
+
+typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *);
+
+typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *);
+
+typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *);
+
+typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *);
+
+typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *);
+
+struct trace_event_raw_fib_table_lookup {
+ struct trace_entry ent;
+ u32 tb_id;
+ int err;
+ int oif;
+ int iif;
+ u8 proto;
+ __u8 tos;
+ __u8 scope;
+ __u8 flags;
+ __u8 src[4];
+ __u8 dst[4];
+ __u8 gw4[4];
+ __u8 gw6[16];
+ u16 sport;
+ u16 dport;
+ u32 __data_loc_name;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_fib_table_lookup {
+ u32 name;
+};
+
+typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int);
+
+struct trace_event_raw_qdisc_dequeue {
+ struct trace_entry ent;
+ struct Qdisc *qdisc;
+ const struct netdev_queue *txq;
+ int packets;
+ void *skbaddr;
+ int ifindex;
+ u32 handle;
+ u32 parent;
+ long unsigned int txq_state;
+ char __data[0];
+};
+
+struct trace_event_raw_qdisc_reset {
+ struct trace_entry ent;
+ u32 __data_loc_dev;
+ u32 __data_loc_kind;
+ u32 parent;
+ u32 handle;
+ char __data[0];
+};
+
+struct trace_event_raw_qdisc_destroy {
+ struct trace_entry ent;
+ u32 __data_loc_dev;
+ u32 __data_loc_kind;
+ u32 parent;
+ u32 handle;
+ char __data[0];
+};
+
+struct trace_event_raw_qdisc_create {
+ struct trace_entry ent;
+ u32 __data_loc_dev;
+ u32 __data_loc_kind;
+ u32 parent;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_qdisc_dequeue {};
+
+struct trace_event_data_offsets_qdisc_reset {
+ u32 dev;
+ u32 kind;
+};
+
+struct trace_event_data_offsets_qdisc_destroy {
+ u32 dev;
+ u32 kind;
+};
+
+struct trace_event_data_offsets_qdisc_create {
+ u32 dev;
+ u32 kind;
+};
+
+typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *);
+
+typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *);
+
+typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *);
+
+typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32);
+
+struct bridge_stp_xstats {
+ __u64 transition_blk;
+ __u64 transition_fwd;
+ __u64 rx_bpdu;
+ __u64 tx_bpdu;
+ __u64 rx_tcn;
+ __u64 tx_tcn;
+};
+
+struct br_mcast_stats {
+ __u64 igmp_v1queries[2];
+ __u64 igmp_v2queries[2];
+ __u64 igmp_v3queries[2];
+ __u64 igmp_leaves[2];
+ __u64 igmp_v1reports[2];
+ __u64 igmp_v2reports[2];
+ __u64 igmp_v3reports[2];
+ __u64 igmp_parse_errors;
+ __u64 mld_v1queries[2];
+ __u64 mld_v2queries[2];
+ __u64 mld_leaves[2];
+ __u64 mld_v1reports[2];
+ __u64 mld_v2reports[2];
+ __u64 mld_parse_errors;
+ __u64 mcast_bytes[2];
+ __u64 mcast_packets[2];
+};
+
+struct br_ip {
+ union {
+ __be32 ip4;
+ struct in6_addr ip6;
+ } u;
+ __be16 proto;
+ __u16 vid;
+};
+
+struct bridge_id {
+ unsigned char prio[2];
+ unsigned char addr[6];
+};
+
+typedef struct bridge_id bridge_id;
+
+struct mac_addr {
+ unsigned char addr[6];
+};
+
+typedef struct mac_addr mac_addr;
+
+typedef __u16 port_id;
+
+struct bridge_mcast_own_query {
+ struct timer_list timer;
+ u32 startup_sent;
+};
+
+struct bridge_mcast_other_query {
+ struct timer_list timer;
+ long unsigned int delay_time;
+};
+
+struct net_bridge_port;
+
+struct bridge_mcast_querier {
+ struct br_ip addr;
+ struct net_bridge_port *port;
+};
+
+struct net_bridge;
+
+struct bridge_mcast_stats;
+
+struct net_bridge_port {
+ struct net_bridge *br;
+ struct net_device *dev;
+ struct list_head list;
+ long unsigned int flags;
+ struct net_bridge_port *backup_port;
+ u8 priority;
+ u8 state;
+ u16 port_no;
+ unsigned char topology_change_ack;
+ unsigned char config_pending;
+ port_id port_id;
+ port_id designated_port;
+ bridge_id designated_root;
+ bridge_id designated_bridge;
+ u32 path_cost;
+ u32 designated_cost;
+ long unsigned int designated_age;
+ struct timer_list forward_delay_timer;
+ struct timer_list hold_timer;
+ struct timer_list message_age_timer;
+ struct kobject kobj;
+ struct callback_head rcu;
+ struct bridge_mcast_own_query ip4_own_query;
+ struct bridge_mcast_own_query ip6_own_query;
+ unsigned char multicast_router;
+ struct bridge_mcast_stats *mcast_stats;
+ struct timer_list multicast_router_timer;
+ struct hlist_head mglist;
+ struct hlist_node rlist;
+ char sysfs_name[16];
+ int offload_fwd_mark;
+ u16 group_fwd_mask;
+ u16 backup_redirected_cnt;
+ struct bridge_stp_xstats stp_xstats;
+};
+
+struct bridge_mcast_stats {
+ struct br_mcast_stats mstats;
+ struct u64_stats_sync syncp;
+};
+
+struct net_bridge {
+ spinlock_t lock;
+ spinlock_t hash_lock;
+ struct list_head port_list;
+ struct net_device *dev;
+ struct pcpu_sw_netstats *stats;
+ long unsigned int options;
+ struct rhashtable fdb_hash_tbl;
+ u16 group_fwd_mask;
+ u16 group_fwd_mask_required;
+ bridge_id designated_root;
+ bridge_id bridge_id;
+ unsigned char topology_change;
+ unsigned char topology_change_detected;
+ u16 root_port;
+ long unsigned int max_age;
+ long unsigned int hello_time;
+ long unsigned int forward_delay;
+ long unsigned int ageing_time;
+ long unsigned int bridge_max_age;
+ long unsigned int bridge_hello_time;
+ long unsigned int bridge_forward_delay;
+ long unsigned int bridge_ageing_time;
+ u32 root_path_cost;
+ u8 group_addr[6];
+ enum {
+ BR_NO_STP = 0,
+ BR_KERNEL_STP = 1,
+ BR_USER_STP = 2,
+ } stp_enabled;
+ u32 hash_max;
+ u32 multicast_last_member_count;
+ u32 multicast_startup_query_count;
+ u8 multicast_igmp_version;
+ u8 multicast_router;
+ u8 multicast_mld_version;
+ spinlock_t multicast_lock;
+ long unsigned int multicast_last_member_interval;
+ long unsigned int multicast_membership_interval;
+ long unsigned int multicast_querier_interval;
+ long unsigned int multicast_query_interval;
+ long unsigned int multicast_query_response_interval;
+ long unsigned int multicast_startup_query_interval;
+ struct rhashtable mdb_hash_tbl;
+ struct hlist_head mdb_list;
+ struct hlist_head router_list;
+ struct timer_list multicast_router_timer;
+ struct bridge_mcast_other_query ip4_other_query;
+ struct bridge_mcast_own_query ip4_own_query;
+ struct bridge_mcast_querier ip4_querier;
+ struct bridge_mcast_stats *mcast_stats;
+ struct bridge_mcast_other_query ip6_other_query;
+ struct bridge_mcast_own_query ip6_own_query;
+ struct bridge_mcast_querier ip6_querier;
+ struct timer_list hello_timer;
+ struct timer_list tcn_timer;
+ struct timer_list topology_change_timer;
+ struct delayed_work gc_work;
+ struct kobject *ifobj;
+ u32 auto_cnt;
+ int offload_fwd_mark;
+ struct hlist_head fdb_list;
+};
+
+struct net_bridge_fdb_key {
+ mac_addr addr;
+ u16 vlan_id;
+};
+
+struct net_bridge_fdb_entry {
+ struct rhash_head rhnode;
+ struct net_bridge_port *dst;
+ struct net_bridge_fdb_key key;
+ struct hlist_node fdb_node;
+ long unsigned int flags;
+ long: 64;
+ long: 64;
+ long unsigned int updated;
+ long unsigned int used;
+ struct callback_head rcu;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct nf_br_ops {
+ int (*br_dev_xmit_hook)(struct sk_buff *);
+};
+
+struct trace_event_raw_br_fdb_add {
+ struct trace_entry ent;
+ u8 ndm_flags;
+ u32 __data_loc_dev;
+ unsigned char addr[6];
+ u16 vid;
+ u16 nlh_flags;
+ char __data[0];
+};
+
+struct trace_event_raw_br_fdb_external_learn_add {
+ struct trace_entry ent;
+ u32 __data_loc_br_dev;
+ u32 __data_loc_dev;
+ unsigned char addr[6];
+ u16 vid;
+ char __data[0];
+};
+
+struct trace_event_raw_fdb_delete {
+ struct trace_entry ent;
+ u32 __data_loc_br_dev;
+ u32 __data_loc_dev;
+ unsigned char addr[6];
+ u16 vid;
+ char __data[0];
+};
+
+struct trace_event_raw_br_fdb_update {
+ struct trace_entry ent;
+ u32 __data_loc_br_dev;
+ u32 __data_loc_dev;
+ unsigned char addr[6];
+ u16 vid;
+ long unsigned int flags;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_br_fdb_add {
+ u32 dev;
+};
+
+struct trace_event_data_offsets_br_fdb_external_learn_add {
+ u32 br_dev;
+ u32 dev;
+};
+
+struct trace_event_data_offsets_fdb_delete {
+ u32 br_dev;
+ u32 dev;
+};
+
+struct trace_event_data_offsets_br_fdb_update {
+ u32 br_dev;
+ u32 dev;
+};
+
+typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16);
+
+typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16);
+
+typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *);
+
+typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int);
+
+struct trace_event_raw_neigh_create {
+ struct trace_entry ent;
+ u32 family;
+ u32 __data_loc_dev;
+ int entries;
+ u8 created;
+ u8 gc_exempt;
+ u8 primary_key4[4];
+ u8 primary_key6[16];
+ char __data[0];
+};
+
+struct trace_event_raw_neigh_update {
+ struct trace_entry ent;
+ u32 family;
+ u32 __data_loc_dev;
+ u8 lladdr[32];
+ u8 lladdr_len;
+ u8 flags;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
+ int refcnt;
+ __u8 primary_key4[4];
+ __u8 primary_key6[16];
+ long unsigned int confirmed;
+ long unsigned int updated;
+ long unsigned int used;
+ u8 new_lladdr[32];
+ u8 new_state;
+ u32 update_flags;
+ u32 pid;
+ char __data[0];
+};
+
+struct trace_event_raw_neigh__update {
+ struct trace_entry ent;
+ u32 family;
+ u32 __data_loc_dev;
+ u8 lladdr[32];
+ u8 lladdr_len;
+ u8 flags;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
+ int refcnt;
+ __u8 primary_key4[4];
+ __u8 primary_key6[16];
+ long unsigned int confirmed;
+ long unsigned int updated;
+ long unsigned int used;
+ u32 err;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_neigh_create {
+ u32 dev;
+};
+
+struct trace_event_data_offsets_neigh_update {
+ u32 dev;
+};
+
+struct trace_event_data_offsets_neigh__update {
+ u32 dev;
+};
+
+typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool);
+
+typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32);
+
+typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int);
+
+typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int);
+
+typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int);
+
+typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int);
+
+typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int);
+
+struct update_classid_context {
+ u32 classid;
+ unsigned int batch;
+};
+
+struct bpf_stab {
+ struct bpf_map map;
+ struct sock **sks;
+ struct sk_psock_progs progs;
+ raw_spinlock_t lock;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64);
+
+typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64);
+
+typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64);
+
+struct bpf_shtab_elem {
+ struct callback_head rcu;
+ u32 hash;
+ struct sock *sk;
+ struct hlist_node node;
+ u8 key[0];
+};
+
+struct bpf_shtab_bucket {
+ struct hlist_head head;
+ raw_spinlock_t lock;
+};
+
+struct bpf_shtab {
+ struct bpf_map map;
+ struct bpf_shtab_bucket *buckets;
+ u32 buckets_num;
+ u32 elem_size;
+ struct sk_psock_progs progs;
+ atomic_t count;
+ long: 32;
+ long: 64;
+ long: 64;
+};
+
+typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64);
+
+typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64);
+
+typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64);
+
+struct dst_cache_pcpu {
+ long unsigned int refresh_ts;
+ struct dst_entry *dst;
+ u32 cookie;
+ union {
+ struct in_addr in_saddr;
+ struct in6_addr in6_saddr;
+ };
+};
+
+struct gro_cell;
+
+struct gro_cells {
+ struct gro_cell *cells;
+};
+
+struct gro_cell {
+ struct sk_buff_head napi_skbs;
+ struct napi_struct napi;
+};
+
+enum netdev_lag_tx_type {
+ NETDEV_LAG_TX_TYPE_UNKNOWN = 0,
+ NETDEV_LAG_TX_TYPE_RANDOM = 1,
+ NETDEV_LAG_TX_TYPE_BROADCAST = 2,
+ NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3,
+ NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4,
+ NETDEV_LAG_TX_TYPE_HASH = 5,
+};
+
+enum netdev_lag_hash {
+ NETDEV_LAG_HASH_NONE = 0,
+ NETDEV_LAG_HASH_L2 = 1,
+ NETDEV_LAG_HASH_L34 = 2,
+ NETDEV_LAG_HASH_L23 = 3,
+ NETDEV_LAG_HASH_E23 = 4,
+ NETDEV_LAG_HASH_E34 = 5,
+ NETDEV_LAG_HASH_UNKNOWN = 6,
+};
+
+struct netdev_lag_upper_info {
+ enum netdev_lag_tx_type tx_type;
+ enum netdev_lag_hash hash_type;
+};
+
+enum {
+ BPF_SK_STORAGE_GET_F_CREATE = 1,
+};
+
+struct bpf_sk_storage_data;
+
+struct bpf_sk_storage {
+ struct bpf_sk_storage_data *cache[16];
+ struct hlist_head list;
+ struct sock *sk;
+ struct callback_head rcu;
+ raw_spinlock_t lock;
+};
+
+enum {
+ SK_DIAG_BPF_STORAGE_REQ_NONE = 0,
+ SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1,
+ __SK_DIAG_BPF_STORAGE_REQ_MAX = 2,
+};
+
+enum {
+ SK_DIAG_BPF_STORAGE_REP_NONE = 0,
+ SK_DIAG_BPF_STORAGE = 1,
+ __SK_DIAG_BPF_STORAGE_REP_MAX = 2,
+};
+
+enum {
+ SK_DIAG_BPF_STORAGE_NONE = 0,
+ SK_DIAG_BPF_STORAGE_PAD = 1,
+ SK_DIAG_BPF_STORAGE_MAP_ID = 2,
+ SK_DIAG_BPF_STORAGE_MAP_VALUE = 3,
+ __SK_DIAG_BPF_STORAGE_MAX = 4,
+};
+
+struct bucket___2 {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+struct bpf_sk_storage_map {
+ struct bpf_map map;
+ struct bucket___2 *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct bpf_sk_storage_data {
+ struct bpf_sk_storage_map *smap;
+ u8 data[0];
+};
+
+struct bpf_sk_storage_elem {
+ struct hlist_node map_node;
+ struct hlist_node snode;
+ struct bpf_sk_storage *sk_storage;
+ struct callback_head rcu;
+ long: 64;
+ struct bpf_sk_storage_data sdata;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64);
+
+typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *);
+
+struct bpf_sk_storage_diag {
+ u32 nr_maps;
+ struct bpf_map *maps[0];
+};
+
+struct llc_addr {
+ unsigned char lsap;
+ unsigned char mac[6];
+};
+
+struct llc_sap {
+ unsigned char state;
+ unsigned char p_bit;
+ unsigned char f_bit;
+ refcount_t refcnt;
+ int (*rcv_func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+ struct llc_addr laddr;
+ struct list_head node;
+ spinlock_t sk_lock;
+ int sk_count;
+ struct hlist_nulls_head sk_laddr_hash[64];
+ struct hlist_head sk_dev_hash[64];
+ struct callback_head rcu;
+};
+
+struct llc_pdu_sn {
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl_1;
+ u8 ctrl_2;
+};
+
+struct llc_pdu_un {
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl_1;
+};
+
+typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *);
+
+struct nvmem_cell___2;
+
+struct datalink_proto {
+ unsigned char type[8];
+ struct llc_sap *sap;
+ short unsigned int header_length;
+ int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+ int (*request)(struct datalink_proto *, struct sk_buff *, unsigned char *);
+ struct list_head node;
+};
+
+struct stp_proto {
+ unsigned char group_address[6];
+ void (*rcv)(const struct stp_proto *, struct sk_buff *, struct net_device *);
+ void *data;
+};
+
+struct tc_ratespec {
+ unsigned char cell_log;
+ __u8 linklayer;
+ short unsigned int overhead;
+ short int cell_align;
+ short unsigned int mpu;
+ __u32 rate;
+};
+
+struct tc_prio_qopt {
+ int bands;
+ __u8 priomap[16];
+};
+
+enum {
+ TCA_UNSPEC = 0,
+ TCA_KIND = 1,
+ TCA_OPTIONS = 2,
+ TCA_STATS = 3,
+ TCA_XSTATS = 4,
+ TCA_RATE = 5,
+ TCA_FCNT = 6,
+ TCA_STATS2 = 7,
+ TCA_STAB = 8,
+ TCA_PAD = 9,
+ TCA_DUMP_INVISIBLE = 10,
+ TCA_CHAIN = 11,
+ TCA_HW_OFFLOAD = 12,
+ TCA_INGRESS_BLOCK = 13,
+ TCA_EGRESS_BLOCK = 14,
+ TCA_DUMP_FLAGS = 15,
+ __TCA_MAX = 16,
+};
+
+struct skb_array {
+ struct ptr_ring ring;
+};
+
+struct psched_ratecfg {
+ u64 rate_bytes_ps;
+ u32 mult;
+ u16 overhead;
+ u8 linklayer;
+ u8 shift;
+};
+
+struct mini_Qdisc {
+ struct tcf_proto *filter_list;
+ struct tcf_block *block;
+ struct gnet_stats_basic_cpu *cpu_bstats;
+ struct gnet_stats_queue *cpu_qstats;
+ struct callback_head rcu;
+};
+
+struct mini_Qdisc_pair {
+ struct mini_Qdisc miniq1;
+ struct mini_Qdisc miniq2;
+ struct mini_Qdisc **p_miniq;
+};
+
+struct pfifo_fast_priv {
+ struct skb_array q[3];
+};
+
+typedef __u32 pao_T_____8;
+
+struct tc_qopt_offload_stats {
+ struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_queue *qstats;
+};
+
+enum tc_mq_command {
+ TC_MQ_CREATE = 0,
+ TC_MQ_DESTROY = 1,
+ TC_MQ_STATS = 2,
+ TC_MQ_GRAFT = 3,
+};
+
+struct tc_mq_opt_offload_graft_params {
+ long unsigned int queue;
+ u32 child_handle;
+};
+
+struct tc_mq_qopt_offload {
+ enum tc_mq_command command;
+ u32 handle;
+ union {
+ struct tc_qopt_offload_stats stats;
+ struct tc_mq_opt_offload_graft_params graft_params;
+ };
+};
+
+struct mq_sched {
+ struct Qdisc **qdiscs;
+};
+
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE = 0,
+ TC_LINKLAYER_ETHERNET = 1,
+ TC_LINKLAYER_ATM = 2,
+};
+
+enum {
+ TCA_STAB_UNSPEC = 0,
+ TCA_STAB_BASE = 1,
+ TCA_STAB_DATA = 2,
+ __TCA_STAB_MAX = 3,
+};
+
+struct qdisc_rate_table {
+ struct tc_ratespec rate;
+ u32 data[256];
+ struct qdisc_rate_table *next;
+ int refcnt;
+};
+
+struct Qdisc_class_common {
+ u32 classid;
+ struct hlist_node hnode;
+};
+
+struct Qdisc_class_hash {
+ struct hlist_head *hash;
+ unsigned int hashsize;
+ unsigned int hashmask;
+ unsigned int hashelems;
+};
+
+struct qdisc_watchdog {
+ u64 last_expires;
+ struct hrtimer timer;
+ struct Qdisc *qdisc;
+};
+
+enum tc_root_command {
+ TC_ROOT_GRAFT = 0,
+};
+
+struct tc_root_qopt_offload {
+ enum tc_root_command command;
+ u32 handle;
+ bool ingress;
+};
+
+struct check_loop_arg {
+ struct qdisc_walker w;
+ struct Qdisc *p;
+ int depth;
+};
+
+struct tcf_bind_args {
+ struct tcf_walker w;
+ long unsigned int base;
+ long unsigned int cl;
+ u32 classid;
+};
+
+struct tc_bind_class_args {
+ struct qdisc_walker w;
+ long unsigned int new_cl;
+ u32 portid;
+ u32 clid;
+};
+
+struct qdisc_dump_args {
+ struct qdisc_walker w;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+};
+
+enum net_xmit_qdisc_t {
+ __NET_XMIT_STOLEN = 65536,
+ __NET_XMIT_BYPASS = 131072,
+};
+
+struct tcf_t {
+ __u64 install;
+ __u64 lastuse;
+ __u64 expires;
+ __u64 firstuse;
+};
+
+struct psample_group {
+ struct list_head list;
+ struct net *net;
+ u32 group_num;
+ u32 refcount;
+ u32 seq;
+ struct callback_head rcu;
+};
+
+struct action_gate_entry {
+ u8 gate_state;
+ u32 interval;
+ s32 ipv;
+ s32 maxoctets;
+};
+
+enum qdisc_class_ops_flags {
+ QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
+};
+
+enum tcf_proto_ops_flags {
+ TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
+};
+
+typedef void tcf_chain_head_change_t(struct tcf_proto *, void *);
+
+struct tcf_idrinfo {
+ struct mutex lock;
+ struct idr action_idr;
+ struct net *net;
+};
+
+struct tc_action_ops;
+
+struct tc_cookie;
+
+struct tc_action {
+ const struct tc_action_ops *ops;
+ __u32 type;
+ struct tcf_idrinfo *idrinfo;
+ u32 tcfa_index;
+ refcount_t tcfa_refcnt;
+ atomic_t tcfa_bindcnt;
+ int tcfa_action;
+ struct tcf_t tcfa_tm;
+ struct gnet_stats_basic_packed tcfa_bstats;
+ struct gnet_stats_basic_packed tcfa_bstats_hw;
+ struct gnet_stats_queue tcfa_qstats;
+ struct net_rate_estimator *tcfa_rate_est;
+ spinlock_t tcfa_lock;
+ struct gnet_stats_basic_cpu *cpu_bstats;
+ struct gnet_stats_basic_cpu *cpu_bstats_hw;
+ struct gnet_stats_queue *cpu_qstats;
+ struct tc_cookie *act_cookie;
+ struct tcf_chain *goto_chain;
+ u32 tcfa_flags;
+ u8 hw_stats;
+ u8 used_hw_stats;
+ bool used_hw_stats_valid;
+};
+
+struct tc_cookie {
+ u8 *data;
+ u32 len;
+ struct callback_head rcu;
+};
+
+struct tcf_block_ext_info {
+ enum flow_block_binder_type binder_type;
+ tcf_chain_head_change_t *chain_head_change;
+ void *chain_head_change_priv;
+ u32 block_index;
+};
+
+struct tcf_exts {
+ int action;
+ int police;
+};
+
+enum pedit_cmd {
+ TCA_PEDIT_KEY_EX_CMD_SET = 0,
+ TCA_PEDIT_KEY_EX_CMD_ADD = 1,
+ __PEDIT_CMD_MAX = 2,
+};
+
+struct nf_conntrack_l4proto___2;
+
+struct PptpControlHeader {
+ __be16 messageType;
+ __u16 reserved;
+};
+
+struct PptpStartSessionRequest {
+ __be16 protocolVersion;
+ __u16 reserved1;
+ __be32 framingCapability;
+ __be32 bearerCapability;
+ __be16 maxChannels;
+ __be16 firmwareRevision;
+ __u8 hostName[64];
+ __u8 vendorString[64];
+};
+
+struct PptpStartSessionReply {
+ __be16 protocolVersion;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be32 framingCapability;
+ __be32 bearerCapability;
+ __be16 maxChannels;
+ __be16 firmwareRevision;
+ __u8 hostName[64];
+ __u8 vendorString[64];
+};
+
+struct PptpStopSessionRequest {
+ __u8 reason;
+ __u8 reserved1;
+ __u16 reserved2;
+};
+
+struct PptpStopSessionReply {
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 reserved1;
+};
+
+struct PptpOutCallRequest {
+ __be16 callID;
+ __be16 callSerialNumber;
+ __be32 minBPS;
+ __be32 maxBPS;
+ __be32 bearerType;
+ __be32 framingType;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __be16 phoneNumberLength;
+ __u16 reserved1;
+ __u8 phoneNumber[64];
+ __u8 subAddress[64];
+};
+
+struct PptpOutCallReply {
+ __be16 callID;
+ __be16 peersCallID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be16 causeCode;
+ __be32 connectSpeed;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __be32 physChannelID;
+};
+
+struct PptpInCallRequest {
+ __be16 callID;
+ __be16 callSerialNumber;
+ __be32 callBearerType;
+ __be32 physChannelID;
+ __be16 dialedNumberLength;
+ __be16 dialingNumberLength;
+ __u8 dialedNumber[64];
+ __u8 dialingNumber[64];
+ __u8 subAddress[64];
+};
+
+struct PptpInCallReply {
+ __be16 callID;
+ __be16 peersCallID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __u16 reserved;
+};
+
+struct PptpInCallConnected {
+ __be16 peersCallID;
+ __u16 reserved;
+ __be32 connectSpeed;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __be32 callFramingType;
+};
+
+struct PptpClearCallRequest {
+ __be16 callID;
+ __u16 reserved;
+};
+
+struct PptpCallDisconnectNotify {
+ __be16 callID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be16 causeCode;
+ __u16 reserved;
+ __u8 callStatistics[128];
+};
+
+struct PptpWanErrorNotify {
+ __be16 peersCallID;
+ __u16 reserved;
+ __be32 crcErrors;
+ __be32 framingErrors;
+ __be32 hardwareOverRuns;
+ __be32 bufferOverRuns;
+ __be32 timeoutErrors;
+ __be32 alignmentErrors;
+};
+
+struct PptpSetLinkInfo {
+ __be16 peersCallID;
+ __u16 reserved;
+ __be32 sendAccm;
+ __be32 recvAccm;
+};
+
+union pptp_ctrl_union {
+ struct PptpStartSessionRequest sreq;
+ struct PptpStartSessionReply srep;
+ struct PptpStopSessionRequest streq;
+ struct PptpStopSessionReply strep;
+ struct PptpOutCallRequest ocreq;
+ struct PptpOutCallReply ocack;
+ struct PptpInCallRequest icreq;
+ struct PptpInCallReply icack;
+ struct PptpInCallConnected iccon;
+ struct PptpClearCallRequest clrreq;
+ struct PptpCallDisconnectNotify disc;
+ struct PptpWanErrorNotify wanerr;
+ struct PptpSetLinkInfo setlink;
+};
+
+struct tcf_filter_chain_list_item {
+ struct list_head list;
+ tcf_chain_head_change_t *chain_head_change;
+ void *chain_head_change_priv;
+};
+
+struct tcf_net {
+ spinlock_t idr_lock;
+ struct idr idr;
+};
+
+struct tcf_block_owner_item {
+ struct list_head list;
+ struct Qdisc *q;
+ enum flow_block_binder_type binder_type;
+};
+
+struct tcf_chain_info {
+ struct tcf_proto **pprev;
+ struct tcf_proto *next;
+};
+
+struct tcf_dump_args {
+ struct tcf_walker w;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ struct tcf_block *block;
+ struct Qdisc *q;
+ u32 parent;
+ bool terse_dump;
+};
+
+struct tc_fifo_qopt {
+ __u32 limit;
+};
+
+enum tc_fifo_command {
+ TC_FIFO_REPLACE = 0,
+ TC_FIFO_DESTROY = 1,
+ TC_FIFO_STATS = 2,
+};
+
+struct tc_fifo_qopt_offload {
+ enum tc_fifo_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_qopt_offload_stats stats;
+ };
+};
+
+struct tc_cbq_lssopt {
+ unsigned char change;
+ unsigned char flags;
+ unsigned char ewma_log;
+ unsigned char level;
+ __u32 maxidle;
+ __u32 minidle;
+ __u32 offtime;
+ __u32 avpkt;
+};
+
+struct tc_cbq_wrropt {
+ unsigned char flags;
+ unsigned char priority;
+ unsigned char cpriority;
+ unsigned char __reserved;
+ __u32 allot;
+ __u32 weight;
+};
+
+struct tc_cbq_fopt {
+ __u32 split;
+ __u32 defmap;
+ __u32 defchange;
+};
+
+struct tc_cbq_xstats {
+ __u32 borrows;
+ __u32 overactions;
+ __s32 avgidle;
+ __s32 undertime;
+};
+
+enum {
+ TCA_CBQ_UNSPEC = 0,
+ TCA_CBQ_LSSOPT = 1,
+ TCA_CBQ_WRROPT = 2,
+ TCA_CBQ_FOPT = 3,
+ TCA_CBQ_OVL_STRATEGY = 4,
+ TCA_CBQ_RATE = 5,
+ TCA_CBQ_RTAB = 6,
+ TCA_CBQ_POLICE = 7,
+ __TCA_CBQ_MAX = 8,
+};
+
+typedef u64 psched_time_t;
+
+typedef long int psched_tdiff_t;
+
+struct cbq_class {
+ struct Qdisc_class_common common;
+ struct cbq_class *next_alive;
+ unsigned char priority;
+ unsigned char priority2;
+ unsigned char ewma_log;
+ u32 defmap;
+ long int maxidle;
+ long int offtime;
+ long int minidle;
+ u32 avpkt;
+ struct qdisc_rate_table *R_tab;
+ long int allot;
+ long int quantum;
+ long int weight;
+ struct Qdisc *qdisc;
+ struct cbq_class *split;
+ struct cbq_class *share;
+ struct cbq_class *tparent;
+ struct cbq_class *borrow;
+ struct cbq_class *sibling;
+ struct cbq_class *children;
+ struct Qdisc *q;
+ unsigned char cpriority;
+ unsigned char delayed;
+ unsigned char level;
+ psched_time_t last;
+ psched_time_t undertime;
+ long int avgidle;
+ long int deficit;
+ psched_time_t penalized;
+ struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_queue qstats;
+ struct net_rate_estimator *rate_est;
+ struct tc_cbq_xstats xstats;
+ struct tcf_proto *filter_list;
+ struct tcf_block *block;
+ int filters;
+ struct cbq_class *defaults[16];
+};
+
+struct cbq_sched_data {
+ struct Qdisc_class_hash clhash;
+ int nclasses[9];
+ unsigned int quanta[9];
+ struct cbq_class link;
+ unsigned int activemask;
+ struct cbq_class *active[9];
+ struct cbq_class *tx_class;
+ struct cbq_class *tx_borrowed;
+ int tx_len;
+ psched_time_t now;
+ unsigned int pmask;
+ struct hrtimer delay_timer;
+ struct qdisc_watchdog watchdog;
+ psched_tdiff_t wd_expires;
+ int toplevel;
+ u32 hgenerator;
+};
+
+struct tc_multiq_qopt {
+ __u16 bands;
+ __u16 max_bands;
+};
+
+struct multiq_sched_data {
+ u16 bands;
+ u16 max_bands;
+ u16 curband;
+ struct tcf_proto *filter_list;
+ struct tcf_block *block;
+ struct Qdisc **queues;
+};
+
+enum {
+ TCA_FQ_CODEL_UNSPEC = 0,
+ TCA_FQ_CODEL_TARGET = 1,
+ TCA_FQ_CODEL_LIMIT = 2,
+ TCA_FQ_CODEL_INTERVAL = 3,
+ TCA_FQ_CODEL_ECN = 4,
+ TCA_FQ_CODEL_FLOWS = 5,
+ TCA_FQ_CODEL_QUANTUM = 6,
+ TCA_FQ_CODEL_CE_THRESHOLD = 7,
+ TCA_FQ_CODEL_DROP_BATCH_SIZE = 8,
+ TCA_FQ_CODEL_MEMORY_LIMIT = 9,
+ __TCA_FQ_CODEL_MAX = 10,
+};
+
+enum {
+ TCA_FQ_CODEL_XSTATS_QDISC = 0,
+ TCA_FQ_CODEL_XSTATS_CLASS = 1,
+};
+
+struct tc_fq_codel_qd_stats {
+ __u32 maxpacket;
+ __u32 drop_overlimit;
+ __u32 ecn_mark;
+ __u32 new_flow_count;
+ __u32 new_flows_len;
+ __u32 old_flows_len;
+ __u32 ce_mark;
+ __u32 memory_usage;
+ __u32 drop_overmemory;
+};
+
+struct tc_fq_codel_cl_stats {
+ __s32 deficit;
+ __u32 ldelay;
+ __u32 count;
+ __u32 lastcount;
+ __u32 dropping;
+ __s32 drop_next;
+};
+
+struct tc_fq_codel_xstats {
+ __u32 type;
+ union {
+ struct tc_fq_codel_qd_stats qdisc_stats;
+ struct tc_fq_codel_cl_stats class_stats;
+ };
+};
+
+typedef u32 codel_time_t;
+
+typedef s32 codel_tdiff_t;
+
+struct codel_params {
+ codel_time_t target;
+ codel_time_t ce_threshold;
+ codel_time_t interval;
+ u32 mtu;
+ bool ecn;
+};
+
+struct codel_vars {
+ u32 count;
+ u32 lastcount;
+ bool dropping;
+ u16 rec_inv_sqrt;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+};
+
+struct codel_stats {
+ u32 maxpacket;
+ u32 drop_count;
+ u32 drop_len;
+ u32 ecn_mark;
+ u32 ce_mark;
+};
+
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *);
+
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *);
+
+typedef void (*codel_skb_drop_t)(struct sk_buff *, void *);
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *, void *);
+
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+ unsigned int mem_usage;
+};
+
+struct fq_codel_flow {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ struct list_head flowchain;
+ int deficit;
+ struct codel_vars cvars;
+};
+
+struct fq_codel_sched_data {
+ struct tcf_proto *filter_list;
+ struct tcf_block *block;
+ struct fq_codel_flow *flows;
+ u32 *backlogs;
+ u32 flows_cnt;
+ u32 quantum;
+ u32 drop_batch_size;
+ u32 memory_limit;
+ struct codel_params cparams;
+ struct codel_stats cstats;
+ u32 memory_usage;
+ u32 drop_overmemory;
+ u32 drop_overlimit;
+ u32 new_flow_count;
+ struct list_head new_flows;
+ struct list_head old_flows;
+};
+
+enum {
+ TCA_FQ_UNSPEC = 0,
+ TCA_FQ_PLIMIT = 1,
+ TCA_FQ_FLOW_PLIMIT = 2,
+ TCA_FQ_QUANTUM = 3,
+ TCA_FQ_INITIAL_QUANTUM = 4,
+ TCA_FQ_RATE_ENABLE = 5,
+ TCA_FQ_FLOW_DEFAULT_RATE = 6,
+ TCA_FQ_FLOW_MAX_RATE = 7,
+ TCA_FQ_BUCKETS_LOG = 8,
+ TCA_FQ_FLOW_REFILL_DELAY = 9,
+ TCA_FQ_ORPHAN_MASK = 10,
+ TCA_FQ_LOW_RATE_THRESHOLD = 11,
+ TCA_FQ_CE_THRESHOLD = 12,
+ TCA_FQ_TIMER_SLACK = 13,
+ TCA_FQ_HORIZON = 14,
+ TCA_FQ_HORIZON_DROP = 15,
+ __TCA_FQ_MAX = 16,
+};
+
+struct tc_fq_qd_stats {
+ __u64 gc_flows;
+ __u64 highprio_packets;
+ __u64 tcp_retrans;
+ __u64 throttled;
+ __u64 flows_plimit;
+ __u64 pkts_too_long;
+ __u64 allocation_errors;
+ __s64 time_next_delayed_flow;
+ __u32 flows;
+ __u32 inactive_flows;
+ __u32 throttled_flows;
+ __u32 unthrottle_latency_ns;
+ __u64 ce_mark;
+ __u64 horizon_drops;
+ __u64 horizon_caps;
+};
+
+struct fq_skb_cb {
+ u64 time_to_send;
+};
+
+struct fq_flow {
+ struct rb_root t_root;
+ struct sk_buff *head;
+ union {
+ struct sk_buff *tail;
+ long unsigned int age;
+ };
+ struct rb_node fq_node;
+ struct sock *sk;
+ u32 socket_hash;
+ int qlen;
+ int credit;
+ struct fq_flow *next;
+ struct rb_node rate_node;
+ u64 time_next_packet;
+ long: 64;
+ long: 64;
+};
+
+struct fq_flow_head {
+ struct fq_flow *first;
+ struct fq_flow *last;
+};
+
+struct fq_sched_data {
+ struct fq_flow_head new_flows;
+ struct fq_flow_head old_flows;
+ struct rb_root delayed;
+ u64 time_next_delayed_flow;
+ u64 ktime_cache;
+ long unsigned int unthrottle_latency_ns;
+ struct fq_flow internal;
+ u32 quantum;
+ u32 initial_quantum;
+ u32 flow_refill_delay;
+ u32 flow_plimit;
+ long unsigned int flow_max_rate;
+ u64 ce_threshold;
+ u64 horizon;
+ u32 orphan_mask;
+ u32 low_rate_threshold;
+ struct rb_root *fq_root;
+ u8 rate_enable;
+ u8 fq_trees_log;
+ u8 horizon_drop;
+ u32 flows;
+ u32 inactive_flows;
+ u32 throttled_flows;
+ u64 stat_gc_flows;
+ u64 stat_internal_packets;
+ u64 stat_throttled;
+ u64 stat_ce_mark;
+ u64 stat_horizon_drops;
+ u64 stat_horizon_caps;
+ u64 stat_flows_plimit;
+ u64 stat_pkts_too_long;
+ u64 stat_allocation_errors;
+ u32 timer_slack;
+ struct qdisc_watchdog watchdog;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum {
+ TCA_CGROUP_UNSPEC = 0,
+ TCA_CGROUP_ACT = 1,
+ TCA_CGROUP_POLICE = 2,
+ TCA_CGROUP_EMATCHES = 3,
+ __TCA_CGROUP_MAX = 4,
+};
+
+struct tcf_ematch_tree_hdr {
+ __u16 nmatches;
+ __u16 progid;
+};
+
+struct tcf_pkt_info {
+ unsigned char *ptr;
+ int nexthdr;
+};
+
+struct tcf_ematch_ops;
+
+struct tcf_ematch {
+ struct tcf_ematch_ops *ops;
+ long unsigned int data;
+ unsigned int datalen;
+ u16 matchid;
+ u16 flags;
+ struct net *net;
+};
+
+struct tcf_ematch_ops {
+ int kind;
+ int datalen;
+ int (*change)(struct net *, void *, int, struct tcf_ematch *);
+ int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *);
+ void (*destroy)(struct tcf_ematch *);
+ int (*dump)(struct sk_buff *, struct tcf_ematch *);
+ struct module *owner;
+ struct list_head link;
+};
+
+struct tcf_ematch_tree {
+ struct tcf_ematch_tree_hdr hdr;
+ struct tcf_ematch *matches;
+};
+
+struct cls_cgroup_head {
+ u32 handle;
+ struct tcf_exts exts;
+ struct tcf_ematch_tree ematches;
+ struct tcf_proto *tp;
+ struct rcu_work rwork;
+};
+
+enum {
+ TCA_BPF_UNSPEC = 0,
+ TCA_BPF_ACT = 1,
+ TCA_BPF_POLICE = 2,
+ TCA_BPF_CLASSID = 3,
+ TCA_BPF_OPS_LEN = 4,
+ TCA_BPF_OPS = 5,
+ TCA_BPF_FD = 6,
+ TCA_BPF_NAME = 7,
+ TCA_BPF_FLAGS = 8,
+ TCA_BPF_FLAGS_GEN = 9,
+ TCA_BPF_TAG = 10,
+ TCA_BPF_ID = 11,
+ __TCA_BPF_MAX = 12,
+};
+
+struct flow_cls_common_offload {
+ u32 chain_index;
+ __be16 protocol;
+ u32 prio;
+ struct netlink_ext_ack *extack;
+};
+
+enum tc_clsbpf_command {
+ TC_CLSBPF_OFFLOAD = 0,
+ TC_CLSBPF_STATS = 1,
+};
+
+struct tc_cls_bpf_offload {
+ struct flow_cls_common_offload common;
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ struct bpf_prog *oldprog;
+ const char *name;
+ bool exts_integrated;
+};
+
+struct cls_bpf_head {
+ struct list_head plist;
+ struct idr handle_idr;
+ struct callback_head rcu;
+};
+
+struct cls_bpf_prog {
+ struct bpf_prog *filter;
+ struct list_head link;
+ struct tcf_result res;
+ bool exts_integrated;
+ u32 gen_flags;
+ unsigned int in_hw_count;
+ struct tcf_exts exts;
+ u32 handle;
+ u16 bpf_num_ops;
+ struct sock_filter *bpf_ops;
+ const char *bpf_name;
+ struct tcf_proto *tp;
+ struct rcu_work rwork;
+};
+
+enum {
+ TCA_EMATCH_TREE_UNSPEC = 0,
+ TCA_EMATCH_TREE_HDR = 1,
+ TCA_EMATCH_TREE_LIST = 2,
+ __TCA_EMATCH_TREE_MAX = 3,
+};
+
+struct tcf_ematch_hdr {
+ __u16 matchid;
+ __u16 kind;
+ __u16 flags;
+ __u16 pad;
+};
+
+struct sockaddr_nl {
+ __kernel_sa_family_t nl_family;
+ short unsigned int nl_pad;
+ __u32 nl_pid;
+ __u32 nl_groups;
+};
+
+struct nlmsgerr {
+ int error;
+ struct nlmsghdr msg;
+};
+
+enum nlmsgerr_attrs {
+ NLMSGERR_ATTR_UNUSED = 0,
+ NLMSGERR_ATTR_MSG = 1,
+ NLMSGERR_ATTR_OFFS = 2,
+ NLMSGERR_ATTR_COOKIE = 3,
+ __NLMSGERR_ATTR_MAX = 4,
+ NLMSGERR_ATTR_MAX = 3,
+};
+
+struct nl_pktinfo {
+ __u32 group;
+};
+
+enum {
+ NETLINK_UNCONNECTED = 0,
+ NETLINK_CONNECTED = 1,
+};
+
+enum netlink_skb_flags {
+ NETLINK_SKB_DST = 8,
+};
+
+struct netlink_notify {
+ struct net *net;
+ u32 portid;
+ int protocol;
+};
+
+struct netlink_tap {
+ struct net_device *dev;
+ struct module *module;
+ struct list_head list;
+};
+
+struct netlink_sock {
+ struct sock sk;
+ u32 portid;
+ u32 dst_portid;
+ u32 dst_group;
+ u32 flags;
+ u32 subscriptions;
+ u32 ngroups;
+ long unsigned int *groups;
+ long unsigned int state;
+ size_t max_recvmsg_len;
+ wait_queue_head_t wait;
+ bool bound;
+ bool cb_running;
+ int dump_done_errno;
+ struct netlink_callback cb;
+ struct mutex *cb_mutex;
+ struct mutex cb_def_mutex;
+ void (*netlink_rcv)(struct sk_buff *);
+ int (*netlink_bind)(struct net *, int);
+ void (*netlink_unbind)(struct net *, int);
+ struct module *module;
+ struct rhash_head node;
+ struct callback_head rcu;
+ struct work_struct work;
+};
+
+struct listeners;
+
+struct netlink_table {
+ struct rhashtable hash;
+ struct hlist_head mc_list;
+ struct listeners *listeners;
+ unsigned int flags;
+ unsigned int groups;
+ struct mutex *cb_mutex;
+ struct module *module;
+ int (*bind)(struct net *, int);
+ void (*unbind)(struct net *, int);
+ bool (*compare)(struct net *, struct sock *);
+ int registered;
+};
+
+struct listeners {
+ struct callback_head rcu;
+ long unsigned int masks[0];
+};
+
+struct netlink_tap_net {
+ struct list_head netlink_tap_all;
+ struct mutex netlink_tap_lock;
+};
+
+struct netlink_compare_arg {
+ possible_net_t pnet;
+ u32 portid;
+};
+
+struct netlink_broadcast_data {
+ struct sock *exclude_sk;
+ struct net *net;
+ u32 portid;
+ u32 group;
+ int failure;
+ int delivery_failure;
+ int congested;
+ int delivered;
+ gfp_t allocation;
+ struct sk_buff *skb;
+ struct sk_buff *skb2;
+ int (*tx_filter)(struct sock *, struct sk_buff *, void *);
+ void *tx_data;
+};
+
+struct netlink_set_err_data {
+ struct sock *exclude_sk;
+ u32 portid;
+ u32 group;
+ int code;
+};
+
+struct nl_seq_iter {
+ struct seq_net_private p;
+ struct rhashtable_iter hti;
+ int link;
+};
+
+struct bpf_iter__netlink {
+ union {
+ struct bpf_iter_meta *meta;
+ };
+ union {
+ struct netlink_sock *sk;
+ };
+};
+
+enum {
+ CTRL_CMD_UNSPEC = 0,
+ CTRL_CMD_NEWFAMILY = 1,
+ CTRL_CMD_DELFAMILY = 2,
+ CTRL_CMD_GETFAMILY = 3,
+ CTRL_CMD_NEWOPS = 4,
+ CTRL_CMD_DELOPS = 5,
+ CTRL_CMD_GETOPS = 6,
+ CTRL_CMD_NEWMCAST_GRP = 7,
+ CTRL_CMD_DELMCAST_GRP = 8,
+ CTRL_CMD_GETMCAST_GRP = 9,
+ CTRL_CMD_GETPOLICY = 10,
+ __CTRL_CMD_MAX = 11,
+};
+
+enum {
+ CTRL_ATTR_UNSPEC = 0,
+ CTRL_ATTR_FAMILY_ID = 1,
+ CTRL_ATTR_FAMILY_NAME = 2,
+ CTRL_ATTR_VERSION = 3,
+ CTRL_ATTR_HDRSIZE = 4,
+ CTRL_ATTR_MAXATTR = 5,
+ CTRL_ATTR_OPS = 6,
+ CTRL_ATTR_MCAST_GROUPS = 7,
+ CTRL_ATTR_POLICY = 8,
+ __CTRL_ATTR_MAX = 9,
+};
+
+enum {
+ CTRL_ATTR_OP_UNSPEC = 0,
+ CTRL_ATTR_OP_ID = 1,
+ CTRL_ATTR_OP_FLAGS = 2,
+ __CTRL_ATTR_OP_MAX = 3,
+};
+
+enum {
+ CTRL_ATTR_MCAST_GRP_UNSPEC = 0,
+ CTRL_ATTR_MCAST_GRP_NAME = 1,
+ CTRL_ATTR_MCAST_GRP_ID = 2,
+ __CTRL_ATTR_MCAST_GRP_MAX = 3,
+};
+
+enum genl_validate_flags {
+ GENL_DONT_VALIDATE_STRICT = 1,
+ GENL_DONT_VALIDATE_DUMP = 2,
+ GENL_DONT_VALIDATE_DUMP_STRICT = 4,
+};
+
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ const struct genl_ops *ops;
+ struct nlattr **attrs;
+};
+
+struct genl_start_context {
+ const struct genl_family *family;
+ struct nlmsghdr *nlh;
+ struct netlink_ext_ack *extack;
+ const struct genl_ops *ops;
+ int hdrlen;
+};
+
+enum netlink_attribute_type {
+ NL_ATTR_TYPE_INVALID = 0,
+ NL_ATTR_TYPE_FLAG = 1,
+ NL_ATTR_TYPE_U8 = 2,
+ NL_ATTR_TYPE_U16 = 3,
+ NL_ATTR_TYPE_U32 = 4,
+ NL_ATTR_TYPE_U64 = 5,
+ NL_ATTR_TYPE_S8 = 6,
+ NL_ATTR_TYPE_S16 = 7,
+ NL_ATTR_TYPE_S32 = 8,
+ NL_ATTR_TYPE_S64 = 9,
+ NL_ATTR_TYPE_BINARY = 10,
+ NL_ATTR_TYPE_STRING = 11,
+ NL_ATTR_TYPE_NUL_STRING = 12,
+ NL_ATTR_TYPE_NESTED = 13,
+ NL_ATTR_TYPE_NESTED_ARRAY = 14,
+ NL_ATTR_TYPE_BITFIELD32 = 15,
+};
+
+enum netlink_policy_type_attr {
+ NL_POLICY_TYPE_ATTR_UNSPEC = 0,
+ NL_POLICY_TYPE_ATTR_TYPE = 1,
+ NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2,
+ NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3,
+ NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4,
+ NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5,
+ NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6,
+ NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7,
+ NL_POLICY_TYPE_ATTR_POLICY_IDX = 8,
+ NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9,
+ NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10,
+ NL_POLICY_TYPE_ATTR_PAD = 11,
+ __NL_POLICY_TYPE_ATTR_MAX = 12,
+ NL_POLICY_TYPE_ATTR_MAX = 11,
+};
+
+struct nl_policy_dump {
+ unsigned int policy_idx;
+ unsigned int attr_idx;
+ unsigned int n_alloc;
+ struct {
+ const struct nla_policy *policy;
+ unsigned int maxtype;
+ } policies[0];
+};
+
+struct trace_event_raw_bpf_test_finish {
+ struct trace_entry ent;
+ int err;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_bpf_test_finish {};
+
+typedef void (*btf_trace_bpf_test_finish)(void *, int *);
+
+struct ethtool_cmd {
+ __u32 cmd;
+ __u32 supported;
+ __u32 advertising;
+ __u16 speed;
+ __u8 duplex;
+ __u8 port;
+ __u8 phy_address;
+ __u8 transceiver;
+ __u8 autoneg;
+ __u8 mdio_support;
+ __u32 maxtxpkt;
+ __u32 maxrxpkt;
+ __u16 speed_hi;
+ __u8 eth_tp_mdix;
+ __u8 eth_tp_mdix_ctrl;
+ __u32 lp_advertising;
+ __u32 reserved[2];
+};
+
+struct ethtool_value {
+ __u32 cmd;
+ __u32 data;
+};
+
+enum tunable_id {
+ ETHTOOL_ID_UNSPEC = 0,
+ ETHTOOL_RX_COPYBREAK = 1,
+ ETHTOOL_TX_COPYBREAK = 2,
+ ETHTOOL_PFC_PREVENTION_TOUT = 3,
+ __ETHTOOL_TUNABLE_COUNT = 4,
+};
+
+enum tunable_type_id {
+ ETHTOOL_TUNABLE_UNSPEC = 0,
+ ETHTOOL_TUNABLE_U8 = 1,
+ ETHTOOL_TUNABLE_U16 = 2,
+ ETHTOOL_TUNABLE_U32 = 3,
+ ETHTOOL_TUNABLE_U64 = 4,
+ ETHTOOL_TUNABLE_STRING = 5,
+ ETHTOOL_TUNABLE_S8 = 6,
+ ETHTOOL_TUNABLE_S16 = 7,
+ ETHTOOL_TUNABLE_S32 = 8,
+ ETHTOOL_TUNABLE_S64 = 9,
+};
+
+enum phy_tunable_id {
+ ETHTOOL_PHY_ID_UNSPEC = 0,
+ ETHTOOL_PHY_DOWNSHIFT = 1,
+ ETHTOOL_PHY_FAST_LINK_DOWN = 2,
+ ETHTOOL_PHY_EDPD = 3,
+ __ETHTOOL_PHY_TUNABLE_COUNT = 4,
+};
+
+struct ethtool_gstrings {
+ __u32 cmd;
+ __u32 string_set;
+ __u32 len;
+ __u8 data[0];
+};
+
+struct ethtool_sset_info {
+ __u32 cmd;
+ __u32 reserved;
+ __u64 sset_mask;
+ __u32 data[0];
+};
+
+struct ethtool_perm_addr {
+ __u32 cmd;
+ __u32 size;
+ __u8 data[0];
+};
+
+enum ethtool_flags {
+ ETH_FLAG_TXVLAN = 128,
+ ETH_FLAG_RXVLAN = 256,
+ ETH_FLAG_LRO = 32768,
+ ETH_FLAG_NTUPLE = 134217728,
+ ETH_FLAG_RXHASH = 268435456,
+};
+
+struct ethtool_rxfh {
+ __u32 cmd;
+ __u32 rss_context;
+ __u32 indir_size;
+ __u32 key_size;
+ __u8 hfunc;
+ __u8 rsvd8[3];
+ __u32 rsvd32;
+ __u32 rss_config[0];
+};
+
+struct ethtool_get_features_block {
+ __u32 available;
+ __u32 requested;
+ __u32 active;
+ __u32 never_changed;
+};
+
+struct ethtool_gfeatures {
+ __u32 cmd;
+ __u32 size;
+ struct ethtool_get_features_block features[0];
+};
+
+struct ethtool_set_features_block {
+ __u32 valid;
+ __u32 requested;
+};
+
+struct ethtool_sfeatures {
+ __u32 cmd;
+ __u32 size;
+ struct ethtool_set_features_block features[0];
+};
+
+enum ethtool_sfeatures_retval_bits {
+ ETHTOOL_F_UNSUPPORTED__BIT = 0,
+ ETHTOOL_F_WISH__BIT = 1,
+ ETHTOOL_F_COMPAT__BIT = 2,
+};
+
+struct ethtool_per_queue_op {
+ __u32 cmd;
+ __u32 sub_command;
+ __u32 queue_mask[128];
+ char data[0];
+};
+
+enum {
+ ETH_RSS_HASH_TOP_BIT = 0,
+ ETH_RSS_HASH_XOR_BIT = 1,
+ ETH_RSS_HASH_CRC32_BIT = 2,
+ ETH_RSS_HASH_FUNCS_COUNT = 3,
+};
+
+struct ethtool_rx_flow_rule {
+ struct flow_rule *rule;
+ long unsigned int priv[0];
+};
+
+struct ethtool_rx_flow_spec_input {
+ const struct ethtool_rx_flow_spec *fs;
+ u32 rss_ctx;
+};
+
+enum {
+ ETHTOOL_MSG_KERNEL_NONE = 0,
+ ETHTOOL_MSG_STRSET_GET_REPLY = 1,
+ ETHTOOL_MSG_LINKINFO_GET_REPLY = 2,
+ ETHTOOL_MSG_LINKINFO_NTF = 3,
+ ETHTOOL_MSG_LINKMODES_GET_REPLY = 4,
+ ETHTOOL_MSG_LINKMODES_NTF = 5,
+ ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6,
+ ETHTOOL_MSG_DEBUG_GET_REPLY = 7,
+ ETHTOOL_MSG_DEBUG_NTF = 8,
+ ETHTOOL_MSG_WOL_GET_REPLY = 9,
+ ETHTOOL_MSG_WOL_NTF = 10,
+ ETHTOOL_MSG_FEATURES_GET_REPLY = 11,
+ ETHTOOL_MSG_FEATURES_SET_REPLY = 12,
+ ETHTOOL_MSG_FEATURES_NTF = 13,
+ ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14,
+ ETHTOOL_MSG_PRIVFLAGS_NTF = 15,
+ ETHTOOL_MSG_RINGS_GET_REPLY = 16,
+ ETHTOOL_MSG_RINGS_NTF = 17,
+ ETHTOOL_MSG_CHANNELS_GET_REPLY = 18,
+ ETHTOOL_MSG_CHANNELS_NTF = 19,
+ ETHTOOL_MSG_COALESCE_GET_REPLY = 20,
+ ETHTOOL_MSG_COALESCE_NTF = 21,
+ ETHTOOL_MSG_PAUSE_GET_REPLY = 22,
+ ETHTOOL_MSG_PAUSE_NTF = 23,
+ ETHTOOL_MSG_EEE_GET_REPLY = 24,
+ ETHTOOL_MSG_EEE_NTF = 25,
+ ETHTOOL_MSG_TSINFO_GET_REPLY = 26,
+ ETHTOOL_MSG_CABLE_TEST_NTF = 27,
+ ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28,
+ __ETHTOOL_MSG_KERNEL_CNT = 29,
+ ETHTOOL_MSG_KERNEL_MAX = 28,
+};
+
+struct ethtool_link_usettings {
+ struct ethtool_link_settings base;
+ struct {
+ __u32 supported[3];
+ __u32 advertising[3];
+ __u32 lp_advertising[3];
+ } link_modes;
+};
+
+struct ethtool_rx_flow_key {
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_ip ip;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_eth_addrs eth_addrs;
+ long: 48;
+};
+
+struct ethtool_rx_flow_match {
+ struct flow_dissector dissector;
+ int: 32;
+ struct ethtool_rx_flow_key key;
+ struct ethtool_rx_flow_key mask;
+};
+
+enum {
+ ETHTOOL_MSG_USER_NONE = 0,
+ ETHTOOL_MSG_STRSET_GET = 1,
+ ETHTOOL_MSG_LINKINFO_GET = 2,
+ ETHTOOL_MSG_LINKINFO_SET = 3,
+ ETHTOOL_MSG_LINKMODES_GET = 4,
+ ETHTOOL_MSG_LINKMODES_SET = 5,
+ ETHTOOL_MSG_LINKSTATE_GET = 6,
+ ETHTOOL_MSG_DEBUG_GET = 7,
+ ETHTOOL_MSG_DEBUG_SET = 8,
+ ETHTOOL_MSG_WOL_GET = 9,
+ ETHTOOL_MSG_WOL_SET = 10,
+ ETHTOOL_MSG_FEATURES_GET = 11,
+ ETHTOOL_MSG_FEATURES_SET = 12,
+ ETHTOOL_MSG_PRIVFLAGS_GET = 13,
+ ETHTOOL_MSG_PRIVFLAGS_SET = 14,
+ ETHTOOL_MSG_RINGS_GET = 15,
+ ETHTOOL_MSG_RINGS_SET = 16,
+ ETHTOOL_MSG_CHANNELS_GET = 17,
+ ETHTOOL_MSG_CHANNELS_SET = 18,
+ ETHTOOL_MSG_COALESCE_GET = 19,
+ ETHTOOL_MSG_COALESCE_SET = 20,
+ ETHTOOL_MSG_PAUSE_GET = 21,
+ ETHTOOL_MSG_PAUSE_SET = 22,
+ ETHTOOL_MSG_EEE_GET = 23,
+ ETHTOOL_MSG_EEE_SET = 24,
+ ETHTOOL_MSG_TSINFO_GET = 25,
+ ETHTOOL_MSG_CABLE_TEST_ACT = 26,
+ ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27,
+ __ETHTOOL_MSG_USER_CNT = 28,
+ ETHTOOL_MSG_USER_MAX = 27,
+};
+
+enum {
+ ETHTOOL_A_HEADER_UNSPEC = 0,
+ ETHTOOL_A_HEADER_DEV_INDEX = 1,
+ ETHTOOL_A_HEADER_DEV_NAME = 2,
+ ETHTOOL_A_HEADER_FLAGS = 3,
+ __ETHTOOL_A_HEADER_CNT = 4,
+ ETHTOOL_A_HEADER_MAX = 3,
+};
+
+enum ethtool_multicast_groups {
+ ETHNL_MCGRP_MONITOR = 0,
+};
+
+struct ethnl_req_info {
+ struct net_device *dev;
+ u32 flags;
+};
+
+struct ethnl_reply_data {
+ struct net_device *dev;
+};
+
+struct ethnl_request_ops {
+ u8 request_cmd;
+ u8 reply_cmd;
+ u16 hdr_attr;
+ unsigned int max_attr;
+ unsigned int req_info_size;
+ unsigned int reply_data_size;
+ const struct nla_policy *request_policy;
+ bool allow_nodev_do;
+ int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *);
+ int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, struct genl_info *);
+ int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *);
+ int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *);
+ void (*cleanup_data)(struct ethnl_reply_data *);
+};
+
+struct ethnl_dump_ctx {
+ const struct ethnl_request_ops *ops;
+ struct ethnl_req_info *req_info;
+ struct ethnl_reply_data *reply_data;
+ int pos_hash;
+ int pos_idx;
+};
+
+typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *);
+
+enum {
+ ETHTOOL_A_BITSET_BIT_UNSPEC = 0,
+ ETHTOOL_A_BITSET_BIT_INDEX = 1,
+ ETHTOOL_A_BITSET_BIT_NAME = 2,
+ ETHTOOL_A_BITSET_BIT_VALUE = 3,
+ __ETHTOOL_A_BITSET_BIT_CNT = 4,
+ ETHTOOL_A_BITSET_BIT_MAX = 3,
+};
+
+enum {
+ ETHTOOL_A_BITSET_BITS_UNSPEC = 0,
+ ETHTOOL_A_BITSET_BITS_BIT = 1,
+ __ETHTOOL_A_BITSET_BITS_CNT = 2,
+ ETHTOOL_A_BITSET_BITS_MAX = 1,
+};
+
+enum {
+ ETHTOOL_A_BITSET_UNSPEC = 0,
+ ETHTOOL_A_BITSET_NOMASK = 1,
+ ETHTOOL_A_BITSET_SIZE = 2,
+ ETHTOOL_A_BITSET_BITS = 3,
+ ETHTOOL_A_BITSET_VALUE = 4,
+ ETHTOOL_A_BITSET_MASK = 5,
+ __ETHTOOL_A_BITSET_CNT = 6,
+ ETHTOOL_A_BITSET_MAX = 5,
+};
+
+typedef const char (* const ethnl_string_array_t)[32];
+
+enum {
+ ETHTOOL_A_STRING_UNSPEC = 0,
+ ETHTOOL_A_STRING_INDEX = 1,
+ ETHTOOL_A_STRING_VALUE = 2,
+ __ETHTOOL_A_STRING_CNT = 3,
+ ETHTOOL_A_STRING_MAX = 2,
+};
+
+enum {
+ ETHTOOL_A_STRINGS_UNSPEC = 0,
+ ETHTOOL_A_STRINGS_STRING = 1,
+ __ETHTOOL_A_STRINGS_CNT = 2,
+ ETHTOOL_A_STRINGS_MAX = 1,
+};
+
+enum {
+ ETHTOOL_A_STRINGSET_UNSPEC = 0,
+ ETHTOOL_A_STRINGSET_ID = 1,
+ ETHTOOL_A_STRINGSET_COUNT = 2,
+ ETHTOOL_A_STRINGSET_STRINGS = 3,
+ __ETHTOOL_A_STRINGSET_CNT = 4,
+ ETHTOOL_A_STRINGSET_MAX = 3,
+};
+
+enum {
+ ETHTOOL_A_STRINGSETS_UNSPEC = 0,
+ ETHTOOL_A_STRINGSETS_STRINGSET = 1,
+ __ETHTOOL_A_STRINGSETS_CNT = 2,
+ ETHTOOL_A_STRINGSETS_MAX = 1,
+};
+
+enum {
+ ETHTOOL_A_STRSET_UNSPEC = 0,
+ ETHTOOL_A_STRSET_HEADER = 1,
+ ETHTOOL_A_STRSET_STRINGSETS = 2,
+ ETHTOOL_A_STRSET_COUNTS_ONLY = 3,
+ __ETHTOOL_A_STRSET_CNT = 4,
+ ETHTOOL_A_STRSET_MAX = 3,
+};
+
+struct strset_info {
+ bool per_dev;
+ bool free_strings;
+ unsigned int count;
+ const char (*strings)[32];
+};
+
+struct strset_req_info {
+ struct ethnl_req_info base;
+ u32 req_ids;
+ bool counts_only;
+};
+
+struct strset_reply_data {
+ struct ethnl_reply_data base;
+ struct strset_info sets[15];
+};
+
+enum {
+ ETHTOOL_A_LINKINFO_UNSPEC = 0,
+ ETHTOOL_A_LINKINFO_HEADER = 1,
+ ETHTOOL_A_LINKINFO_PORT = 2,
+ ETHTOOL_A_LINKINFO_PHYADDR = 3,
+ ETHTOOL_A_LINKINFO_TP_MDIX = 4,
+ ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5,
+ ETHTOOL_A_LINKINFO_TRANSCEIVER = 6,
+ __ETHTOOL_A_LINKINFO_CNT = 7,
+ ETHTOOL_A_LINKINFO_MAX = 6,
+};
+
+struct linkinfo_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_link_ksettings ksettings;
+ struct ethtool_link_settings *lsettings;
+};
+
+enum {
+ ETHTOOL_A_LINKMODES_UNSPEC = 0,
+ ETHTOOL_A_LINKMODES_HEADER = 1,
+ ETHTOOL_A_LINKMODES_AUTONEG = 2,
+ ETHTOOL_A_LINKMODES_OURS = 3,
+ ETHTOOL_A_LINKMODES_PEER = 4,
+ ETHTOOL_A_LINKMODES_SPEED = 5,
+ ETHTOOL_A_LINKMODES_DUPLEX = 6,
+ ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7,
+ ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8,
+ __ETHTOOL_A_LINKMODES_CNT = 9,
+ ETHTOOL_A_LINKMODES_MAX = 8,
+};
+
+struct linkmodes_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_link_ksettings ksettings;
+ struct ethtool_link_settings *lsettings;
+ bool peer_empty;
+};
+
+struct link_mode_info {
+ int speed;
+ u8 duplex;
+};
+
+enum {
+ ETHTOOL_A_LINKSTATE_UNSPEC = 0,
+ ETHTOOL_A_LINKSTATE_HEADER = 1,
+ ETHTOOL_A_LINKSTATE_LINK = 2,
+ ETHTOOL_A_LINKSTATE_SQI = 3,
+ ETHTOOL_A_LINKSTATE_SQI_MAX = 4,
+ __ETHTOOL_A_LINKSTATE_CNT = 5,
+ ETHTOOL_A_LINKSTATE_MAX = 4,
+};
+
+struct linkstate_reply_data {
+ struct ethnl_reply_data base;
+ int link;
+ int sqi;
+ int sqi_max;
+};
+
+enum {
+ ETHTOOL_A_DEBUG_UNSPEC = 0,
+ ETHTOOL_A_DEBUG_HEADER = 1,
+ ETHTOOL_A_DEBUG_MSGMASK = 2,
+ __ETHTOOL_A_DEBUG_CNT = 3,
+ ETHTOOL_A_DEBUG_MAX = 2,
+};
+
+struct debug_reply_data {
+ struct ethnl_reply_data base;
+ u32 msg_mask;
+};
+
+enum {
+ ETHTOOL_A_WOL_UNSPEC = 0,
+ ETHTOOL_A_WOL_HEADER = 1,
+ ETHTOOL_A_WOL_MODES = 2,
+ ETHTOOL_A_WOL_SOPASS = 3,
+ __ETHTOOL_A_WOL_CNT = 4,
+ ETHTOOL_A_WOL_MAX = 3,
+};
+
+struct wol_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_wolinfo wol;
+ bool show_sopass;
+};
+
+enum {
+ ETHTOOL_A_FEATURES_UNSPEC = 0,
+ ETHTOOL_A_FEATURES_HEADER = 1,
+ ETHTOOL_A_FEATURES_HW = 2,
+ ETHTOOL_A_FEATURES_WANTED = 3,
+ ETHTOOL_A_FEATURES_ACTIVE = 4,
+ ETHTOOL_A_FEATURES_NOCHANGE = 5,
+ __ETHTOOL_A_FEATURES_CNT = 6,
+ ETHTOOL_A_FEATURES_MAX = 5,
+};
+
+struct features_reply_data {
+ struct ethnl_reply_data base;
+ u32 hw[2];
+ u32 wanted[2];
+ u32 active[2];
+ u32 nochange[2];
+ u32 all[2];
+};
+
+enum {
+ ETHTOOL_A_PRIVFLAGS_UNSPEC = 0,
+ ETHTOOL_A_PRIVFLAGS_HEADER = 1,
+ ETHTOOL_A_PRIVFLAGS_FLAGS = 2,
+ __ETHTOOL_A_PRIVFLAGS_CNT = 3,
+ ETHTOOL_A_PRIVFLAGS_MAX = 2,
+};
+
+struct privflags_reply_data {
+ struct ethnl_reply_data base;
+ const char (*priv_flag_names)[32];
+ unsigned int n_priv_flags;
+ u32 priv_flags;
+};
+
+enum {
+ ETHTOOL_A_RINGS_UNSPEC = 0,
+ ETHTOOL_A_RINGS_HEADER = 1,
+ ETHTOOL_A_RINGS_RX_MAX = 2,
+ ETHTOOL_A_RINGS_RX_MINI_MAX = 3,
+ ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4,
+ ETHTOOL_A_RINGS_TX_MAX = 5,
+ ETHTOOL_A_RINGS_RX = 6,
+ ETHTOOL_A_RINGS_RX_MINI = 7,
+ ETHTOOL_A_RINGS_RX_JUMBO = 8,
+ ETHTOOL_A_RINGS_TX = 9,
+ __ETHTOOL_A_RINGS_CNT = 10,
+ ETHTOOL_A_RINGS_MAX = 9,
+};
+
+struct rings_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_ringparam ringparam;
+};
+
+enum {
+ ETHTOOL_A_CHANNELS_UNSPEC = 0,
+ ETHTOOL_A_CHANNELS_HEADER = 1,
+ ETHTOOL_A_CHANNELS_RX_MAX = 2,
+ ETHTOOL_A_CHANNELS_TX_MAX = 3,
+ ETHTOOL_A_CHANNELS_OTHER_MAX = 4,
+ ETHTOOL_A_CHANNELS_COMBINED_MAX = 5,
+ ETHTOOL_A_CHANNELS_RX_COUNT = 6,
+ ETHTOOL_A_CHANNELS_TX_COUNT = 7,
+ ETHTOOL_A_CHANNELS_OTHER_COUNT = 8,
+ ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9,
+ __ETHTOOL_A_CHANNELS_CNT = 10,
+ ETHTOOL_A_CHANNELS_MAX = 9,
+};
+
+struct channels_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_channels channels;
+};
+
+enum {
+ ETHTOOL_A_COALESCE_UNSPEC = 0,
+ ETHTOOL_A_COALESCE_HEADER = 1,
+ ETHTOOL_A_COALESCE_RX_USECS = 2,
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3,
+ ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4,
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5,
+ ETHTOOL_A_COALESCE_TX_USECS = 6,
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7,
+ ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8,
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9,
+ ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10,
+ ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11,
+ ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12,
+ ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13,
+ ETHTOOL_A_COALESCE_RX_USECS_LOW = 14,
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15,
+ ETHTOOL_A_COALESCE_TX_USECS_LOW = 16,
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17,
+ ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18,
+ ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19,
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20,
+ ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21,
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22,
+ ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23,
+ __ETHTOOL_A_COALESCE_CNT = 24,
+ ETHTOOL_A_COALESCE_MAX = 23,
+};
+
+struct coalesce_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_coalesce coalesce;
+ u32 supported_params;
+};
+
+enum {
+ ETHTOOL_A_PAUSE_UNSPEC = 0,
+ ETHTOOL_A_PAUSE_HEADER = 1,
+ ETHTOOL_A_PAUSE_AUTONEG = 2,
+ ETHTOOL_A_PAUSE_RX = 3,
+ ETHTOOL_A_PAUSE_TX = 4,
+ __ETHTOOL_A_PAUSE_CNT = 5,
+ ETHTOOL_A_PAUSE_MAX = 4,
+};
+
+struct pause_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_pauseparam pauseparam;
+};
+
+enum {
+ ETHTOOL_A_EEE_UNSPEC = 0,
+ ETHTOOL_A_EEE_HEADER = 1,
+ ETHTOOL_A_EEE_MODES_OURS = 2,
+ ETHTOOL_A_EEE_MODES_PEER = 3,
+ ETHTOOL_A_EEE_ACTIVE = 4,
+ ETHTOOL_A_EEE_ENABLED = 5,
+ ETHTOOL_A_EEE_TX_LPI_ENABLED = 6,
+ ETHTOOL_A_EEE_TX_LPI_TIMER = 7,
+ __ETHTOOL_A_EEE_CNT = 8,
+ ETHTOOL_A_EEE_MAX = 7,
+};
+
+struct eee_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_eee eee;
+};
+
+enum {
+ ETHTOOL_A_TSINFO_UNSPEC = 0,
+ ETHTOOL_A_TSINFO_HEADER = 1,
+ ETHTOOL_A_TSINFO_TIMESTAMPING = 2,
+ ETHTOOL_A_TSINFO_TX_TYPES = 3,
+ ETHTOOL_A_TSINFO_RX_FILTERS = 4,
+ ETHTOOL_A_TSINFO_PHC_INDEX = 5,
+ __ETHTOOL_A_TSINFO_CNT = 6,
+ ETHTOOL_A_TSINFO_MAX = 5,
+};
+
+struct tsinfo_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_ts_info ts_info;
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_UNSPEC = 0,
+ ETHTOOL_A_CABLE_TEST_HEADER = 1,
+ __ETHTOOL_A_CABLE_TEST_CNT = 2,
+ ETHTOOL_A_CABLE_TEST_MAX = 1,
+};
+
+enum {
+ ETHTOOL_A_CABLE_PAIR_A = 0,
+ ETHTOOL_A_CABLE_PAIR_B = 1,
+ ETHTOOL_A_CABLE_PAIR_C = 2,
+ ETHTOOL_A_CABLE_PAIR_D = 3,
+};
+
+enum {
+ ETHTOOL_A_CABLE_RESULT_UNSPEC = 0,
+ ETHTOOL_A_CABLE_RESULT_PAIR = 1,
+ ETHTOOL_A_CABLE_RESULT_CODE = 2,
+ __ETHTOOL_A_CABLE_RESULT_CNT = 3,
+ ETHTOOL_A_CABLE_RESULT_MAX = 2,
+};
+
+enum {
+ ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0,
+ ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1,
+ ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2,
+ __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 3,
+ ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 2,
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0,
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1,
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2,
+};
+
+enum {
+ ETHTOOL_A_CABLE_NEST_UNSPEC = 0,
+ ETHTOOL_A_CABLE_NEST_RESULT = 1,
+ ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2,
+ __ETHTOOL_A_CABLE_NEST_CNT = 3,
+ ETHTOOL_A_CABLE_NEST_MAX = 2,
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0,
+ ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1,
+ ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2,
+ ETHTOOL_A_CABLE_TEST_NTF_NEST = 3,
+ __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4,
+ ETHTOOL_A_CABLE_TEST_NTF_MAX = 3,
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4,
+ __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4,
+};
+
+enum {
+ ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0,
+ ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1,
+ ETHTOOL_A_CABLE_TEST_TDR_CFG = 2,
+ __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3,
+ ETHTOOL_A_CABLE_TEST_TDR_MAX = 2,
+};
+
+enum {
+ ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0,
+ ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1,
+ ETHTOOL_A_CABLE_AMPLITUDE_mV = 2,
+ __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3,
+ ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2,
+};
+
+enum {
+ ETHTOOL_A_CABLE_PULSE_UNSPEC = 0,
+ ETHTOOL_A_CABLE_PULSE_mV = 1,
+ __ETHTOOL_A_CABLE_PULSE_CNT = 2,
+ ETHTOOL_A_CABLE_PULSE_MAX = 1,
+};
+
+enum {
+ ETHTOOL_A_CABLE_STEP_UNSPEC = 0,
+ ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1,
+ ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2,
+ ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3,
+ __ETHTOOL_A_CABLE_STEP_CNT = 4,
+ ETHTOOL_A_CABLE_STEP_MAX = 3,
+};
+
+enum {
+ ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0,
+ ETHTOOL_A_CABLE_TDR_NEST_STEP = 1,
+ ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2,
+ ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3,
+ __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4,
+ ETHTOOL_A_CABLE_TDR_NEST_MAX = 3,
+};
+
+struct nf_hook_ops {
+ nf_hookfn *hook;
+ struct net_device *dev;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
+ int priority;
+};
+
+struct nf_hook_entries_rcu_head {
+ struct callback_head head;
+ void *allocation;
+};
+
+struct nf_ipv6_ops {
+ void (*route_input)(struct sk_buff *);
+ int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *));
+ int (*reroute)(struct sk_buff *, const struct nf_queue_entry *);
+};
+
+struct nf_queue_entry {
+ struct list_head list;
+ struct sk_buff *skb;
+ unsigned int id;
+ unsigned int hook_index;
+ struct nf_hook_state state;
+ u16 size;
+};
+
+struct nf_loginfo {
+ u_int8_t type;
+ union {
+ struct {
+ u_int32_t copy_len;
+ u_int16_t group;
+ u_int16_t qthreshold;
+ u_int16_t flags;
+ } ulog;
+ struct {
+ u_int8_t level;
+ u_int8_t logflags;
+ } log;
+ } u;
+};
+
+struct nf_log_buf {
+ unsigned int count;
+ char buf[1020];
+};
+
+struct ip_rt_info {
+ __be32 daddr;
+ __be32 saddr;
+ u_int8_t tos;
+ u_int32_t mark;
+};
+
+struct ip6_rt_info {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ u_int32_t mark;
+};
+
+struct nf_sockopt_ops {
+ struct list_head list;
+ u_int8_t pf;
+ int set_optmin;
+ int set_optmax;
+ int (*set)(struct sock *, int, void *, unsigned int);
+ int get_optmin;
+ int get_optmax;
+ int (*get)(struct sock *, int, void *, int *);
+ struct module *owner;
+};
+
+enum nfnetlink_groups {
+ NFNLGRP_NONE = 0,
+ NFNLGRP_CONNTRACK_NEW = 1,
+ NFNLGRP_CONNTRACK_UPDATE = 2,
+ NFNLGRP_CONNTRACK_DESTROY = 3,
+ NFNLGRP_CONNTRACK_EXP_NEW = 4,
+ NFNLGRP_CONNTRACK_EXP_UPDATE = 5,
+ NFNLGRP_CONNTRACK_EXP_DESTROY = 6,
+ NFNLGRP_NFTABLES = 7,
+ NFNLGRP_ACCT_QUOTA = 8,
+ NFNLGRP_NFTRACE = 9,
+ __NFNLGRP_MAX = 10,
+};
+
+struct nfgenmsg {
+ __u8 nfgen_family;
+ __u8 version;
+ __be16 res_id;
+};
+
+enum nfnl_batch_attributes {
+ NFNL_BATCH_UNSPEC = 0,
+ NFNL_BATCH_GENID = 1,
+ __NFNL_BATCH_MAX = 2,
+};
+
+struct nfnl_callback {
+ int (*call)(struct net *, struct sock *, struct sk_buff *, const struct nlmsghdr *, const struct nlattr * const *, struct netlink_ext_ack *);
+ int (*call_rcu)(struct net *, struct sock *, struct sk_buff *, const struct nlmsghdr *, const struct nlattr * const *, struct netlink_ext_ack *);
+ int (*call_batch)(struct net *, struct sock *, struct sk_buff *, const struct nlmsghdr *, const struct nlattr * const *, struct netlink_ext_ack *);
+ const struct nla_policy *policy;
+ const u_int16_t attr_count;
+};
+
+struct nfnetlink_subsystem {
+ const char *name;
+ __u8 subsys_id;
+ __u8 cb_count;
+ const struct nfnl_callback *cb;
+ struct module *owner;
+ int (*commit)(struct net *, struct sk_buff *);
+ int (*abort)(struct net *, struct sk_buff *, bool);
+ void (*cleanup)(struct net *);
+ bool (*valid_genid)(struct net *, u32);
+};
+
+struct nfnl_err {
+ struct list_head head;
+ struct nlmsghdr *nlh;
+ int err;
+ struct netlink_ext_ack extack;
+};
+
+enum {
+ NFNL_BATCH_FAILURE = 1,
+ NFNL_BATCH_DONE = 2,
+ NFNL_BATCH_REPLAY = 4,
+};
+
+enum nfnl_acct_msg_types {
+ NFNL_MSG_ACCT_NEW = 0,
+ NFNL_MSG_ACCT_GET = 1,
+ NFNL_MSG_ACCT_GET_CTRZERO = 2,
+ NFNL_MSG_ACCT_DEL = 3,
+ NFNL_MSG_ACCT_OVERQUOTA = 4,
+ NFNL_MSG_ACCT_MAX = 5,
+};
+
+enum nfnl_acct_flags {
+ NFACCT_F_QUOTA_PKTS = 1,
+ NFACCT_F_QUOTA_BYTES = 2,
+ NFACCT_F_OVERQUOTA = 4,
+};
+
+enum nfnl_acct_type {
+ NFACCT_UNSPEC = 0,
+ NFACCT_NAME = 1,
+ NFACCT_PKTS = 2,
+ NFACCT_BYTES = 3,
+ NFACCT_USE = 4,
+ NFACCT_FLAGS = 5,
+ NFACCT_QUOTA = 6,
+ NFACCT_FILTER = 7,
+ NFACCT_PAD = 8,
+ __NFACCT_MAX = 9,
+};
+
+enum nfnl_attr_filter_type {
+ NFACCT_FILTER_UNSPEC = 0,
+ NFACCT_FILTER_MASK = 1,
+ NFACCT_FILTER_VALUE = 2,
+ __NFACCT_FILTER_MAX = 3,
+};
+
+enum {
+ NFACCT_NO_QUOTA = -1,
+ NFACCT_UNDERQUOTA = 0,
+ NFACCT_OVERQUOTA = 1,
+};
+
+struct nf_acct {
+ atomic64_t pkts;
+ atomic64_t bytes;
+ long unsigned int flags;
+ struct list_head head;
+ refcount_t refcnt;
+ char name[32];
+ struct callback_head callback_head;
+ char data[0];
+};
+
+struct nfacct_filter {
+ u32 value;
+ u32 mask;
+};
+
+enum ip_conntrack_status {
+ IPS_EXPECTED_BIT = 0,
+ IPS_EXPECTED = 1,
+ IPS_SEEN_REPLY_BIT = 1,
+ IPS_SEEN_REPLY = 2,
+ IPS_ASSURED_BIT = 2,
+ IPS_ASSURED = 4,
+ IPS_CONFIRMED_BIT = 3,
+ IPS_CONFIRMED = 8,
+ IPS_SRC_NAT_BIT = 4,
+ IPS_SRC_NAT = 16,
+ IPS_DST_NAT_BIT = 5,
+ IPS_DST_NAT = 32,
+ IPS_NAT_MASK = 48,
+ IPS_SEQ_ADJUST_BIT = 6,
+ IPS_SEQ_ADJUST = 64,
+ IPS_SRC_NAT_DONE_BIT = 7,
+ IPS_SRC_NAT_DONE = 128,
+ IPS_DST_NAT_DONE_BIT = 8,
+ IPS_DST_NAT_DONE = 256,
+ IPS_NAT_DONE_MASK = 384,
+ IPS_DYING_BIT = 9,
+ IPS_DYING = 512,
+ IPS_FIXED_TIMEOUT_BIT = 10,
+ IPS_FIXED_TIMEOUT = 1024,
+ IPS_TEMPLATE_BIT = 11,
+ IPS_TEMPLATE = 2048,
+ IPS_UNTRACKED_BIT = 12,
+ IPS_UNTRACKED = 4096,
+ IPS_NAT_CLASH_BIT = 12,
+ IPS_NAT_CLASH = 4096,
+ IPS_HELPER_BIT = 13,
+ IPS_HELPER = 8192,
+ IPS_OFFLOAD_BIT = 14,
+ IPS_OFFLOAD = 16384,
+ IPS_HW_OFFLOAD_BIT = 15,
+ IPS_HW_OFFLOAD = 32768,
+ IPS_UNCHANGEABLE_MASK = 56313,
+ __IPS_MAX_BIT = 16,
+};
+
+enum nfqnl_msg_types {
+ NFQNL_MSG_PACKET = 0,
+ NFQNL_MSG_VERDICT = 1,
+ NFQNL_MSG_CONFIG = 2,
+ NFQNL_MSG_VERDICT_BATCH = 3,
+ NFQNL_MSG_MAX = 4,
+};
+
+struct nfqnl_msg_packet_hdr {
+ __be32 packet_id;
+ __be16 hw_protocol;
+ __u8 hook;
+} __attribute__((packed));
+
+struct nfqnl_msg_packet_hw {
+ __be16 hw_addrlen;
+ __u16 _pad;
+ __u8 hw_addr[8];
+};
+
+struct nfqnl_msg_packet_timestamp {
+ __be64 sec;
+ __be64 usec;
+};
+
+enum nfqnl_vlan_attr {
+ NFQA_VLAN_UNSPEC = 0,
+ NFQA_VLAN_PROTO = 1,
+ NFQA_VLAN_TCI = 2,
+ __NFQA_VLAN_MAX = 3,
+};
+
+enum nfqnl_attr_type {
+ NFQA_UNSPEC = 0,
+ NFQA_PACKET_HDR = 1,
+ NFQA_VERDICT_HDR = 2,
+ NFQA_MARK = 3,
+ NFQA_TIMESTAMP = 4,
+ NFQA_IFINDEX_INDEV = 5,
+ NFQA_IFINDEX_OUTDEV = 6,
+ NFQA_IFINDEX_PHYSINDEV = 7,
+ NFQA_IFINDEX_PHYSOUTDEV = 8,
+ NFQA_HWADDR = 9,
+ NFQA_PAYLOAD = 10,
+ NFQA_CT = 11,
+ NFQA_CT_INFO = 12,
+ NFQA_CAP_LEN = 13,
+ NFQA_SKB_INFO = 14,
+ NFQA_EXP = 15,
+ NFQA_UID = 16,
+ NFQA_GID = 17,
+ NFQA_SECCTX = 18,
+ NFQA_VLAN = 19,
+ NFQA_L2HDR = 20,
+ __NFQA_MAX = 21,
+};
+
+struct nfqnl_msg_verdict_hdr {
+ __be32 verdict;
+ __be32 id;
+};
+
+enum nfqnl_msg_config_cmds {
+ NFQNL_CFG_CMD_NONE = 0,
+ NFQNL_CFG_CMD_BIND = 1,
+ NFQNL_CFG_CMD_UNBIND = 2,
+ NFQNL_CFG_CMD_PF_BIND = 3,
+ NFQNL_CFG_CMD_PF_UNBIND = 4,
+};
+
+struct nfqnl_msg_config_cmd {
+ __u8 command;
+ __u8 _pad;
+ __be16 pf;
+};
+
+enum nfqnl_config_mode {
+ NFQNL_COPY_NONE = 0,
+ NFQNL_COPY_META = 1,
+ NFQNL_COPY_PACKET = 2,
+};
+
+struct nfqnl_msg_config_params {
+ __be32 copy_range;
+ __u8 copy_mode;
+} __attribute__((packed));
+
+enum nfqnl_attr_config {
+ NFQA_CFG_UNSPEC = 0,
+ NFQA_CFG_CMD = 1,
+ NFQA_CFG_PARAMS = 2,
+ NFQA_CFG_QUEUE_MAXLEN = 3,
+ NFQA_CFG_MASK = 4,
+ NFQA_CFG_FLAGS = 5,
+ __NFQA_CFG_MAX = 6,
+};
+
+struct nfqnl_instance {
+ struct hlist_node hlist;
+ struct callback_head rcu;
+ u32 peer_portid;
+ unsigned int queue_maxlen;
+ unsigned int copy_range;
+ unsigned int queue_dropped;
+ unsigned int queue_user_dropped;
+ u_int16_t queue_num;
+ u_int8_t copy_mode;
+ u_int32_t flags;
+ spinlock_t lock;
+ unsigned int queue_total;
+ unsigned int id_sequence;
+ struct list_head queue_list;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, long unsigned int);
+
+struct nfnl_queue_net {
+ spinlock_t instances_lock;
+ struct hlist_head instance_table[16];
+};
+
+struct iter_state {
+ struct seq_net_private p;
+ unsigned int bucket;
+};
+
+enum nfulnl_msg_types {
+ NFULNL_MSG_PACKET = 0,
+ NFULNL_MSG_CONFIG = 1,
+ NFULNL_MSG_MAX = 2,
+};
+
+struct nfulnl_msg_packet_hdr {
+ __be16 hw_protocol;
+ __u8 hook;
+ __u8 _pad;
+};
+
+struct nfulnl_msg_packet_hw {
+ __be16 hw_addrlen;
+ __u16 _pad;
+ __u8 hw_addr[8];
+};
+
+struct nfulnl_msg_packet_timestamp {
+ __be64 sec;
+ __be64 usec;
+};
+
+enum nfulnl_vlan_attr {
+ NFULA_VLAN_UNSPEC = 0,
+ NFULA_VLAN_PROTO = 1,
+ NFULA_VLAN_TCI = 2,
+ __NFULA_VLAN_MAX = 3,
+};
+
+enum nfulnl_attr_type {
+ NFULA_UNSPEC = 0,
+ NFULA_PACKET_HDR = 1,
+ NFULA_MARK = 2,
+ NFULA_TIMESTAMP = 3,
+ NFULA_IFINDEX_INDEV = 4,
+ NFULA_IFINDEX_OUTDEV = 5,
+ NFULA_IFINDEX_PHYSINDEV = 6,
+ NFULA_IFINDEX_PHYSOUTDEV = 7,
+ NFULA_HWADDR = 8,
+ NFULA_PAYLOAD = 9,
+ NFULA_PREFIX = 10,
+ NFULA_UID = 11,
+ NFULA_SEQ = 12,
+ NFULA_SEQ_GLOBAL = 13,
+ NFULA_GID = 14,
+ NFULA_HWTYPE = 15,
+ NFULA_HWHEADER = 16,
+ NFULA_HWLEN = 17,
+ NFULA_CT = 18,
+ NFULA_CT_INFO = 19,
+ NFULA_VLAN = 20,
+ NFULA_L2HDR = 21,
+ __NFULA_MAX = 22,
+};
+
+enum nfulnl_msg_config_cmds {
+ NFULNL_CFG_CMD_NONE = 0,
+ NFULNL_CFG_CMD_BIND = 1,
+ NFULNL_CFG_CMD_UNBIND = 2,
+ NFULNL_CFG_CMD_PF_BIND = 3,
+ NFULNL_CFG_CMD_PF_UNBIND = 4,
+};
+
+struct nfulnl_msg_config_cmd {
+ __u8 command;
+};
+
+struct nfulnl_msg_config_mode {
+ __be32 copy_range;
+ __u8 copy_mode;
+ __u8 _pad;
+} __attribute__((packed));
+
+enum nfulnl_attr_config {
+ NFULA_CFG_UNSPEC = 0,
+ NFULA_CFG_CMD = 1,
+ NFULA_CFG_MODE = 2,
+ NFULA_CFG_NLBUFSIZ = 3,
+ NFULA_CFG_TIMEOUT = 4,
+ NFULA_CFG_QTHRESH = 5,
+ NFULA_CFG_FLAGS = 6,
+ __NFULA_CFG_MAX = 7,
+};
+
+struct nfulnl_instance {
+ struct hlist_node hlist;
+ spinlock_t lock;
+ refcount_t use;
+ unsigned int qlen;
+ struct sk_buff *skb;
+ struct timer_list timer;
+ struct net *net;
+ struct user_namespace *peer_user_ns;
+ u32 peer_portid;
+ unsigned int flushtimeout;
+ unsigned int nlbufsiz;
+ unsigned int qthreshold;
+ u_int32_t copy_range;
+ u_int32_t seq;
+ u_int16_t group_num;
+ u_int16_t flags;
+ u_int8_t copy_mode;
+ struct callback_head rcu;
+};
+
+struct nfnl_log_net {
+ spinlock_t instances_lock;
+ struct hlist_head instance_table[16];
+ atomic_t global_seq;
+};
+
+struct nf_osf_wc {
+ __u32 wc;
+ __u32 val;
+};
+
+struct nf_osf_opt {
+ __u16 kind;
+ __u16 length;
+ struct nf_osf_wc wc;
+};
+
+struct nf_osf_info {
+ char genre[32];
+ __u32 len;
+ __u32 flags;
+ __u32 loglevel;
+ __u32 ttl;
+};
+
+struct nf_osf_user_finger {
+ struct nf_osf_wc wss;
+ __u8 ttl;
+ __u8 df;
+ __u16 ss;
+ __u16 mss;
+ __u16 opt_num;
+ char genre[32];
+ char version[32];
+ char subtype[32];
+ struct nf_osf_opt opt[40];
+};
+
+enum iana_options {
+ OSFOPT_EOL = 0,
+ OSFOPT_NOP = 1,
+ OSFOPT_MSS = 2,
+ OSFOPT_WSO = 3,
+ OSFOPT_SACKP = 4,
+ OSFOPT_SACK = 5,
+ OSFOPT_ECHO = 6,
+ OSFOPT_ECHOREPLY = 7,
+ OSFOPT_TS = 8,
+ OSFOPT_POCP = 9,
+ OSFOPT_POSP = 10,
+ OSFOPT_EMPTY = 255,
+};
+
+enum nf_osf_window_size_options {
+ OSF_WSS_PLAIN = 0,
+ OSF_WSS_MSS = 1,
+ OSF_WSS_MTU = 2,
+ OSF_WSS_MODULO = 3,
+ OSF_WSS_MAX = 4,
+};
+
+enum nf_osf_attr_type {
+ OSF_ATTR_UNSPEC = 0,
+ OSF_ATTR_FINGER = 1,
+ OSF_ATTR_MAX = 2,
+};
+
+enum nf_osf_msg_types {
+ OSF_MSG_ADD = 0,
+ OSF_MSG_REMOVE = 1,
+ OSF_MSG_MAX = 2,
+};
+
+enum osf_fmatch_states {
+ FMATCH_WRONG = 0,
+ FMATCH_OK = 1,
+ FMATCH_OPT_WRONG = 2,
+};
+
+struct nf_osf_finger {
+ struct callback_head callback_head;
+ struct list_head finger_entry;
+ struct nf_osf_user_finger finger;
+};
+
+struct nf_osf_data {
+ const char *genre;
+ const char *version;
+};
+
+struct nf_osf_hdr_ctx {
+ bool df;
+ u16 window;
+ u16 totlen;
+ const unsigned char *optp;
+ unsigned int optsize;
+};
+
+enum ip_conntrack_events {
+ IPCT_NEW = 0,
+ IPCT_RELATED = 1,
+ IPCT_DESTROY = 2,
+ IPCT_REPLY = 3,
+ IPCT_ASSURED = 4,
+ IPCT_PROTOINFO = 5,
+ IPCT_HELPER = 6,
+ IPCT_MARK = 7,
+ IPCT_SEQADJ = 8,
+ IPCT_NATSEQADJ = 8,
+ IPCT_SECMARK = 9,
+ IPCT_LABEL = 10,
+ IPCT_SYNPROXY = 11,
+ __IPCT_MAX = 12,
+};
+
+struct nf_conntrack_expect_policy;
+
+struct nf_conntrack_helper {
+ struct hlist_node hnode;
+ char name[16];
+ refcount_t refcnt;
+ struct module *me;
+ const struct nf_conntrack_expect_policy *expect_policy;
+ struct nf_conntrack_tuple tuple;
+ int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info);
+ void (*destroy)(struct nf_conn *);
+ int (*from_nlattr)(struct nlattr *, struct nf_conn *);
+ int (*to_nlattr)(struct sk_buff *, const struct nf_conn *);
+ unsigned int expect_class_max;
+ unsigned int flags;
+ unsigned int queue_num;
+ u16 data_len;
+ char nat_mod_name[16];
+};
+
+struct nf_conntrack_expect_policy {
+ unsigned int max_expected;
+ unsigned int timeout;
+ char name[16];
+};
+
+enum nf_ct_helper_flags {
+ NF_CT_HELPER_F_USERSPACE = 1,
+ NF_CT_HELPER_F_CONFIGURED = 2,
+};
+
+struct nf_conn_help {
+ struct nf_conntrack_helper *helper;
+ struct hlist_head expectations;
+ u8 expecting[4];
+ int: 32;
+ char data[32];
+};
+
+struct nf_ct_seqadj {
+ u32 correction_pos;
+ s32 offset_before;
+ s32 offset_after;
+};
+
+struct nf_conn_seqadj {
+ struct nf_ct_seqadj seq[2];
+};
+
+enum nf_ct_ecache_state {
+ NFCT_ECACHE_UNKNOWN = 0,
+ NFCT_ECACHE_DESTROY_FAIL = 1,
+ NFCT_ECACHE_DESTROY_SENT = 2,
+};
+
+struct nf_conntrack_ecache {
+ long unsigned int cache;
+ u16 missed;
+ u16 ctmask;
+ u16 expmask;
+ enum nf_ct_ecache_state state: 8;
+ u32 portid;
+};
+
+struct nf_conn_counter {
+ atomic64_t packets;
+ atomic64_t bytes;
+};
+
+struct nf_conn_acct {
+ struct nf_conn_counter counter[2];
+};
+
+struct nf_conn_tstamp {
+ u_int64_t start;
+ u_int64_t stop;
+};
+
+struct nf_ct_timeout {
+ __u16 l3num;
+ const struct nf_conntrack_l4proto *l4proto;
+ char data[0];
+};
+
+struct nf_conn_timeout {
+ struct nf_ct_timeout *timeout;
+};
+
+struct nf_conn_synproxy {
+ u32 isn;
+ u32 its;
+ u32 tsoff;
+};
+
+struct conntrack_gc_work {
+ struct delayed_work dwork;
+ u32 last_bucket;
+ bool exiting;
+ bool early_drop;
+ long int next_gc_run;
+};
+
+enum ctattr_l4proto {
+ CTA_PROTO_UNSPEC = 0,
+ CTA_PROTO_NUM = 1,
+ CTA_PROTO_SRC_PORT = 2,
+ CTA_PROTO_DST_PORT = 3,
+ CTA_PROTO_ICMP_ID = 4,
+ CTA_PROTO_ICMP_TYPE = 5,
+ CTA_PROTO_ICMP_CODE = 6,
+ CTA_PROTO_ICMPV6_ID = 7,
+ CTA_PROTO_ICMPV6_TYPE = 8,
+ CTA_PROTO_ICMPV6_CODE = 9,
+ __CTA_PROTO_MAX = 10,
+};
+
+struct iter_data {
+ int (*iter)(struct nf_conn *, void *);
+ void *data;
+ struct net *net;
+};
+
+enum nf_ct_sysctl_index {
+ NF_SYSCTL_CT_MAX = 0,
+ NF_SYSCTL_CT_COUNT = 1,
+ NF_SYSCTL_CT_BUCKETS = 2,
+ NF_SYSCTL_CT_CHECKSUM = 3,
+ NF_SYSCTL_CT_LOG_INVALID = 4,
+ NF_SYSCTL_CT_EXPECT_MAX = 5,
+ NF_SYSCTL_CT_ACCT = 6,
+ NF_SYSCTL_CT_HELPER = 7,
+ NF_SYSCTL_CT_EVENTS = 8,
+ NF_SYSCTL_CT_TIMESTAMP = 9,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 10,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 11,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 12,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 13,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 14,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 15,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 16,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 17,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 18,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 19,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 20,
+ NF_SYSCTL_CT_PROTO_TCP_LOOSE = 21,
+ NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 22,
+ NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 23,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 24,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 25,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 26,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 27,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED = 28,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT = 29,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED = 30,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED = 31,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT = 32,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD = 33,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT = 34,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT = 35,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED = 36,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST = 37,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND = 38,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN = 39,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN = 40,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ = 41,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING = 42,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT = 43,
+ NF_SYSCTL_CT_PROTO_DCCP_LOOSE = 44,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE = 45,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM = 46,
+ __NF_SYSCTL_CT_LAST_SYSCTL = 47,
+};
+
+enum ip_conntrack_expect_events {
+ IPEXP_NEW = 0,
+ IPEXP_DESTROY = 1,
+};
+
+struct nf_ct_ext_type {
+ void (*destroy)(struct nf_conn *);
+ enum nf_ct_ext_id id;
+ u8 len;
+ u8 align;
+};
+
+struct nf_ct_helper_expectfn {
+ struct list_head head;
+ const char *name;
+ void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *);
+};
+
+struct nf_conntrack_nat_helper {
+ struct list_head list;
+ char mod_name[16];
+ struct module *module;
+};
+
+enum nf_ip_hook_priorities {
+ NF_IP_PRI_FIRST = -2147483648,
+ NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
+ NF_IP_PRI_CONNTRACK_DEFRAG = -400,
+ NF_IP_PRI_RAW = -300,
+ NF_IP_PRI_SELINUX_FIRST = -225,
+ NF_IP_PRI_CONNTRACK = -200,
+ NF_IP_PRI_MANGLE = -150,
+ NF_IP_PRI_NAT_DST = -100,
+ NF_IP_PRI_FILTER = 0,
+ NF_IP_PRI_SECURITY = 50,
+ NF_IP_PRI_NAT_SRC = 100,
+ NF_IP_PRI_SELINUX_LAST = 225,
+ NF_IP_PRI_CONNTRACK_HELPER = 300,
+ NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647,
+ NF_IP_PRI_LAST = 2147483647,
+};
+
+struct nf_conntrack_net {
+ unsigned int users4;
+ unsigned int users6;
+ unsigned int users_bridge;
+};
+
+struct nf_ct_bridge_info {
+ struct nf_hook_ops *ops;
+ unsigned int ops_size;
+ struct module *me;
+};
+
+enum nf_ip6_hook_priorities {
+ NF_IP6_PRI_FIRST = -2147483648,
+ NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
+ NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
+ NF_IP6_PRI_RAW = -300,
+ NF_IP6_PRI_SELINUX_FIRST = -225,
+ NF_IP6_PRI_CONNTRACK = -200,
+ NF_IP6_PRI_MANGLE = -150,
+ NF_IP6_PRI_NAT_DST = -100,
+ NF_IP6_PRI_FILTER = 0,
+ NF_IP6_PRI_SECURITY = 50,
+ NF_IP6_PRI_NAT_SRC = 100,
+ NF_IP6_PRI_SELINUX_LAST = 225,
+ NF_IP6_PRI_CONNTRACK_HELPER = 300,
+ NF_IP6_PRI_LAST = 2147483647,
+};
+
+enum ctattr_timeout_generic {
+ CTA_TIMEOUT_GENERIC_UNSPEC = 0,
+ CTA_TIMEOUT_GENERIC_TIMEOUT = 1,
+ __CTA_TIMEOUT_GENERIC_MAX = 2,
+};
+
+struct nf_ct_tcp_flags {
+ __u8 flags;
+ __u8 mask;
+};
+
+enum {
+ TCP_FLAG_CWR = 32768,
+ TCP_FLAG_ECE = 16384,
+ TCP_FLAG_URG = 8192,
+ TCP_FLAG_ACK = 4096,
+ TCP_FLAG_PSH = 2048,
+ TCP_FLAG_RST = 1024,
+ TCP_FLAG_SYN = 512,
+ TCP_FLAG_FIN = 256,
+ TCP_RESERVED_BITS = 15,
+ TCP_DATA_OFFSET = 240,
+};
+
+enum tcp_bit_set {
+ TCP_SYN_SET = 0,
+ TCP_SYNACK_SET = 1,
+ TCP_FIN_SET = 2,
+ TCP_ACK_SET = 3,
+ TCP_RST_SET = 4,
+ TCP_NONE_SET = 5,
+};
+
+enum ctattr_protoinfo {
+ CTA_PROTOINFO_UNSPEC = 0,
+ CTA_PROTOINFO_TCP = 1,
+ CTA_PROTOINFO_DCCP = 2,
+ CTA_PROTOINFO_SCTP = 3,
+ __CTA_PROTOINFO_MAX = 4,
+};
+
+enum ctattr_protoinfo_tcp {
+ CTA_PROTOINFO_TCP_UNSPEC = 0,
+ CTA_PROTOINFO_TCP_STATE = 1,
+ CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2,
+ CTA_PROTOINFO_TCP_WSCALE_REPLY = 3,
+ CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4,
+ CTA_PROTOINFO_TCP_FLAGS_REPLY = 5,
+ __CTA_PROTOINFO_TCP_MAX = 6,
+};
+
+enum ctattr_timeout_tcp {
+ CTA_TIMEOUT_TCP_UNSPEC = 0,
+ CTA_TIMEOUT_TCP_SYN_SENT = 1,
+ CTA_TIMEOUT_TCP_SYN_RECV = 2,
+ CTA_TIMEOUT_TCP_ESTABLISHED = 3,
+ CTA_TIMEOUT_TCP_FIN_WAIT = 4,
+ CTA_TIMEOUT_TCP_CLOSE_WAIT = 5,
+ CTA_TIMEOUT_TCP_LAST_ACK = 6,
+ CTA_TIMEOUT_TCP_TIME_WAIT = 7,
+ CTA_TIMEOUT_TCP_CLOSE = 8,
+ CTA_TIMEOUT_TCP_SYN_SENT2 = 9,
+ CTA_TIMEOUT_TCP_RETRANS = 10,
+ CTA_TIMEOUT_TCP_UNACK = 11,
+ __CTA_TIMEOUT_TCP_MAX = 12,
+};
+
+enum ctattr_timeout_udp {
+ CTA_TIMEOUT_UDP_UNSPEC = 0,
+ CTA_TIMEOUT_UDP_UNREPLIED = 1,
+ CTA_TIMEOUT_UDP_REPLIED = 2,
+ __CTA_TIMEOUT_UDP_MAX = 3,
+};
+
+enum ctattr_timeout_icmp {
+ CTA_TIMEOUT_ICMP_UNSPEC = 0,
+ CTA_TIMEOUT_ICMP_TIMEOUT = 1,
+ __CTA_TIMEOUT_ICMP_MAX = 2,
+};
+
+struct tcp_sack_block_wire {
+ __be32 start_seq;
+ __be32 end_seq;
+};
+
+struct icmpv6_echo {
+ __be16 identifier;
+ __be16 sequence;
+};
+
+struct icmpv6_nd_advt {
+ __u32 reserved: 5;
+ __u32 override: 1;
+ __u32 solicited: 1;
+ __u32 router: 1;
+ __u32 reserved2: 24;
+};
+
+struct icmpv6_nd_ra {
+ __u8 hop_limit;
+ __u8 reserved: 3;
+ __u8 router_pref: 2;
+ __u8 home_agent: 1;
+ __u8 other: 1;
+ __u8 managed: 1;
+ __be16 rt_lifetime;
+};
+
+struct icmp6hdr {
+ __u8 icmp6_type;
+ __u8 icmp6_code;
+ __sum16 icmp6_cksum;
+ union {
+ __be32 un_data32[1];
+ __be16 un_data16[2];
+ __u8 un_data8[4];
+ struct icmpv6_echo u_echo;
+ struct icmpv6_nd_advt u_nd_advt;
+ struct icmpv6_nd_ra u_nd_ra;
+ } icmp6_dataun;
+};
+
+enum ctattr_timeout_icmpv6 {
+ CTA_TIMEOUT_ICMPV6_UNSPEC = 0,
+ CTA_TIMEOUT_ICMPV6_TIMEOUT = 1,
+ __CTA_TIMEOUT_ICMPV6_MAX = 2,
+};
+
+enum retry_state {
+ STATE_CONGESTED = 0,
+ STATE_RESTART = 1,
+ STATE_DONE = 2,
+};
+
+enum ct_dccp_roles {
+ CT_DCCP_ROLE_CLIENT = 0,
+ CT_DCCP_ROLE_SERVER = 1,
+ __CT_DCCP_ROLE_MAX = 2,
+};
+
+struct dccp_hdr_ext {
+ __be32 dccph_seq_low;
+};
+
+struct dccp_hdr_ack_bits {
+ __be16 dccph_reserved1;
+ __be16 dccph_ack_nr_high;
+ __be32 dccph_ack_nr_low;
+};
+
+enum dccp_pkt_type {
+ DCCP_PKT_REQUEST = 0,
+ DCCP_PKT_RESPONSE = 1,
+ DCCP_PKT_DATA = 2,
+ DCCP_PKT_ACK = 3,
+ DCCP_PKT_DATAACK = 4,
+ DCCP_PKT_CLOSEREQ = 5,
+ DCCP_PKT_CLOSE = 6,
+ DCCP_PKT_RESET = 7,
+ DCCP_PKT_SYNC = 8,
+ DCCP_PKT_SYNCACK = 9,
+ DCCP_PKT_INVALID = 10,
+};
+
+enum ctattr_protoinfo_dccp {
+ CTA_PROTOINFO_DCCP_UNSPEC = 0,
+ CTA_PROTOINFO_DCCP_STATE = 1,
+ CTA_PROTOINFO_DCCP_ROLE = 2,
+ CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ = 3,
+ CTA_PROTOINFO_DCCP_PAD = 4,
+ __CTA_PROTOINFO_DCCP_MAX = 5,
+};
+
+enum ctattr_timeout_dccp {
+ CTA_TIMEOUT_DCCP_UNSPEC = 0,
+ CTA_TIMEOUT_DCCP_REQUEST = 1,
+ CTA_TIMEOUT_DCCP_RESPOND = 2,
+ CTA_TIMEOUT_DCCP_PARTOPEN = 3,
+ CTA_TIMEOUT_DCCP_OPEN = 4,
+ CTA_TIMEOUT_DCCP_CLOSEREQ = 5,
+ CTA_TIMEOUT_DCCP_CLOSING = 6,
+ CTA_TIMEOUT_DCCP_TIMEWAIT = 7,
+ __CTA_TIMEOUT_DCCP_MAX = 8,
+};
+
+struct sctp_chunkhdr {
+ __u8 type;
+ __u8 flags;
+ __be16 length;
+};
+
+enum sctp_cid {
+ SCTP_CID_DATA = 0,
+ SCTP_CID_INIT = 1,
+ SCTP_CID_INIT_ACK = 2,
+ SCTP_CID_SACK = 3,
+ SCTP_CID_HEARTBEAT = 4,
+ SCTP_CID_HEARTBEAT_ACK = 5,
+ SCTP_CID_ABORT = 6,
+ SCTP_CID_SHUTDOWN = 7,
+ SCTP_CID_SHUTDOWN_ACK = 8,
+ SCTP_CID_ERROR = 9,
+ SCTP_CID_COOKIE_ECHO = 10,
+ SCTP_CID_COOKIE_ACK = 11,
+ SCTP_CID_ECN_ECNE = 12,
+ SCTP_CID_ECN_CWR = 13,
+ SCTP_CID_SHUTDOWN_COMPLETE = 14,
+ SCTP_CID_AUTH = 15,
+ SCTP_CID_I_DATA = 64,
+ SCTP_CID_FWD_TSN = 192,
+ SCTP_CID_ASCONF = 193,
+ SCTP_CID_I_FWD_TSN = 194,
+ SCTP_CID_ASCONF_ACK = 128,
+ SCTP_CID_RECONF = 130,
+};
+
+enum {
+ SCTP_CHUNK_FLAG_T = 1,
+};
+
+struct sctp_inithdr {
+ __be32 init_tag;
+ __be32 a_rwnd;
+ __be16 num_outbound_streams;
+ __be16 num_inbound_streams;
+ __be32 initial_tsn;
+ __u8 params[0];
+};
+
+enum {
+ SCTP_MAX_STREAM = 65535,
+};
+
+enum sctp_event_timeout {
+ SCTP_EVENT_TIMEOUT_NONE = 0,
+ SCTP_EVENT_TIMEOUT_T1_COOKIE = 1,
+ SCTP_EVENT_TIMEOUT_T1_INIT = 2,
+ SCTP_EVENT_TIMEOUT_T2_SHUTDOWN = 3,
+ SCTP_EVENT_TIMEOUT_T3_RTX = 4,
+ SCTP_EVENT_TIMEOUT_T4_RTO = 5,
+ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD = 6,
+ SCTP_EVENT_TIMEOUT_HEARTBEAT = 7,
+ SCTP_EVENT_TIMEOUT_RECONF = 8,
+ SCTP_EVENT_TIMEOUT_SACK = 9,
+ SCTP_EVENT_TIMEOUT_AUTOCLOSE = 10,
+};
+
+enum {
+ SCTP_MAX_DUP_TSNS = 16,
+};
+
+enum {
+ SCTP_AUTH_HMAC_ID_RESERVED_0 = 0,
+ SCTP_AUTH_HMAC_ID_SHA1 = 1,
+ SCTP_AUTH_HMAC_ID_RESERVED_2 = 2,
+ SCTP_AUTH_HMAC_ID_SHA256 = 3,
+ __SCTP_AUTH_HMAC_MAX = 4,
+};
+
+struct sctp_bind_hashbucket {
+ spinlock_t lock;
+ struct hlist_head chain;
+};
+
+struct sctp_hashbucket {
+ rwlock_t lock;
+ struct hlist_head chain;
+};
+
+struct sctp_globals {
+ struct list_head address_families;
+ struct sctp_hashbucket *ep_hashtable;
+ struct sctp_bind_hashbucket *port_hashtable;
+ struct rhltable transport_hashtable;
+ int ep_hashsize;
+ int port_hashsize;
+ __u16 max_instreams;
+ __u16 max_outstreams;
+ bool checksum_disable;
+};
+
+enum {
+ SCTP_MIB_NUM = 0,
+ SCTP_MIB_CURRESTAB = 1,
+ SCTP_MIB_ACTIVEESTABS = 2,
+ SCTP_MIB_PASSIVEESTABS = 3,
+ SCTP_MIB_ABORTEDS = 4,
+ SCTP_MIB_SHUTDOWNS = 5,
+ SCTP_MIB_OUTOFBLUES = 6,
+ SCTP_MIB_CHECKSUMERRORS = 7,
+ SCTP_MIB_OUTCTRLCHUNKS = 8,
+ SCTP_MIB_OUTORDERCHUNKS = 9,
+ SCTP_MIB_OUTUNORDERCHUNKS = 10,
+ SCTP_MIB_INCTRLCHUNKS = 11,
+ SCTP_MIB_INORDERCHUNKS = 12,
+ SCTP_MIB_INUNORDERCHUNKS = 13,
+ SCTP_MIB_FRAGUSRMSGS = 14,
+ SCTP_MIB_REASMUSRMSGS = 15,
+ SCTP_MIB_OUTSCTPPACKS = 16,
+ SCTP_MIB_INSCTPPACKS = 17,
+ SCTP_MIB_T1_INIT_EXPIREDS = 18,
+ SCTP_MIB_T1_COOKIE_EXPIREDS = 19,
+ SCTP_MIB_T2_SHUTDOWN_EXPIREDS = 20,
+ SCTP_MIB_T3_RTX_EXPIREDS = 21,
+ SCTP_MIB_T4_RTO_EXPIREDS = 22,
+ SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS = 23,
+ SCTP_MIB_DELAY_SACK_EXPIREDS = 24,
+ SCTP_MIB_AUTOCLOSE_EXPIREDS = 25,
+ SCTP_MIB_T1_RETRANSMITS = 26,
+ SCTP_MIB_T3_RETRANSMITS = 27,
+ SCTP_MIB_PMTUD_RETRANSMITS = 28,
+ SCTP_MIB_FAST_RETRANSMITS = 29,
+ SCTP_MIB_IN_PKT_SOFTIRQ = 30,
+ SCTP_MIB_IN_PKT_BACKLOG = 31,
+ SCTP_MIB_IN_PKT_DISCARDS = 32,
+ SCTP_MIB_IN_DATA_CHUNK_DISCARDS = 33,
+ __SCTP_MIB_MAX = 34,
+};
+
+enum ctattr_protoinfo_sctp {
+ CTA_PROTOINFO_SCTP_UNSPEC = 0,
+ CTA_PROTOINFO_SCTP_STATE = 1,
+ CTA_PROTOINFO_SCTP_VTAG_ORIGINAL = 2,
+ CTA_PROTOINFO_SCTP_VTAG_REPLY = 3,
+ __CTA_PROTOINFO_SCTP_MAX = 4,
+};
+
+enum ctattr_timeout_sctp {
+ CTA_TIMEOUT_SCTP_UNSPEC = 0,
+ CTA_TIMEOUT_SCTP_CLOSED = 1,
+ CTA_TIMEOUT_SCTP_COOKIE_WAIT = 2,
+ CTA_TIMEOUT_SCTP_COOKIE_ECHOED = 3,
+ CTA_TIMEOUT_SCTP_ESTABLISHED = 4,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_SENT = 5,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_RECD = 6,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT = 7,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_SENT = 8,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED = 9,
+ __CTA_TIMEOUT_SCTP_MAX = 10,
+};
+
+struct nf_ct_gre_keymap {
+ struct list_head list;
+ struct nf_conntrack_tuple tuple;
+ struct callback_head rcu;
+};
+
+enum pptp_ctrlsess_state {
+ PPTP_SESSION_NONE = 0,
+ PPTP_SESSION_ERROR = 1,
+ PPTP_SESSION_STOPREQ = 2,
+ PPTP_SESSION_REQUESTED = 3,
+ PPTP_SESSION_CONFIRMED = 4,
+};
+
+enum pptp_ctrlcall_state {
+ PPTP_CALL_NONE = 0,
+ PPTP_CALL_ERROR = 1,
+ PPTP_CALL_OUT_REQ = 2,
+ PPTP_CALL_OUT_CONF = 3,
+ PPTP_CALL_IN_REQ = 4,
+ PPTP_CALL_IN_REP = 5,
+ PPTP_CALL_IN_CONF = 6,
+ PPTP_CALL_CLEAR_REQ = 7,
+};
+
+struct nf_ct_pptp_master {
+ enum pptp_ctrlsess_state sstate;
+ enum pptp_ctrlcall_state cstate;
+ __be16 pac_call_id;
+ __be16 pns_call_id;
+ struct nf_ct_gre_keymap *keymap[2];
+};
+
+enum ctattr_timeout_gre {
+ CTA_TIMEOUT_GRE_UNSPEC = 0,
+ CTA_TIMEOUT_GRE_UNREPLIED = 1,
+ CTA_TIMEOUT_GRE_REPLIED = 2,
+ __CTA_TIMEOUT_GRE_MAX = 3,
+};
+
+enum cntl_msg_types {
+ IPCTNL_MSG_CT_NEW = 0,
+ IPCTNL_MSG_CT_GET = 1,
+ IPCTNL_MSG_CT_DELETE = 2,
+ IPCTNL_MSG_CT_GET_CTRZERO = 3,
+ IPCTNL_MSG_CT_GET_STATS_CPU = 4,
+ IPCTNL_MSG_CT_GET_STATS = 5,
+ IPCTNL_MSG_CT_GET_DYING = 6,
+ IPCTNL_MSG_CT_GET_UNCONFIRMED = 7,
+ IPCTNL_MSG_MAX = 8,
+};
+
+enum ctnl_exp_msg_types {
+ IPCTNL_MSG_EXP_NEW = 0,
+ IPCTNL_MSG_EXP_GET = 1,
+ IPCTNL_MSG_EXP_DELETE = 2,
+ IPCTNL_MSG_EXP_GET_STATS_CPU = 3,
+ IPCTNL_MSG_EXP_MAX = 4,
+};
+
+enum ctattr_type {
+ CTA_UNSPEC = 0,
+ CTA_TUPLE_ORIG = 1,
+ CTA_TUPLE_REPLY = 2,
+ CTA_STATUS = 3,
+ CTA_PROTOINFO = 4,
+ CTA_HELP = 5,
+ CTA_NAT_SRC = 6,
+ CTA_TIMEOUT = 7,
+ CTA_MARK = 8,
+ CTA_COUNTERS_ORIG = 9,
+ CTA_COUNTERS_REPLY = 10,
+ CTA_USE = 11,
+ CTA_ID = 12,
+ CTA_NAT_DST = 13,
+ CTA_TUPLE_MASTER = 14,
+ CTA_SEQ_ADJ_ORIG = 15,
+ CTA_NAT_SEQ_ADJ_ORIG = 15,
+ CTA_SEQ_ADJ_REPLY = 16,
+ CTA_NAT_SEQ_ADJ_REPLY = 16,
+ CTA_SECMARK = 17,
+ CTA_ZONE = 18,
+ CTA_SECCTX = 19,
+ CTA_TIMESTAMP = 20,
+ CTA_MARK_MASK = 21,
+ CTA_LABELS = 22,
+ CTA_LABELS_MASK = 23,
+ CTA_SYNPROXY = 24,
+ CTA_FILTER = 25,
+ __CTA_MAX = 26,
+};
+
+enum ctattr_tuple {
+ CTA_TUPLE_UNSPEC = 0,
+ CTA_TUPLE_IP = 1,
+ CTA_TUPLE_PROTO = 2,
+ CTA_TUPLE_ZONE = 3,
+ __CTA_TUPLE_MAX = 4,
+};
+
+enum ctattr_ip {
+ CTA_IP_UNSPEC = 0,
+ CTA_IP_V4_SRC = 1,
+ CTA_IP_V4_DST = 2,
+ CTA_IP_V6_SRC = 3,
+ CTA_IP_V6_DST = 4,
+ __CTA_IP_MAX = 5,
+};
+
+enum ctattr_counters {
+ CTA_COUNTERS_UNSPEC = 0,
+ CTA_COUNTERS_PACKETS = 1,
+ CTA_COUNTERS_BYTES = 2,
+ CTA_COUNTERS32_PACKETS = 3,
+ CTA_COUNTERS32_BYTES = 4,
+ CTA_COUNTERS_PAD = 5,
+ __CTA_COUNTERS_MAX = 6,
+};
+
+enum ctattr_tstamp {
+ CTA_TIMESTAMP_UNSPEC = 0,
+ CTA_TIMESTAMP_START = 1,
+ CTA_TIMESTAMP_STOP = 2,
+ CTA_TIMESTAMP_PAD = 3,
+ __CTA_TIMESTAMP_MAX = 4,
+};
+
+enum ctattr_seqadj {
+ CTA_SEQADJ_UNSPEC = 0,
+ CTA_SEQADJ_CORRECTION_POS = 1,
+ CTA_SEQADJ_OFFSET_BEFORE = 2,
+ CTA_SEQADJ_OFFSET_AFTER = 3,
+ __CTA_SEQADJ_MAX = 4,
+};
+
+enum ctattr_synproxy {
+ CTA_SYNPROXY_UNSPEC = 0,
+ CTA_SYNPROXY_ISN = 1,
+ CTA_SYNPROXY_ITS = 2,
+ CTA_SYNPROXY_TSOFF = 3,
+ __CTA_SYNPROXY_MAX = 4,
+};
+
+enum ctattr_expect {
+ CTA_EXPECT_UNSPEC = 0,
+ CTA_EXPECT_MASTER = 1,
+ CTA_EXPECT_TUPLE = 2,
+ CTA_EXPECT_MASK = 3,
+ CTA_EXPECT_TIMEOUT = 4,
+ CTA_EXPECT_ID = 5,
+ CTA_EXPECT_HELP_NAME = 6,
+ CTA_EXPECT_ZONE = 7,
+ CTA_EXPECT_FLAGS = 8,
+ CTA_EXPECT_CLASS = 9,
+ CTA_EXPECT_NAT = 10,
+ CTA_EXPECT_FN = 11,
+ __CTA_EXPECT_MAX = 12,
+};
+
+enum ctattr_expect_nat {
+ CTA_EXPECT_NAT_UNSPEC = 0,
+ CTA_EXPECT_NAT_DIR = 1,
+ CTA_EXPECT_NAT_TUPLE = 2,
+ __CTA_EXPECT_NAT_MAX = 3,
+};
+
+enum ctattr_help {
+ CTA_HELP_UNSPEC = 0,
+ CTA_HELP_NAME = 1,
+ CTA_HELP_INFO = 2,
+ __CTA_HELP_MAX = 3,
+};
+
+enum ctattr_stats_cpu {
+ CTA_STATS_UNSPEC = 0,
+ CTA_STATS_SEARCHED = 1,
+ CTA_STATS_FOUND = 2,
+ CTA_STATS_NEW = 3,
+ CTA_STATS_INVALID = 4,
+ CTA_STATS_IGNORE = 5,
+ CTA_STATS_DELETE = 6,
+ CTA_STATS_DELETE_LIST = 7,
+ CTA_STATS_INSERT = 8,
+ CTA_STATS_INSERT_FAILED = 9,
+ CTA_STATS_DROP = 10,
+ CTA_STATS_EARLY_DROP = 11,
+ CTA_STATS_ERROR = 12,
+ CTA_STATS_SEARCH_RESTART = 13,
+ __CTA_STATS_MAX = 14,
+};
+
+enum ctattr_stats_global {
+ CTA_STATS_GLOBAL_UNSPEC = 0,
+ CTA_STATS_GLOBAL_ENTRIES = 1,
+ CTA_STATS_GLOBAL_MAX_ENTRIES = 2,
+ __CTA_STATS_GLOBAL_MAX = 3,
+};
+
+enum ctattr_expect_stats {
+ CTA_STATS_EXP_UNSPEC = 0,
+ CTA_STATS_EXP_NEW = 1,
+ CTA_STATS_EXP_CREATE = 2,
+ CTA_STATS_EXP_DELETE = 3,
+ __CTA_STATS_EXP_MAX = 4,
+};
+
+enum ctattr_filter {
+ CTA_FILTER_UNSPEC = 0,
+ CTA_FILTER_ORIG_FLAGS = 1,
+ CTA_FILTER_REPLY_FLAGS = 2,
+ __CTA_FILTER_MAX = 3,
+};
+
+struct ctnetlink_filter {
+ u_int32_t cta_flags;
+ u8 family;
+ u_int32_t orig_flags;
+ u_int32_t reply_flags;
+ struct nf_conntrack_tuple orig;
+ struct nf_conntrack_tuple reply;
+ struct nf_conntrack_zone zone;
+ struct {
+ u_int32_t val;
+ u_int32_t mask;
+ } mark;
+};
+
+struct ctnl_timeout {
+ struct list_head head;
+ struct callback_head callback_head;
+ refcount_t refcnt;
+ char name[32];
+ struct nf_ct_timeout timeout;
+};
+
+enum ctnl_timeout_msg_types {
+ IPCTNL_MSG_TIMEOUT_NEW = 0,
+ IPCTNL_MSG_TIMEOUT_GET = 1,
+ IPCTNL_MSG_TIMEOUT_DELETE = 2,
+ IPCTNL_MSG_TIMEOUT_DEFAULT_SET = 3,
+ IPCTNL_MSG_TIMEOUT_DEFAULT_GET = 4,
+ IPCTNL_MSG_TIMEOUT_MAX = 5,
+};
+
+enum ctattr_timeout {
+ CTA_TIMEOUT_UNSPEC = 0,
+ CTA_TIMEOUT_NAME = 1,
+ CTA_TIMEOUT_L3PROTO = 2,
+ CTA_TIMEOUT_L4PROTO = 3,
+ CTA_TIMEOUT_DATA = 4,
+ CTA_TIMEOUT_USE = 5,
+ __CTA_TIMEOUT_MAX = 6,
+};
+
+enum nfnl_acct_msg_types___2 {
+ NFNL_MSG_CTHELPER_NEW = 0,
+ NFNL_MSG_CTHELPER_GET = 1,
+ NFNL_MSG_CTHELPER_DEL = 2,
+ NFNL_MSG_CTHELPER_MAX = 3,
+};
+
+enum nfnl_cthelper_type {
+ NFCTH_UNSPEC = 0,
+ NFCTH_NAME = 1,
+ NFCTH_TUPLE = 2,
+ NFCTH_QUEUE_NUM = 3,
+ NFCTH_POLICY = 4,
+ NFCTH_PRIV_DATA_LEN = 5,
+ NFCTH_STATUS = 6,
+ __NFCTH_MAX = 7,
+};
+
+enum nfnl_cthelper_policy_type {
+ NFCTH_POLICY_SET_UNSPEC = 0,
+ NFCTH_POLICY_SET_NUM = 1,
+ NFCTH_POLICY_SET = 2,
+ NFCTH_POLICY_SET1 = 2,
+ NFCTH_POLICY_SET2 = 3,
+ NFCTH_POLICY_SET3 = 4,
+ NFCTH_POLICY_SET4 = 5,
+ __NFCTH_POLICY_SET_MAX = 6,
+};
+
+enum nfnl_cthelper_pol_type {
+ NFCTH_POLICY_UNSPEC = 0,
+ NFCTH_POLICY_NAME = 1,
+ NFCTH_POLICY_EXPECT_MAX = 2,
+ NFCTH_POLICY_EXPECT_TIMEOUT = 3,
+ __NFCTH_POLICY_MAX = 4,
+};
+
+enum nfnl_cthelper_tuple_type {
+ NFCTH_TUPLE_UNSPEC = 0,
+ NFCTH_TUPLE_L3PROTONUM = 1,
+ NFCTH_TUPLE_L4PROTONUM = 2,
+ __NFCTH_TUPLE_MAX = 3,
+};
+
+struct nfnl_cthelper {
+ struct list_head list;
+ struct nf_conntrack_helper helper;
+};
+
+enum amanda_strings {
+ SEARCH_CONNECT = 0,
+ SEARCH_NEWLINE = 1,
+ SEARCH_DATA = 2,
+ SEARCH_MESG = 3,
+ SEARCH_INDEX = 4,
+ SEARCH_STATE = 5,
+};
+
+enum nf_ct_ftp_type {
+ NF_CT_FTP_PORT = 0,
+ NF_CT_FTP_PASV = 1,
+ NF_CT_FTP_EPRT = 2,
+ NF_CT_FTP_EPSV = 3,
+};
+
+struct nf_ct_ftp_master {
+ u_int32_t seq_aft_nl[4];
+ u_int16_t seq_aft_nl_num[2];
+ u_int16_t flags[2];
+};
+
+struct ftp_search {
+ const char *pattern;
+ size_t plen;
+ char skip;
+ char term;
+ enum nf_ct_ftp_type ftptype;
+ int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *);
+};
+
+struct TransportAddress_ipAddress {
+ int options;
+ unsigned int ip;
+};
+
+typedef struct TransportAddress_ipAddress TransportAddress_ipAddress;
+
+struct TransportAddress_ip6Address {
+ int options;
+ unsigned int ip;
+};
+
+typedef struct TransportAddress_ip6Address TransportAddress_ip6Address;
+
+struct TransportAddress {
+ enum {
+ eTransportAddress_ipAddress = 0,
+ eTransportAddress_ipSourceRoute = 1,
+ eTransportAddress_ipxAddress = 2,
+ eTransportAddress_ip6Address = 3,
+ eTransportAddress_netBios = 4,
+ eTransportAddress_nsap = 5,
+ eTransportAddress_nonStandardAddress = 6,
+ } choice;
+ union {
+ TransportAddress_ipAddress ipAddress;
+ TransportAddress_ip6Address ip6Address;
+ };
+};
+
+typedef struct TransportAddress TransportAddress;
+
+struct DataProtocolCapability {
+ enum {
+ eDataProtocolCapability_nonStandard = 0,
+ eDataProtocolCapability_v14buffered = 1,
+ eDataProtocolCapability_v42lapm = 2,
+ eDataProtocolCapability_hdlcFrameTunnelling = 3,
+ eDataProtocolCapability_h310SeparateVCStack = 4,
+ eDataProtocolCapability_h310SingleVCStack = 5,
+ eDataProtocolCapability_transparent = 6,
+ eDataProtocolCapability_segmentationAndReassembly = 7,
+ eDataProtocolCapability_hdlcFrameTunnelingwSAR = 8,
+ eDataProtocolCapability_v120 = 9,
+ eDataProtocolCapability_separateLANStack = 10,
+ eDataProtocolCapability_v76wCompression = 11,
+ eDataProtocolCapability_tcp = 12,
+ eDataProtocolCapability_udp = 13,
+ } choice;
+};
+
+typedef struct DataProtocolCapability DataProtocolCapability;
+
+struct DataApplicationCapability_application {
+ enum {
+ eDataApplicationCapability_application_nonStandard = 0,
+ eDataApplicationCapability_application_t120 = 1,
+ eDataApplicationCapability_application_dsm_cc = 2,
+ eDataApplicationCapability_application_userData = 3,
+ eDataApplicationCapability_application_t84 = 4,
+ eDataApplicationCapability_application_t434 = 5,
+ eDataApplicationCapability_application_h224 = 6,
+ eDataApplicationCapability_application_nlpid = 7,
+ eDataApplicationCapability_application_dsvdControl = 8,
+ eDataApplicationCapability_application_h222DataPartitioning = 9,
+ eDataApplicationCapability_application_t30fax = 10,
+ eDataApplicationCapability_application_t140 = 11,
+ eDataApplicationCapability_application_t38fax = 12,
+ eDataApplicationCapability_application_genericDataCapability = 13,
+ } choice;
+ union {
+ DataProtocolCapability t120;
+ };
+};
+
+typedef struct DataApplicationCapability_application DataApplicationCapability_application;
+
+struct DataApplicationCapability {
+ int options;
+ DataApplicationCapability_application application;
+};
+
+typedef struct DataApplicationCapability DataApplicationCapability;
+
+struct DataType {
+ enum {
+ eDataType_nonStandard = 0,
+ eDataType_nullData = 1,
+ eDataType_videoData = 2,
+ eDataType_audioData = 3,
+ eDataType_data = 4,
+ eDataType_encryptionData = 5,
+ eDataType_h235Control = 6,
+ eDataType_h235Media = 7,
+ eDataType_multiplexedStream = 8,
+ } choice;
+ union {
+ DataApplicationCapability data;
+ };
+};
+
+typedef struct DataType DataType;
+
+struct UnicastAddress_iPAddress {
+ int options;
+ unsigned int network;
+};
+
+typedef struct UnicastAddress_iPAddress UnicastAddress_iPAddress;
+
+struct UnicastAddress_iP6Address {
+ int options;
+ unsigned int network;
+};
+
+typedef struct UnicastAddress_iP6Address UnicastAddress_iP6Address;
+
+struct UnicastAddress {
+ enum {
+ eUnicastAddress_iPAddress = 0,
+ eUnicastAddress_iPXAddress = 1,
+ eUnicastAddress_iP6Address = 2,
+ eUnicastAddress_netBios = 3,
+ eUnicastAddress_iPSourceRouteAddress = 4,
+ eUnicastAddress_nsap = 5,
+ eUnicastAddress_nonStandardAddress = 6,
+ } choice;
+ union {
+ UnicastAddress_iPAddress iPAddress;
+ UnicastAddress_iP6Address iP6Address;
+ };
+};
+
+typedef struct UnicastAddress UnicastAddress;
+
+struct H245_TransportAddress {
+ enum {
+ eH245_TransportAddress_unicastAddress = 0,
+ eH245_TransportAddress_multicastAddress = 1,
+ } choice;
+ union {
+ UnicastAddress unicastAddress;
+ };
+};
+
+typedef struct H245_TransportAddress H245_TransportAddress;
+
+struct H2250LogicalChannelParameters {
+ enum {
+ eH2250LogicalChannelParameters_nonStandard = -2147483648,
+ eH2250LogicalChannelParameters_associatedSessionID = 1073741824,
+ eH2250LogicalChannelParameters_mediaChannel = 536870912,
+ eH2250LogicalChannelParameters_mediaGuaranteedDelivery = 268435456,
+ eH2250LogicalChannelParameters_mediaControlChannel = 134217728,
+ eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery = 67108864,
+ eH2250LogicalChannelParameters_silenceSuppression = 33554432,
+ eH2250LogicalChannelParameters_destination = 16777216,
+ eH2250LogicalChannelParameters_dynamicRTPPayloadType = 8388608,
+ eH2250LogicalChannelParameters_mediaPacketization = 4194304,
+ eH2250LogicalChannelParameters_transportCapability = 2097152,
+ eH2250LogicalChannelParameters_redundancyEncoding = 1048576,
+ eH2250LogicalChannelParameters_source = 524288,
+ } options;
+ H245_TransportAddress mediaChannel;
+ H245_TransportAddress mediaControlChannel;
+};
+
+typedef struct H2250LogicalChannelParameters H2250LogicalChannelParameters;
+
+struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters {
+ enum {
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters = 0,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters = 1,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters = 2,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters = 3,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none = 4,
+ } choice;
+ union {
+ H2250LogicalChannelParameters h2250LogicalChannelParameters;
+ };
+};
+
+typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters;
+
+struct OpenLogicalChannel_forwardLogicalChannelParameters {
+ enum {
+ eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber = -2147483648,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency = 1073741824,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor = 536870912,
+ } options;
+ DataType dataType;
+ OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters multiplexParameters;
+};
+
+typedef struct OpenLogicalChannel_forwardLogicalChannelParameters OpenLogicalChannel_forwardLogicalChannelParameters;
+
+struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters {
+ enum {
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters = 0,
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters = 1,
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters = 2,
+ } choice;
+ union {
+ H2250LogicalChannelParameters h2250LogicalChannelParameters;
+ };
+};
+
+typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters;
+
+struct OpenLogicalChannel_reverseLogicalChannelParameters {
+ enum {
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters = -2147483648,
+ eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency = 1073741824,
+ eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor = 536870912,
+ } options;
+ OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters multiplexParameters;
+};
+
+typedef struct OpenLogicalChannel_reverseLogicalChannelParameters OpenLogicalChannel_reverseLogicalChannelParameters;
+
+struct NetworkAccessParameters_networkAddress {
+ enum {
+ eNetworkAccessParameters_networkAddress_q2931Address = 0,
+ eNetworkAccessParameters_networkAddress_e164Address = 1,
+ eNetworkAccessParameters_networkAddress_localAreaAddress = 2,
+ } choice;
+ union {
+ H245_TransportAddress localAreaAddress;
+ };
+};
+
+typedef struct NetworkAccessParameters_networkAddress NetworkAccessParameters_networkAddress;
+
+struct NetworkAccessParameters {
+ enum {
+ eNetworkAccessParameters_distribution = -2147483648,
+ eNetworkAccessParameters_externalReference = 1073741824,
+ eNetworkAccessParameters_t120SetupProcedure = 536870912,
+ } options;
+ NetworkAccessParameters_networkAddress networkAddress;
+};
+
+typedef struct NetworkAccessParameters NetworkAccessParameters;
+
+struct OpenLogicalChannel {
+ enum {
+ eOpenLogicalChannel_reverseLogicalChannelParameters = -2147483648,
+ eOpenLogicalChannel_separateStack = 1073741824,
+ eOpenLogicalChannel_encryptionSync = 536870912,
+ } options;
+ OpenLogicalChannel_forwardLogicalChannelParameters forwardLogicalChannelParameters;
+ OpenLogicalChannel_reverseLogicalChannelParameters reverseLogicalChannelParameters;
+ NetworkAccessParameters separateStack;
+};
+
+typedef struct OpenLogicalChannel OpenLogicalChannel;
+
+struct Setup_UUIE_fastStart {
+ int count;
+ OpenLogicalChannel item[30];
+};
+
+typedef struct Setup_UUIE_fastStart Setup_UUIE_fastStart;
+
+struct Setup_UUIE {
+ enum {
+ eSetup_UUIE_h245Address = -2147483648,
+ eSetup_UUIE_sourceAddress = 1073741824,
+ eSetup_UUIE_destinationAddress = 536870912,
+ eSetup_UUIE_destCallSignalAddress = 268435456,
+ eSetup_UUIE_destExtraCallInfo = 134217728,
+ eSetup_UUIE_destExtraCRV = 67108864,
+ eSetup_UUIE_callServices = 33554432,
+ eSetup_UUIE_sourceCallSignalAddress = 16777216,
+ eSetup_UUIE_remoteExtensionAddress = 8388608,
+ eSetup_UUIE_callIdentifier = 4194304,
+ eSetup_UUIE_h245SecurityCapability = 2097152,
+ eSetup_UUIE_tokens = 1048576,
+ eSetup_UUIE_cryptoTokens = 524288,
+ eSetup_UUIE_fastStart = 262144,
+ eSetup_UUIE_mediaWaitForConnect = 131072,
+ eSetup_UUIE_canOverlapSend = 65536,
+ eSetup_UUIE_endpointIdentifier = 32768,
+ eSetup_UUIE_multipleCalls = 16384,
+ eSetup_UUIE_maintainConnection = 8192,
+ eSetup_UUIE_connectionParameters = 4096,
+ eSetup_UUIE_language = 2048,
+ eSetup_UUIE_presentationIndicator = 1024,
+ eSetup_UUIE_screeningIndicator = 512,
+ eSetup_UUIE_serviceControl = 256,
+ eSetup_UUIE_symmetricOperationRequired = 128,
+ eSetup_UUIE_capacity = 64,
+ eSetup_UUIE_circuitInfo = 32,
+ eSetup_UUIE_desiredProtocols = 16,
+ eSetup_UUIE_neededFeatures = 8,
+ eSetup_UUIE_desiredFeatures = 4,
+ eSetup_UUIE_supportedFeatures = 2,
+ eSetup_UUIE_parallelH245Control = 1,
+ } options;
+ TransportAddress h245Address;
+ TransportAddress destCallSignalAddress;
+ TransportAddress sourceCallSignalAddress;
+ Setup_UUIE_fastStart fastStart;
+};
+
+typedef struct Setup_UUIE Setup_UUIE;
+
+struct CallProceeding_UUIE_fastStart {
+ int count;
+ OpenLogicalChannel item[30];
+};
+
+typedef struct CallProceeding_UUIE_fastStart CallProceeding_UUIE_fastStart;
+
+struct CallProceeding_UUIE {
+ enum {
+ eCallProceeding_UUIE_h245Address = -2147483648,
+ eCallProceeding_UUIE_callIdentifier = 1073741824,
+ eCallProceeding_UUIE_h245SecurityMode = 536870912,
+ eCallProceeding_UUIE_tokens = 268435456,
+ eCallProceeding_UUIE_cryptoTokens = 134217728,
+ eCallProceeding_UUIE_fastStart = 67108864,
+ eCallProceeding_UUIE_multipleCalls = 33554432,
+ eCallProceeding_UUIE_maintainConnection = 16777216,
+ eCallProceeding_UUIE_fastConnectRefused = 8388608,
+ eCallProceeding_UUIE_featureSet = 4194304,
+ } options;
+ TransportAddress h245Address;
+ CallProceeding_UUIE_fastStart fastStart;
+};
+
+typedef struct CallProceeding_UUIE CallProceeding_UUIE;
+
+struct Connect_UUIE_fastStart {
+ int count;
+ OpenLogicalChannel item[30];
+};
+
+typedef struct Connect_UUIE_fastStart Connect_UUIE_fastStart;
+
+struct Connect_UUIE {
+ enum {
+ eConnect_UUIE_h245Address = -2147483648,
+ eConnect_UUIE_callIdentifier = 1073741824,
+ eConnect_UUIE_h245SecurityMode = 536870912,
+ eConnect_UUIE_tokens = 268435456,
+ eConnect_UUIE_cryptoTokens = 134217728,
+ eConnect_UUIE_fastStart = 67108864,
+ eConnect_UUIE_multipleCalls = 33554432,
+ eConnect_UUIE_maintainConnection = 16777216,
+ eConnect_UUIE_language = 8388608,
+ eConnect_UUIE_connectedAddress = 4194304,
+ eConnect_UUIE_presentationIndicator = 2097152,
+ eConnect_UUIE_screeningIndicator = 1048576,
+ eConnect_UUIE_fastConnectRefused = 524288,
+ eConnect_UUIE_serviceControl = 262144,
+ eConnect_UUIE_capacity = 131072,
+ eConnect_UUIE_featureSet = 65536,
+ } options;
+ TransportAddress h245Address;
+ Connect_UUIE_fastStart fastStart;
+};
+
+typedef struct Connect_UUIE Connect_UUIE;
+
+struct Alerting_UUIE_fastStart {
+ int count;
+ OpenLogicalChannel item[30];
+};
+
+typedef struct Alerting_UUIE_fastStart Alerting_UUIE_fastStart;
+
+struct Alerting_UUIE {
+ enum {
+ eAlerting_UUIE_h245Address = -2147483648,
+ eAlerting_UUIE_callIdentifier = 1073741824,
+ eAlerting_UUIE_h245SecurityMode = 536870912,
+ eAlerting_UUIE_tokens = 268435456,
+ eAlerting_UUIE_cryptoTokens = 134217728,
+ eAlerting_UUIE_fastStart = 67108864,
+ eAlerting_UUIE_multipleCalls = 33554432,
+ eAlerting_UUIE_maintainConnection = 16777216,
+ eAlerting_UUIE_alertingAddress = 8388608,
+ eAlerting_UUIE_presentationIndicator = 4194304,
+ eAlerting_UUIE_screeningIndicator = 2097152,
+ eAlerting_UUIE_fastConnectRefused = 1048576,
+ eAlerting_UUIE_serviceControl = 524288,
+ eAlerting_UUIE_capacity = 262144,
+ eAlerting_UUIE_featureSet = 131072,
+ } options;
+ TransportAddress h245Address;
+ Alerting_UUIE_fastStart fastStart;
+};
+
+typedef struct Alerting_UUIE Alerting_UUIE;
+
+struct FacilityReason {
+ enum {
+ eFacilityReason_routeCallToGatekeeper = 0,
+ eFacilityReason_callForwarded = 1,
+ eFacilityReason_routeCallToMC = 2,
+ eFacilityReason_undefinedReason = 3,
+ eFacilityReason_conferenceListChoice = 4,
+ eFacilityReason_startH245 = 5,
+ eFacilityReason_noH245 = 6,
+ eFacilityReason_newTokens = 7,
+ eFacilityReason_featureSetUpdate = 8,
+ eFacilityReason_forwardedElements = 9,
+ eFacilityReason_transportedInformation = 10,
+ } choice;
+};
+
+typedef struct FacilityReason FacilityReason;
+
+struct Facility_UUIE_fastStart {
+ int count;
+ OpenLogicalChannel item[30];
+};
+
+typedef struct Facility_UUIE_fastStart Facility_UUIE_fastStart;
+
+struct Facility_UUIE {
+ enum {
+ eFacility_UUIE_alternativeAddress = -2147483648,
+ eFacility_UUIE_alternativeAliasAddress = 1073741824,
+ eFacility_UUIE_conferenceID = 536870912,
+ eFacility_UUIE_callIdentifier = 268435456,
+ eFacility_UUIE_destExtraCallInfo = 134217728,
+ eFacility_UUIE_remoteExtensionAddress = 67108864,
+ eFacility_UUIE_tokens = 33554432,
+ eFacility_UUIE_cryptoTokens = 16777216,
+ eFacility_UUIE_conferences = 8388608,
+ eFacility_UUIE_h245Address = 4194304,
+ eFacility_UUIE_fastStart = 2097152,
+ eFacility_UUIE_multipleCalls = 1048576,
+ eFacility_UUIE_maintainConnection = 524288,
+ eFacility_UUIE_fastConnectRefused = 262144,
+ eFacility_UUIE_serviceControl = 131072,
+ eFacility_UUIE_circuitInfo = 65536,
+ eFacility_UUIE_featureSet = 32768,
+ eFacility_UUIE_destinationInfo = 16384,
+ eFacility_UUIE_h245SecurityMode = 8192,
+ } options;
+ TransportAddress alternativeAddress;
+ FacilityReason reason;
+ TransportAddress h245Address;
+ Facility_UUIE_fastStart fastStart;
+};
+
+typedef struct Facility_UUIE Facility_UUIE;
+
+struct Progress_UUIE_fastStart {
+ int count;
+ OpenLogicalChannel item[30];
+};
+
+typedef struct Progress_UUIE_fastStart Progress_UUIE_fastStart;
+
+struct Progress_UUIE {
+ enum {
+ eProgress_UUIE_h245Address = -2147483648,
+ eProgress_UUIE_h245SecurityMode = 1073741824,
+ eProgress_UUIE_tokens = 536870912,
+ eProgress_UUIE_cryptoTokens = 268435456,
+ eProgress_UUIE_fastStart = 134217728,
+ eProgress_UUIE_multipleCalls = 67108864,
+ eProgress_UUIE_maintainConnection = 33554432,
+ eProgress_UUIE_fastConnectRefused = 16777216,
+ } options;
+ TransportAddress h245Address;
+ Progress_UUIE_fastStart fastStart;
+};
+
+typedef struct Progress_UUIE Progress_UUIE;
+
+struct H323_UU_PDU_h323_message_body {
+ enum {
+ eH323_UU_PDU_h323_message_body_setup = 0,
+ eH323_UU_PDU_h323_message_body_callProceeding = 1,
+ eH323_UU_PDU_h323_message_body_connect = 2,
+ eH323_UU_PDU_h323_message_body_alerting = 3,
+ eH323_UU_PDU_h323_message_body_information = 4,
+ eH323_UU_PDU_h323_message_body_releaseComplete = 5,
+ eH323_UU_PDU_h323_message_body_facility = 6,
+ eH323_UU_PDU_h323_message_body_progress = 7,
+ eH323_UU_PDU_h323_message_body_empty = 8,
+ eH323_UU_PDU_h323_message_body_status = 9,
+ eH323_UU_PDU_h323_message_body_statusInquiry = 10,
+ eH323_UU_PDU_h323_message_body_setupAcknowledge = 11,
+ eH323_UU_PDU_h323_message_body_notify = 12,
+ } choice;
+ union {
+ Setup_UUIE setup;
+ CallProceeding_UUIE callProceeding;
+ Connect_UUIE connect;
+ Alerting_UUIE alerting;
+ Facility_UUIE facility;
+ Progress_UUIE progress;
+ };
+};
+
+typedef struct H323_UU_PDU_h323_message_body H323_UU_PDU_h323_message_body;
+
+struct RequestMessage {
+ enum {
+ eRequestMessage_nonStandard = 0,
+ eRequestMessage_masterSlaveDetermination = 1,
+ eRequestMessage_terminalCapabilitySet = 2,
+ eRequestMessage_openLogicalChannel = 3,
+ eRequestMessage_closeLogicalChannel = 4,
+ eRequestMessage_requestChannelClose = 5,
+ eRequestMessage_multiplexEntrySend = 6,
+ eRequestMessage_requestMultiplexEntry = 7,
+ eRequestMessage_requestMode = 8,
+ eRequestMessage_roundTripDelayRequest = 9,
+ eRequestMessage_maintenanceLoopRequest = 10,
+ eRequestMessage_communicationModeRequest = 11,
+ eRequestMessage_conferenceRequest = 12,
+ eRequestMessage_multilinkRequest = 13,
+ eRequestMessage_logicalChannelRateRequest = 14,
+ } choice;
+ union {
+ OpenLogicalChannel openLogicalChannel;
+ };
+};
+
+typedef struct RequestMessage RequestMessage;
+
+struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters {
+ enum {
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters = 0,
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters = 1,
+ } choice;
+ union {
+ H2250LogicalChannelParameters h2250LogicalChannelParameters;
+ };
+};
+
+typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters;
+
+struct OpenLogicalChannelAck_reverseLogicalChannelParameters {
+ enum {
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber = -2147483648,
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters = 1073741824,
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor = 536870912,
+ } options;
+ OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters multiplexParameters;
+};
+
+typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters OpenLogicalChannelAck_reverseLogicalChannelParameters;
+
+struct H2250LogicalChannelAckParameters {
+ enum {
+ eH2250LogicalChannelAckParameters_nonStandard = -2147483648,
+ eH2250LogicalChannelAckParameters_sessionID = 1073741824,
+ eH2250LogicalChannelAckParameters_mediaChannel = 536870912,
+ eH2250LogicalChannelAckParameters_mediaControlChannel = 268435456,
+ eH2250LogicalChannelAckParameters_dynamicRTPPayloadType = 134217728,
+ eH2250LogicalChannelAckParameters_flowControlToZero = 67108864,
+ eH2250LogicalChannelAckParameters_portNumber = 33554432,
+ } options;
+ H245_TransportAddress mediaChannel;
+ H245_TransportAddress mediaControlChannel;
+};
+
+typedef struct H2250LogicalChannelAckParameters H2250LogicalChannelAckParameters;
+
+struct OpenLogicalChannelAck_forwardMultiplexAckParameters {
+ enum {
+ eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters = 0,
+ } choice;
+ union {
+ H2250LogicalChannelAckParameters h2250LogicalChannelAckParameters;
+ };
+};
+
+typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters OpenLogicalChannelAck_forwardMultiplexAckParameters;
+
+struct OpenLogicalChannelAck {
+ enum {
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters = -2147483648,
+ eOpenLogicalChannelAck_separateStack = 1073741824,
+ eOpenLogicalChannelAck_forwardMultiplexAckParameters = 536870912,
+ eOpenLogicalChannelAck_encryptionSync = 268435456,
+ } options;
+ OpenLogicalChannelAck_reverseLogicalChannelParameters reverseLogicalChannelParameters;
+ NetworkAccessParameters separateStack;
+ OpenLogicalChannelAck_forwardMultiplexAckParameters forwardMultiplexAckParameters;
+};
+
+typedef struct OpenLogicalChannelAck OpenLogicalChannelAck;
+
+struct ResponseMessage {
+ enum {
+ eResponseMessage_nonStandard = 0,
+ eResponseMessage_masterSlaveDeterminationAck = 1,
+ eResponseMessage_masterSlaveDeterminationReject = 2,
+ eResponseMessage_terminalCapabilitySetAck = 3,
+ eResponseMessage_terminalCapabilitySetReject = 4,
+ eResponseMessage_openLogicalChannelAck = 5,
+ eResponseMessage_openLogicalChannelReject = 6,
+ eResponseMessage_closeLogicalChannelAck = 7,
+ eResponseMessage_requestChannelCloseAck = 8,
+ eResponseMessage_requestChannelCloseReject = 9,
+ eResponseMessage_multiplexEntrySendAck = 10,
+ eResponseMessage_multiplexEntrySendReject = 11,
+ eResponseMessage_requestMultiplexEntryAck = 12,
+ eResponseMessage_requestMultiplexEntryReject = 13,
+ eResponseMessage_requestModeAck = 14,
+ eResponseMessage_requestModeReject = 15,
+ eResponseMessage_roundTripDelayResponse = 16,
+ eResponseMessage_maintenanceLoopAck = 17,
+ eResponseMessage_maintenanceLoopReject = 18,
+ eResponseMessage_communicationModeResponse = 19,
+ eResponseMessage_conferenceResponse = 20,
+ eResponseMessage_multilinkResponse = 21,
+ eResponseMessage_logicalChannelRateAcknowledge = 22,
+ eResponseMessage_logicalChannelRateReject = 23,
+ } choice;
+ union {
+ OpenLogicalChannelAck openLogicalChannelAck;
+ };
+};
+
+typedef struct ResponseMessage ResponseMessage;
+
+struct MultimediaSystemControlMessage {
+ enum {
+ eMultimediaSystemControlMessage_request = 0,
+ eMultimediaSystemControlMessage_response = 1,
+ eMultimediaSystemControlMessage_command = 2,
+ eMultimediaSystemControlMessage_indication = 3,
+ } choice;
+ union {
+ RequestMessage request;
+ ResponseMessage response;
+ };
+};
+
+typedef struct MultimediaSystemControlMessage MultimediaSystemControlMessage;
+
+struct H323_UU_PDU_h245Control {
+ int count;
+ MultimediaSystemControlMessage item[4];
+};
+
+typedef struct H323_UU_PDU_h245Control H323_UU_PDU_h245Control;
+
+struct H323_UU_PDU {
+ enum {
+ eH323_UU_PDU_nonStandardData = -2147483648,
+ eH323_UU_PDU_h4501SupplementaryService = 1073741824,
+ eH323_UU_PDU_h245Tunneling = 536870912,
+ eH323_UU_PDU_h245Control = 268435456,
+ eH323_UU_PDU_nonStandardControl = 134217728,
+ eH323_UU_PDU_callLinkage = 67108864,
+ eH323_UU_PDU_tunnelledSignallingMessage = 33554432,
+ eH323_UU_PDU_provisionalRespToH245Tunneling = 16777216,
+ eH323_UU_PDU_stimulusControl = 8388608,
+ eH323_UU_PDU_genericData = 4194304,
+ } options;
+ H323_UU_PDU_h323_message_body h323_message_body;
+ H323_UU_PDU_h245Control h245Control;
+};
+
+typedef struct H323_UU_PDU H323_UU_PDU;
+
+struct H323_UserInformation {
+ enum {
+ eH323_UserInformation_user_data = -2147483648,
+ } options;
+ H323_UU_PDU h323_uu_pdu;
+};
+
+typedef struct H323_UserInformation H323_UserInformation;
+
+struct GatekeeperRequest {
+ enum {
+ eGatekeeperRequest_nonStandardData = -2147483648,
+ eGatekeeperRequest_gatekeeperIdentifier = 1073741824,
+ eGatekeeperRequest_callServices = 536870912,
+ eGatekeeperRequest_endpointAlias = 268435456,
+ eGatekeeperRequest_alternateEndpoints = 134217728,
+ eGatekeeperRequest_tokens = 67108864,
+ eGatekeeperRequest_cryptoTokens = 33554432,
+ eGatekeeperRequest_authenticationCapability = 16777216,
+ eGatekeeperRequest_algorithmOIDs = 8388608,
+ eGatekeeperRequest_integrity = 4194304,
+ eGatekeeperRequest_integrityCheckValue = 2097152,
+ eGatekeeperRequest_supportsAltGK = 1048576,
+ eGatekeeperRequest_featureSet = 524288,
+ eGatekeeperRequest_genericData = 262144,
+ } options;
+ TransportAddress rasAddress;
+};
+
+typedef struct GatekeeperRequest GatekeeperRequest;
+
+struct GatekeeperConfirm {
+ enum {
+ eGatekeeperConfirm_nonStandardData = -2147483648,
+ eGatekeeperConfirm_gatekeeperIdentifier = 1073741824,
+ eGatekeeperConfirm_alternateGatekeeper = 536870912,
+ eGatekeeperConfirm_authenticationMode = 268435456,
+ eGatekeeperConfirm_tokens = 134217728,
+ eGatekeeperConfirm_cryptoTokens = 67108864,
+ eGatekeeperConfirm_algorithmOID = 33554432,
+ eGatekeeperConfirm_integrity = 16777216,
+ eGatekeeperConfirm_integrityCheckValue = 8388608,
+ eGatekeeperConfirm_featureSet = 4194304,
+ eGatekeeperConfirm_genericData = 2097152,
+ } options;
+ TransportAddress rasAddress;
+};
+
+typedef struct GatekeeperConfirm GatekeeperConfirm;
+
+struct RegistrationRequest_callSignalAddress {
+ int count;
+ TransportAddress item[10];
+};
+
+typedef struct RegistrationRequest_callSignalAddress RegistrationRequest_callSignalAddress;
+
+struct RegistrationRequest_rasAddress {
+ int count;
+ TransportAddress item[10];
+};
+
+typedef struct RegistrationRequest_rasAddress RegistrationRequest_rasAddress;
+
+struct RegistrationRequest {
+ enum {
+ eRegistrationRequest_nonStandardData = -2147483648,
+ eRegistrationRequest_terminalAlias = 1073741824,
+ eRegistrationRequest_gatekeeperIdentifier = 536870912,
+ eRegistrationRequest_alternateEndpoints = 268435456,
+ eRegistrationRequest_timeToLive = 134217728,
+ eRegistrationRequest_tokens = 67108864,
+ eRegistrationRequest_cryptoTokens = 33554432,
+ eRegistrationRequest_integrityCheckValue = 16777216,
+ eRegistrationRequest_keepAlive = 8388608,
+ eRegistrationRequest_endpointIdentifier = 4194304,
+ eRegistrationRequest_willSupplyUUIEs = 2097152,
+ eRegistrationRequest_maintainConnection = 1048576,
+ eRegistrationRequest_alternateTransportAddresses = 524288,
+ eRegistrationRequest_additiveRegistration = 262144,
+ eRegistrationRequest_terminalAliasPattern = 131072,
+ eRegistrationRequest_supportsAltGK = 65536,
+ eRegistrationRequest_usageReportingCapability = 32768,
+ eRegistrationRequest_multipleCalls = 16384,
+ eRegistrationRequest_supportedH248Packages = 8192,
+ eRegistrationRequest_callCreditCapability = 4096,
+ eRegistrationRequest_capacityReportingCapability = 2048,
+ eRegistrationRequest_capacity = 1024,
+ eRegistrationRequest_featureSet = 512,
+ eRegistrationRequest_genericData = 256,
+ } options;
+ RegistrationRequest_callSignalAddress callSignalAddress;
+ RegistrationRequest_rasAddress rasAddress;
+ unsigned int timeToLive;
+};
+
+typedef struct RegistrationRequest RegistrationRequest;
+
+struct RegistrationConfirm_callSignalAddress {
+ int count;
+ TransportAddress item[10];
+};
+
+typedef struct RegistrationConfirm_callSignalAddress RegistrationConfirm_callSignalAddress;
+
+struct RegistrationConfirm {
+ enum {
+ eRegistrationConfirm_nonStandardData = -2147483648,
+ eRegistrationConfirm_terminalAlias = 1073741824,
+ eRegistrationConfirm_gatekeeperIdentifier = 536870912,
+ eRegistrationConfirm_alternateGatekeeper = 268435456,
+ eRegistrationConfirm_timeToLive = 134217728,
+ eRegistrationConfirm_tokens = 67108864,
+ eRegistrationConfirm_cryptoTokens = 33554432,
+ eRegistrationConfirm_integrityCheckValue = 16777216,
+ eRegistrationConfirm_willRespondToIRR = 8388608,
+ eRegistrationConfirm_preGrantedARQ = 4194304,
+ eRegistrationConfirm_maintainConnection = 2097152,
+ eRegistrationConfirm_serviceControl = 1048576,
+ eRegistrationConfirm_supportsAdditiveRegistration = 524288,
+ eRegistrationConfirm_terminalAliasPattern = 262144,
+ eRegistrationConfirm_supportedPrefixes = 131072,
+ eRegistrationConfirm_usageSpec = 65536,
+ eRegistrationConfirm_featureServerAlias = 32768,
+ eRegistrationConfirm_capacityReportingSpec = 16384,
+ eRegistrationConfirm_featureSet = 8192,
+ eRegistrationConfirm_genericData = 4096,
+ } options;
+ RegistrationConfirm_callSignalAddress callSignalAddress;
+ unsigned int timeToLive;
+};
+
+typedef struct RegistrationConfirm RegistrationConfirm;
+
+struct UnregistrationRequest_callSignalAddress {
+ int count;
+ TransportAddress item[10];
+};
+
+typedef struct UnregistrationRequest_callSignalAddress UnregistrationRequest_callSignalAddress;
+
+struct UnregistrationRequest {
+ enum {
+ eUnregistrationRequest_endpointAlias = -2147483648,
+ eUnregistrationRequest_nonStandardData = 1073741824,
+ eUnregistrationRequest_endpointIdentifier = 536870912,
+ eUnregistrationRequest_alternateEndpoints = 268435456,
+ eUnregistrationRequest_gatekeeperIdentifier = 134217728,
+ eUnregistrationRequest_tokens = 67108864,
+ eUnregistrationRequest_cryptoTokens = 33554432,
+ eUnregistrationRequest_integrityCheckValue = 16777216,
+ eUnregistrationRequest_reason = 8388608,
+ eUnregistrationRequest_endpointAliasPattern = 4194304,
+ eUnregistrationRequest_supportedPrefixes = 2097152,
+ eUnregistrationRequest_alternateGatekeeper = 1048576,
+ eUnregistrationRequest_genericData = 524288,
+ } options;
+ UnregistrationRequest_callSignalAddress callSignalAddress;
+};
+
+typedef struct UnregistrationRequest UnregistrationRequest;
+
+struct AdmissionRequest {
+ enum {
+ eAdmissionRequest_callModel = -2147483648,
+ eAdmissionRequest_destinationInfo = 1073741824,
+ eAdmissionRequest_destCallSignalAddress = 536870912,
+ eAdmissionRequest_destExtraCallInfo = 268435456,
+ eAdmissionRequest_srcCallSignalAddress = 134217728,
+ eAdmissionRequest_nonStandardData = 67108864,
+ eAdmissionRequest_callServices = 33554432,
+ eAdmissionRequest_canMapAlias = 16777216,
+ eAdmissionRequest_callIdentifier = 8388608,
+ eAdmissionRequest_srcAlternatives = 4194304,
+ eAdmissionRequest_destAlternatives = 2097152,
+ eAdmissionRequest_gatekeeperIdentifier = 1048576,
+ eAdmissionRequest_tokens = 524288,
+ eAdmissionRequest_cryptoTokens = 262144,
+ eAdmissionRequest_integrityCheckValue = 131072,
+ eAdmissionRequest_transportQOS = 65536,
+ eAdmissionRequest_willSupplyUUIEs = 32768,
+ eAdmissionRequest_callLinkage = 16384,
+ eAdmissionRequest_gatewayDataRate = 8192,
+ eAdmissionRequest_capacity = 4096,
+ eAdmissionRequest_circuitInfo = 2048,
+ eAdmissionRequest_desiredProtocols = 1024,
+ eAdmissionRequest_desiredTunnelledProtocol = 512,
+ eAdmissionRequest_featureSet = 256,
+ eAdmissionRequest_genericData = 128,
+ } options;
+ TransportAddress destCallSignalAddress;
+ TransportAddress srcCallSignalAddress;
+};
+
+typedef struct AdmissionRequest AdmissionRequest;
+
+struct AdmissionConfirm {
+ enum {
+ eAdmissionConfirm_irrFrequency = -2147483648,
+ eAdmissionConfirm_nonStandardData = 1073741824,
+ eAdmissionConfirm_destinationInfo = 536870912,
+ eAdmissionConfirm_destExtraCallInfo = 268435456,
+ eAdmissionConfirm_destinationType = 134217728,
+ eAdmissionConfirm_remoteExtensionAddress = 67108864,
+ eAdmissionConfirm_alternateEndpoints = 33554432,
+ eAdmissionConfirm_tokens = 16777216,
+ eAdmissionConfirm_cryptoTokens = 8388608,
+ eAdmissionConfirm_integrityCheckValue = 4194304,
+ eAdmissionConfirm_transportQOS = 2097152,
+ eAdmissionConfirm_willRespondToIRR = 1048576,
+ eAdmissionConfirm_uuiesRequested = 524288,
+ eAdmissionConfirm_language = 262144,
+ eAdmissionConfirm_alternateTransportAddresses = 131072,
+ eAdmissionConfirm_useSpecifiedTransport = 65536,
+ eAdmissionConfirm_circuitInfo = 32768,
+ eAdmissionConfirm_usageSpec = 16384,
+ eAdmissionConfirm_supportedProtocols = 8192,
+ eAdmissionConfirm_serviceControl = 4096,
+ eAdmissionConfirm_multipleCalls = 2048,
+ eAdmissionConfirm_featureSet = 1024,
+ eAdmissionConfirm_genericData = 512,
+ } options;
+ TransportAddress destCallSignalAddress;
+};
+
+typedef struct AdmissionConfirm AdmissionConfirm;
+
+struct LocationRequest {
+ enum {
+ eLocationRequest_endpointIdentifier = -2147483648,
+ eLocationRequest_nonStandardData = 1073741824,
+ eLocationRequest_sourceInfo = 536870912,
+ eLocationRequest_canMapAlias = 268435456,
+ eLocationRequest_gatekeeperIdentifier = 134217728,
+ eLocationRequest_tokens = 67108864,
+ eLocationRequest_cryptoTokens = 33554432,
+ eLocationRequest_integrityCheckValue = 16777216,
+ eLocationRequest_desiredProtocols = 8388608,
+ eLocationRequest_desiredTunnelledProtocol = 4194304,
+ eLocationRequest_featureSet = 2097152,
+ eLocationRequest_genericData = 1048576,
+ eLocationRequest_hopCount = 524288,
+ eLocationRequest_circuitInfo = 262144,
+ } options;
+ TransportAddress replyAddress;
+};
+
+typedef struct LocationRequest LocationRequest;
+
+struct LocationConfirm {
+ enum {
+ eLocationConfirm_nonStandardData = -2147483648,
+ eLocationConfirm_destinationInfo = 1073741824,
+ eLocationConfirm_destExtraCallInfo = 536870912,
+ eLocationConfirm_destinationType = 268435456,
+ eLocationConfirm_remoteExtensionAddress = 134217728,
+ eLocationConfirm_alternateEndpoints = 67108864,
+ eLocationConfirm_tokens = 33554432,
+ eLocationConfirm_cryptoTokens = 16777216,
+ eLocationConfirm_integrityCheckValue = 8388608,
+ eLocationConfirm_alternateTransportAddresses = 4194304,
+ eLocationConfirm_supportedProtocols = 2097152,
+ eLocationConfirm_multipleCalls = 1048576,
+ eLocationConfirm_featureSet = 524288,
+ eLocationConfirm_genericData = 262144,
+ eLocationConfirm_circuitInfo = 131072,
+ eLocationConfirm_serviceControl = 65536,
+ } options;
+ TransportAddress callSignalAddress;
+ TransportAddress rasAddress;
+};
+
+typedef struct LocationConfirm LocationConfirm;
+
+struct InfoRequestResponse_callSignalAddress {
+ int count;
+ TransportAddress item[10];
+};
+
+typedef struct InfoRequestResponse_callSignalAddress InfoRequestResponse_callSignalAddress;
+
+struct InfoRequestResponse {
+ enum {
+ eInfoRequestResponse_nonStandardData = -2147483648,
+ eInfoRequestResponse_endpointAlias = 1073741824,
+ eInfoRequestResponse_perCallInfo = 536870912,
+ eInfoRequestResponse_tokens = 268435456,
+ eInfoRequestResponse_cryptoTokens = 134217728,
+ eInfoRequestResponse_integrityCheckValue = 67108864,
+ eInfoRequestResponse_needResponse = 33554432,
+ eInfoRequestResponse_capacity = 16777216,
+ eInfoRequestResponse_irrStatus = 8388608,
+ eInfoRequestResponse_unsolicited = 4194304,
+ eInfoRequestResponse_genericData = 2097152,
+ } options;
+ TransportAddress rasAddress;
+ InfoRequestResponse_callSignalAddress callSignalAddress;
+};
+
+typedef struct InfoRequestResponse InfoRequestResponse;
+
+struct RasMessage {
+ enum {
+ eRasMessage_gatekeeperRequest = 0,
+ eRasMessage_gatekeeperConfirm = 1,
+ eRasMessage_gatekeeperReject = 2,
+ eRasMessage_registrationRequest = 3,
+ eRasMessage_registrationConfirm = 4,
+ eRasMessage_registrationReject = 5,
+ eRasMessage_unregistrationRequest = 6,
+ eRasMessage_unregistrationConfirm = 7,
+ eRasMessage_unregistrationReject = 8,
+ eRasMessage_admissionRequest = 9,
+ eRasMessage_admissionConfirm = 10,
+ eRasMessage_admissionReject = 11,
+ eRasMessage_bandwidthRequest = 12,
+ eRasMessage_bandwidthConfirm = 13,
+ eRasMessage_bandwidthReject = 14,
+ eRasMessage_disengageRequest = 15,
+ eRasMessage_disengageConfirm = 16,
+ eRasMessage_disengageReject = 17,
+ eRasMessage_locationRequest = 18,
+ eRasMessage_locationConfirm = 19,
+ eRasMessage_locationReject = 20,
+ eRasMessage_infoRequest = 21,
+ eRasMessage_infoRequestResponse = 22,
+ eRasMessage_nonStandardMessage = 23,
+ eRasMessage_unknownMessageResponse = 24,
+ eRasMessage_requestInProgress = 25,
+ eRasMessage_resourcesAvailableIndicate = 26,
+ eRasMessage_resourcesAvailableConfirm = 27,
+ eRasMessage_infoRequestAck = 28,
+ eRasMessage_infoRequestNak = 29,
+ eRasMessage_serviceControlIndication = 30,
+ eRasMessage_serviceControlResponse = 31,
+ } choice;
+ union {
+ GatekeeperRequest gatekeeperRequest;
+ GatekeeperConfirm gatekeeperConfirm;
+ RegistrationRequest registrationRequest;
+ RegistrationConfirm registrationConfirm;
+ UnregistrationRequest unregistrationRequest;
+ AdmissionRequest admissionRequest;
+ AdmissionConfirm admissionConfirm;
+ LocationRequest locationRequest;
+ LocationConfirm locationConfirm;
+ InfoRequestResponse infoRequestResponse;
+ };
+};
+
+typedef struct RasMessage RasMessage;
+
+typedef struct {
+ enum {
+ Q931_NationalEscape = 0,
+ Q931_Alerting = 1,
+ Q931_CallProceeding = 2,
+ Q931_Connect = 7,
+ Q931_ConnectAck = 15,
+ Q931_Progress = 3,
+ Q931_Setup = 5,
+ Q931_SetupAck = 13,
+ Q931_Resume = 38,
+ Q931_ResumeAck = 46,
+ Q931_ResumeReject = 34,
+ Q931_Suspend = 37,
+ Q931_SuspendAck = 45,
+ Q931_SuspendReject = 33,
+ Q931_UserInformation = 32,
+ Q931_Disconnect = 69,
+ Q931_Release = 77,
+ Q931_ReleaseComplete = 90,
+ Q931_Restart = 70,
+ Q931_RestartAck = 78,
+ Q931_Segment = 96,
+ Q931_CongestionCtrl = 121,
+ Q931_Information = 123,
+ Q931_Notify = 110,
+ Q931_Status = 125,
+ Q931_StatusEnquiry = 117,
+ Q931_Facility = 98,
+ } MessageType;
+ H323_UserInformation UUIE;
+} Q931;
+
+struct nf_ct_h323_master {
+ __be16 sig_port[2];
+ __be16 rtp_port[8];
+ union {
+ u_int32_t timeout;
+ u_int16_t tpkt_len[2];
+ };
+};
+
+struct field_t {
+ unsigned char type;
+ unsigned char sz;
+ unsigned char lb;
+ unsigned char ub;
+ short unsigned int attr;
+ short unsigned int offset;
+ const struct field_t *fields;
+};
+
+struct bitstr {
+ unsigned char *buf;
+ unsigned char *beg;
+ unsigned char *end;
+ unsigned char *cur;
+ unsigned int bit;
+};
+
+typedef int (*decoder_t)(struct bitstr *, const struct field_t *, char *, int);
+
+struct pptp_pkt_hdr {
+ __u16 packetLength;
+ __be16 packetType;
+ __be32 magicCookie;
+};
+
+struct nf_conn_nat;
+
+enum sane_state {
+ SANE_STATE_NORMAL = 0,
+ SANE_STATE_START_REQUESTED = 1,
+};
+
+struct nf_ct_sane_master {
+ enum sane_state state;
+};
+
+struct sane_request {
+ __be32 RPC_code;
+ __be32 handle;
+};
+
+struct sane_reply_net_start {
+ __be32 status;
+ __be16 zero;
+ __be16 port;
+};
+
+struct nf_ct_sip_master {
+ unsigned int register_cseq;
+ unsigned int invite_cseq;
+ __be16 forced_dport;
+};
+
+enum sip_expectation_classes {
+ SIP_EXPECT_SIGNALLING = 0,
+ SIP_EXPECT_AUDIO = 1,
+ SIP_EXPECT_VIDEO = 2,
+ SIP_EXPECT_IMAGE = 3,
+ __SIP_EXPECT_MAX = 4,
+};
+
+struct sdp_media_type {
+ const char *name;
+ unsigned int len;
+ enum sip_expectation_classes class;
+};
+
+struct sip_handler {
+ const char *method;
+ unsigned int len;
+ int (*request)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, unsigned int);
+ int (*response)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, unsigned int, unsigned int);
+};
+
+struct sip_header {
+ const char *name;
+ const char *cname;
+ const char *search;
+ unsigned int len;
+ unsigned int clen;
+ unsigned int slen;
+ int (*match_len)(const struct nf_conn *, const char *, const char *, int *);
+};
+
+enum sip_header_types {
+ SIP_HDR_CSEQ = 0,
+ SIP_HDR_FROM = 1,
+ SIP_HDR_TO = 2,
+ SIP_HDR_CONTACT = 3,
+ SIP_HDR_VIA_UDP = 4,
+ SIP_HDR_VIA_TCP = 5,
+ SIP_HDR_EXPIRES = 6,
+ SIP_HDR_CONTENT_LENGTH = 7,
+ SIP_HDR_CALL_ID = 8,
+};
+
+enum sdp_header_types {
+ SDP_HDR_UNSPEC = 0,
+ SDP_HDR_VERSION = 1,
+ SDP_HDR_OWNER = 2,
+ SDP_HDR_CONNECTION = 3,
+ SDP_HDR_MEDIA = 4,
+};
+
+struct nf_nat_sip_hooks {
+ unsigned int (*msg)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *);
+ void (*seq_adjust)(struct sk_buff *, unsigned int, s16);
+ unsigned int (*expect)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, struct nf_conntrack_expect *, unsigned int, unsigned int);
+ unsigned int (*sdp_addr)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, unsigned int, enum sdp_header_types, enum sdp_header_types, const union nf_inet_addr *);
+ unsigned int (*sdp_port)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, unsigned int, unsigned int, u_int16_t);
+ unsigned int (*sdp_session)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, unsigned int, const union nf_inet_addr *);
+ unsigned int (*sdp_media)(struct sk_buff *, unsigned int, unsigned int, const char **, unsigned int *, struct nf_conntrack_expect *, struct nf_conntrack_expect *, unsigned int, unsigned int, union nf_inet_addr *);
+};
+
+struct tftphdr {
+ __be16 opcode;
+};
+
+struct icmp_err {
+ int errno;
+ unsigned int fatal: 1;
+};
+
+struct nf_log_buf___2;
+
+struct nf_nat_pptp {
+ __be16 pns_call_id;
+ __be16 pac_call_id;
+};
+
+struct nf_nat_range2 {
+ unsigned int flags;
+ union nf_inet_addr min_addr;
+ union nf_inet_addr max_addr;
+ union nf_conntrack_man_proto min_proto;
+ union nf_conntrack_man_proto max_proto;
+ union nf_conntrack_man_proto base_proto;
+};
+
+union nf_conntrack_nat_help {
+ struct nf_nat_pptp nat_pptp_info;
+};
+
+struct nf_conn_nat___2 {
+ union nf_conntrack_nat_help help;
+ int masq_index;
+};
+
+struct nf_nat_lookup_hook_priv {
+ struct nf_hook_entries *entries;
+ struct callback_head callback_head;
+};
+
+struct nf_nat_hooks_net {
+ struct nf_hook_ops *nat_hook_ops;
+ unsigned int users;
+};
+
+struct nat_net {
+ struct nf_nat_hooks_net nat_proto_net[13];
+};
+
+struct nf_nat_proto_clean {
+ u8 l3proto;
+ u8 l4proto;
+};
+
+enum ctattr_nat {
+ CTA_NAT_UNSPEC = 0,
+ CTA_NAT_V4_MINIP = 1,
+ CTA_NAT_V4_MAXIP = 2,
+ CTA_NAT_PROTO = 3,
+ CTA_NAT_V6_MINIP = 4,
+ CTA_NAT_V6_MAXIP = 5,
+ __CTA_NAT_MAX = 6,
+};
+
+enum ctattr_protonat {
+ CTA_PROTONAT_UNSPEC = 0,
+ CTA_PROTONAT_PORT_MIN = 1,
+ CTA_PROTONAT_PORT_MAX = 2,
+ __CTA_PROTONAT_MAX = 3,
+};
+
+struct inet6_ifaddr {
+ struct in6_addr addr;
+ __u32 prefix_len;
+ __u32 rt_priority;
+ __u32 valid_lft;
+ __u32 prefered_lft;
+ refcount_t refcnt;
+ spinlock_t lock;
+ int state;
+ __u32 flags;
+ __u8 dad_probes;
+ __u8 stable_privacy_retry;
+ __u16 scope;
+ __u64 dad_nonce;
+ long unsigned int cstamp;
+ long unsigned int tstamp;
+ struct delayed_work dad_work;
+ struct inet6_dev *idev;
+ struct fib6_info *rt;
+ struct hlist_node addr_lst;
+ struct list_head if_list;
+ struct list_head tmp_list;
+ struct inet6_ifaddr *ifpub;
+ int regen_count;
+ bool tokenized;
+ struct callback_head rcu;
+ struct in6_addr peer_addr;
+};
+
+struct nf_nat_ipv4_range {
+ unsigned int flags;
+ __be32 min_ip;
+ __be32 max_ip;
+ union nf_conntrack_man_proto min;
+ union nf_conntrack_man_proto max;
+};
+
+struct nf_nat_ipv4_multi_range_compat {
+ unsigned int rangesize;
+ struct nf_nat_ipv4_range range[1];
+};
+
+struct masq_dev_work {
+ struct work_struct work;
+ struct net *net;
+ struct in6_addr addr;
+ int ifindex;
+};
+
+struct nf_synproxy_info {
+ __u8 options;
+ __u8 wscale;
+ __u16 mss;
+};
+
+struct synproxy_stats {
+ unsigned int syn_received;
+ unsigned int cookie_invalid;
+ unsigned int cookie_valid;
+ unsigned int cookie_retrans;
+ unsigned int conn_reopened;
+};
+
+struct synproxy_net {
+ struct nf_conn *tmpl;
+ struct synproxy_stats *stats;
+ unsigned int hook_ref4;
+ unsigned int hook_ref6;
+};
+
+struct synproxy_options {
+ u8 options;
+ u8 wscale;
+ u16 mss_option;
+ u16 mss_encode;
+ u32 tsval;
+ u32 tsecr;
+};
+
+struct nf_conncount_list {
+ spinlock_t list_lock;
+ struct list_head head;
+ unsigned int count;
+};
+
+struct nf_conncount_tuple {
+ struct list_head node;
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_zone zone;
+ int cpu;
+ u32 jiffies32;
+};
+
+struct nf_conncount_rb {
+ struct rb_node node;
+ struct nf_conncount_list list;
+ u32 key[5];
+ struct callback_head callback_head;
+};
+
+struct nf_conncount_data {
+ unsigned int keylen;
+ struct rb_root root[256];
+ struct net *net;
+ struct work_struct gc_work;
+ long unsigned int pending_trees[4];
+ unsigned int gc_tree;
+};
+
+struct xt_action_param;
+
+struct xt_mtchk_param;
+
+struct xt_mtdtor_param;
+
+struct xt_match {
+ struct list_head list;
+ const char name[29];
+ u_int8_t revision;
+ bool (*match)(const struct sk_buff *, struct xt_action_param *);
+ int (*checkentry)(const struct xt_mtchk_param *);
+ void (*destroy)(const struct xt_mtdtor_param *);
+ struct module *me;
+ const char *table;
+ unsigned int matchsize;
+ unsigned int usersize;
+ unsigned int hooks;
+ short unsigned int proto;
+ short unsigned int family;
+};
+
+struct xt_entry_match {
+ union {
+ struct {
+ __u16 match_size;
+ char name[29];
+ __u8 revision;
+ } user;
+ struct {
+ __u16 match_size;
+ struct xt_match *match;
+ } kernel;
+ __u16 match_size;
+ } u;
+ unsigned char data[0];
+};
+
+struct xt_tgchk_param;
+
+struct xt_tgdtor_param;
+
+struct xt_target {
+ struct list_head list;
+ const char name[29];
+ u_int8_t revision;
+ unsigned int (*target)(struct sk_buff *, const struct xt_action_param *);
+ int (*checkentry)(const struct xt_tgchk_param *);
+ void (*destroy)(const struct xt_tgdtor_param *);
+ struct module *me;
+ const char *table;
+ unsigned int targetsize;
+ unsigned int usersize;
+ unsigned int hooks;
+ short unsigned int proto;
+ short unsigned int family;
+};
+
+struct xt_entry_target {
+ union {
+ struct {
+ __u16 target_size;
+ char name[29];
+ __u8 revision;
+ } user;
+ struct {
+ __u16 target_size;
+ struct xt_target *target;
+ } kernel;
+ __u16 target_size;
+ } u;
+ unsigned char data[0];
+};
+
+struct xt_standard_target {
+ struct xt_entry_target target;
+ int verdict;
+};
+
+struct xt_error_target {
+ struct xt_entry_target target;
+ char errorname[30];
+};
+
+struct xt_counters {
+ __u64 pcnt;
+ __u64 bcnt;
+};
+
+struct xt_counters_info {
+ char name[32];
+ unsigned int num_counters;
+ struct xt_counters counters[0];
+};
+
+struct xt_action_param {
+ union {
+ const struct xt_match *match;
+ const struct xt_target *target;
+ };
+ union {
+ const void *matchinfo;
+ const void *targinfo;
+ };
+ const struct nf_hook_state *state;
+ int fragoff;
+ unsigned int thoff;
+ bool hotdrop;
+};
+
+struct xt_mtchk_param {
+ struct net *net;
+ const char *table;
+ const void *entryinfo;
+ const struct xt_match *match;
+ void *matchinfo;
+ unsigned int hook_mask;
+ u_int8_t family;
+ bool nft_compat;
+};
+
+struct xt_mtdtor_param {
+ struct net *net;
+ const struct xt_match *match;
+ void *matchinfo;
+ u_int8_t family;
+};
+
+struct xt_tgchk_param {
+ struct net *net;
+ const char *table;
+ const void *entryinfo;
+ const struct xt_target *target;
+ void *targinfo;
+ unsigned int hook_mask;
+ u_int8_t family;
+ bool nft_compat;
+};
+
+struct xt_tgdtor_param {
+ struct net *net;
+ const struct xt_target *target;
+ void *targinfo;
+ u_int8_t family;
+};
+
+struct xt_percpu_counter_alloc_state {
+ unsigned int off;
+ const char *mem;
+};
+
+struct xt_af {
+ struct mutex mutex;
+ struct list_head match;
+ struct list_head target;
+};
+
+struct nf_mttg_trav {
+ struct list_head *head;
+ struct list_head *curr;
+ uint8_t class;
+};
+
+enum {
+ MTTG_TRAV_INIT = 0,
+ MTTG_TRAV_NFP_UNSPEC = 1,
+ MTTG_TRAV_NFP_SPEC = 2,
+ MTTG_TRAV_DONE = 3,
+};
+
+struct xt_tcp {
+ __u16 spts[2];
+ __u16 dpts[2];
+ __u8 option;
+ __u8 flg_mask;
+ __u8 flg_cmp;
+ __u8 invflags;
+};
+
+struct xt_udp {
+ __u16 spts[2];
+ __u16 dpts[2];
+ __u8 invflags;
+};
+
+struct xt_mark_tginfo2 {
+ __u32 mark;
+ __u32 mask;
+};
+
+struct xt_mark_mtinfo1 {
+ __u32 mark;
+ __u32 mask;
+ __u8 invert;
+};
+
+enum {
+ XT_CONNMARK_SET = 0,
+ XT_CONNMARK_SAVE = 1,
+ XT_CONNMARK_RESTORE = 2,
+};
+
+enum {
+ D_SHIFT_LEFT = 0,
+ D_SHIFT_RIGHT = 1,
+};
+
+struct xt_connmark_tginfo1 {
+ __u32 ctmark;
+ __u32 ctmask;
+ __u32 nfmask;
+ __u8 mode;
+};
+
+struct xt_connmark_tginfo2 {
+ __u32 ctmark;
+ __u32 ctmask;
+ __u32 nfmask;
+ __u8 shift_dir;
+ __u8 shift_bits;
+ __u8 mode;
+};
+
+struct xt_connmark_mtinfo1 {
+ __u32 mark;
+ __u32 mask;
+ __u8 invert;
+};
+
+enum {
+ IPSET_ATTR_UNSPEC = 0,
+ IPSET_ATTR_PROTOCOL = 1,
+ IPSET_ATTR_SETNAME = 2,
+ IPSET_ATTR_TYPENAME = 3,
+ IPSET_ATTR_SETNAME2 = 3,
+ IPSET_ATTR_REVISION = 4,
+ IPSET_ATTR_FAMILY = 5,
+ IPSET_ATTR_FLAGS = 6,
+ IPSET_ATTR_DATA = 7,
+ IPSET_ATTR_ADT = 8,
+ IPSET_ATTR_LINENO = 9,
+ IPSET_ATTR_PROTOCOL_MIN = 10,
+ IPSET_ATTR_REVISION_MIN = 10,
+ IPSET_ATTR_INDEX = 11,
+ __IPSET_ATTR_CMD_MAX = 12,
+};
+
+enum {
+ IPSET_ATTR_IP = 1,
+ IPSET_ATTR_IP_FROM = 1,
+ IPSET_ATTR_IP_TO = 2,
+ IPSET_ATTR_CIDR = 3,
+ IPSET_ATTR_PORT = 4,
+ IPSET_ATTR_PORT_FROM = 4,
+ IPSET_ATTR_PORT_TO = 5,
+ IPSET_ATTR_TIMEOUT = 6,
+ IPSET_ATTR_PROTO = 7,
+ IPSET_ATTR_CADT_FLAGS = 8,
+ IPSET_ATTR_CADT_LINENO = 9,
+ IPSET_ATTR_MARK = 10,
+ IPSET_ATTR_MARKMASK = 11,
+ IPSET_ATTR_CADT_MAX = 16,
+ IPSET_ATTR_GC = 17,
+ IPSET_ATTR_HASHSIZE = 18,
+ IPSET_ATTR_MAXELEM = 19,
+ IPSET_ATTR_NETMASK = 20,
+ IPSET_ATTR_PROBES = 21,
+ IPSET_ATTR_RESIZE = 22,
+ IPSET_ATTR_SIZE = 23,
+ IPSET_ATTR_ELEMENTS = 24,
+ IPSET_ATTR_REFERENCES = 25,
+ IPSET_ATTR_MEMSIZE = 26,
+ __IPSET_ATTR_CREATE_MAX = 27,
+};
+
+enum {
+ IPSET_ATTR_ETHER = 17,
+ IPSET_ATTR_NAME = 18,
+ IPSET_ATTR_NAMEREF = 19,
+ IPSET_ATTR_IP2 = 20,
+ IPSET_ATTR_CIDR2 = 21,
+ IPSET_ATTR_IP2_TO = 22,
+ IPSET_ATTR_IFACE = 23,
+ IPSET_ATTR_BYTES = 24,
+ IPSET_ATTR_PACKETS = 25,
+ IPSET_ATTR_COMMENT = 26,
+ IPSET_ATTR_SKBMARK = 27,
+ IPSET_ATTR_SKBPRIO = 28,
+ IPSET_ATTR_SKBQUEUE = 29,
+ IPSET_ATTR_PAD = 30,
+ __IPSET_ATTR_ADT_MAX = 31,
+};
+
+enum ipset_cmd_flags {
+ IPSET_FLAG_BIT_EXIST = 0,
+ IPSET_FLAG_EXIST = 1,
+ IPSET_FLAG_BIT_LIST_SETNAME = 1,
+ IPSET_FLAG_LIST_SETNAME = 2,
+ IPSET_FLAG_BIT_LIST_HEADER = 2,
+ IPSET_FLAG_LIST_HEADER = 4,
+ IPSET_FLAG_BIT_SKIP_COUNTER_UPDATE = 3,
+ IPSET_FLAG_SKIP_COUNTER_UPDATE = 8,
+ IPSET_FLAG_BIT_SKIP_SUBCOUNTER_UPDATE = 4,
+ IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE = 16,
+ IPSET_FLAG_BIT_MATCH_COUNTERS = 5,
+ IPSET_FLAG_MATCH_COUNTERS = 32,
+ IPSET_FLAG_BIT_RETURN_NOMATCH = 7,
+ IPSET_FLAG_RETURN_NOMATCH = 128,
+ IPSET_FLAG_BIT_MAP_SKBMARK = 8,
+ IPSET_FLAG_MAP_SKBMARK = 256,
+ IPSET_FLAG_BIT_MAP_SKBPRIO = 9,
+ IPSET_FLAG_MAP_SKBPRIO = 512,
+ IPSET_FLAG_BIT_MAP_SKBQUEUE = 10,
+ IPSET_FLAG_MAP_SKBQUEUE = 1024,
+ IPSET_FLAG_CMD_MAX = 15,
+};
+
+enum ipset_cadt_flags {
+ IPSET_FLAG_BIT_BEFORE = 0,
+ IPSET_FLAG_BEFORE = 1,
+ IPSET_FLAG_BIT_PHYSDEV = 1,
+ IPSET_FLAG_PHYSDEV = 2,
+ IPSET_FLAG_BIT_NOMATCH = 2,
+ IPSET_FLAG_NOMATCH = 4,
+ IPSET_FLAG_BIT_WITH_COUNTERS = 3,
+ IPSET_FLAG_WITH_COUNTERS = 8,
+ IPSET_FLAG_BIT_WITH_COMMENT = 4,
+ IPSET_FLAG_WITH_COMMENT = 16,
+ IPSET_FLAG_BIT_WITH_FORCEADD = 5,
+ IPSET_FLAG_WITH_FORCEADD = 32,
+ IPSET_FLAG_BIT_WITH_SKBINFO = 6,
+ IPSET_FLAG_WITH_SKBINFO = 64,
+ IPSET_FLAG_BIT_IFACE_WILDCARD = 7,
+ IPSET_FLAG_IFACE_WILDCARD = 128,
+ IPSET_FLAG_CADT_MAX = 15,
+};
+
+enum ipset_adt {
+ IPSET_ADD = 0,
+ IPSET_DEL = 1,
+ IPSET_TEST = 2,
+ IPSET_ADT_MAX = 3,
+ IPSET_CREATE = 3,
+ IPSET_CADT_MAX = 4,
+};
+
+typedef __u16 ip_set_id_t;
+
+enum ip_set_dim {
+ IPSET_DIM_ZERO = 0,
+ IPSET_DIM_ONE = 1,
+ IPSET_DIM_TWO = 2,
+ IPSET_DIM_THREE = 3,
+ IPSET_DIM_MAX = 6,
+ IPSET_BIT_RETURN_NOMATCH = 7,
+};
+
+enum ip_set_kopt {
+ IPSET_INV_MATCH = 1,
+ IPSET_DIM_ONE_SRC = 2,
+ IPSET_DIM_TWO_SRC = 4,
+ IPSET_DIM_THREE_SRC = 8,
+ IPSET_RETURN_NOMATCH = 128,
+};
+
+enum {
+ IPSET_COUNTER_NONE = 0,
+ IPSET_COUNTER_EQ = 1,
+ IPSET_COUNTER_NE = 2,
+ IPSET_COUNTER_LT = 3,
+ IPSET_COUNTER_GT = 4,
+};
+
+struct ip_set_counter_match0 {
+ __u8 op;
+ __u64 value;
+};
+
+struct ip_set_counter_match {
+ __u64 value;
+ __u8 op;
+};
+
+enum ip_set_extension {
+ IPSET_EXT_BIT_TIMEOUT = 0,
+ IPSET_EXT_TIMEOUT = 1,
+ IPSET_EXT_BIT_COUNTER = 1,
+ IPSET_EXT_COUNTER = 2,
+ IPSET_EXT_BIT_COMMENT = 2,
+ IPSET_EXT_COMMENT = 4,
+ IPSET_EXT_BIT_SKBINFO = 3,
+ IPSET_EXT_SKBINFO = 8,
+ IPSET_EXT_BIT_DESTROY = 7,
+ IPSET_EXT_DESTROY = 128,
+};
+
+enum ip_set_ext_id {
+ IPSET_EXT_ID_COUNTER = 0,
+ IPSET_EXT_ID_TIMEOUT = 1,
+ IPSET_EXT_ID_SKBINFO = 2,
+ IPSET_EXT_ID_COMMENT = 3,
+ IPSET_EXT_ID_MAX = 4,
+};
+
+struct ip_set;
+
+struct ip_set_ext_type {
+ void (*destroy)(struct ip_set *, void *);
+ enum ip_set_extension type;
+ enum ipset_cadt_flags flag;
+ u8 len;
+ u8 align;
+};
+
+struct ip_set_type;
+
+struct ip_set_type_variant;
+
+struct ip_set {
+ char name[32];
+ spinlock_t lock;
+ u32 ref;
+ u32 ref_netlink;
+ struct ip_set_type *type;
+ const struct ip_set_type_variant *variant;
+ u8 family;
+ u8 revision;
+ u8 extensions;
+ u8 flags;
+ u32 timeout;
+ u32 elements;
+ size_t ext_size;
+ size_t dsize;
+ size_t offset[4];
+ void *data;
+};
+
+struct ip_set_skbinfo {
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
+ u16 __pad;
+};
+
+struct ip_set_ext {
+ struct ip_set_skbinfo skbinfo;
+ u64 packets;
+ u64 bytes;
+ char *comment;
+ u32 timeout;
+ u8 packets_op;
+ u8 bytes_op;
+ bool target;
+};
+
+typedef int (*ipset_adtfn)(struct ip_set *, void *, const struct ip_set_ext *, struct ip_set_ext *, u32);
+
+struct ip_set_adt_opt {
+ u8 family;
+ u8 dim;
+ u8 flags;
+ u32 cmdflags;
+ struct ip_set_ext ext;
+};
+
+struct ip_set_type_variant {
+ int (*kadt)(struct ip_set *, const struct sk_buff *, const struct xt_action_param *, enum ipset_adt, struct ip_set_adt_opt *);
+ int (*uadt)(struct ip_set *, struct nlattr **, enum ipset_adt, u32 *, u32, bool);
+ ipset_adtfn adt[3];
+ int (*resize)(struct ip_set *, bool);
+ void (*destroy)(struct ip_set *);
+ void (*flush)(struct ip_set *);
+ void (*expire)(struct ip_set *);
+ int (*head)(struct ip_set *, struct sk_buff *);
+ int (*list)(const struct ip_set *, struct sk_buff *, struct netlink_callback *);
+ void (*uref)(struct ip_set *, struct netlink_callback *, bool);
+ bool (*same_set)(const struct ip_set *, const struct ip_set *);
+ bool region_lock;
+};
+
+struct ip_set_type {
+ struct list_head list;
+ char name[32];
+ u8 protocol;
+ u8 dimension;
+ u8 family;
+ u8 revision_min;
+ u8 revision_max;
+ u16 features;
+ int (*create)(struct net *, struct ip_set *, struct nlattr **, u32);
+ const struct nla_policy create_policy[27];
+ const struct nla_policy adt_policy[31];
+ struct module *me;
+};
+
+struct xt_set_info_v0 {
+ ip_set_id_t index;
+ union {
+ __u32 flags[7];
+ struct {
+ __u32 __flags[6];
+ __u8 dim;
+ __u8 flags;
+ } compat;
+ } u;
+};
+
+struct xt_set_info_match_v0 {
+ struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+ struct xt_set_info_v0 add_set;
+ struct xt_set_info_v0 del_set;
+};
+
+struct xt_set_info {
+ ip_set_id_t index;
+ __u8 dim;
+ __u8 flags;
+};
+
+struct xt_set_info_match_v1 {
+ struct xt_set_info match_set;
+};
+
+struct xt_set_info_target_v1 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+};
+
+struct xt_set_info_target_v2 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+ __u32 flags;
+ __u32 timeout;
+};
+
+struct xt_set_info_match_v3 {
+ struct xt_set_info match_set;
+ struct ip_set_counter_match0 packets;
+ struct ip_set_counter_match0 bytes;
+ __u32 flags;
+};
+
+struct xt_set_info_target_v3 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+ struct xt_set_info map_set;
+ __u32 flags;
+ __u32 timeout;
+};
+
+struct xt_set_info_match_v4 {
+ struct xt_set_info match_set;
+ struct ip_set_counter_match packets;
+ struct ip_set_counter_match bytes;
+ __u32 flags;
+};
+
+struct nf_nat_range {
+ unsigned int flags;
+ union nf_inet_addr min_addr;
+ union nf_inet_addr max_addr;
+ union nf_conntrack_man_proto min_proto;
+ union nf_conntrack_man_proto max_proto;
+};
+
+struct xt_CHECKSUM_info {
+ __u8 operation;
+};
+
+struct ipt_ip {
+ struct in_addr src;
+ struct in_addr dst;
+ struct in_addr smsk;
+ struct in_addr dmsk;
+ char iniface[16];
+ char outiface[16];
+ unsigned char iniface_mask[16];
+ unsigned char outiface_mask[16];
+ __u16 proto;
+ __u8 flags;
+ __u8 invflags;
+};
+
+struct ip6t_ip6 {
+ struct in6_addr src;
+ struct in6_addr dst;
+ struct in6_addr smsk;
+ struct in6_addr dmsk;
+ char iniface[16];
+ char outiface[16];
+ unsigned char iniface_mask[16];
+ unsigned char outiface_mask[16];
+ __u16 proto;
+ __u8 tos;
+ __u8 flags;
+ __u8 invflags;
+};
+
+struct xt_classify_target_info {
+ __u32 priority;
+};
+
+struct ipt_entry {
+ struct ipt_ip ip;
+ unsigned int nfcache;
+ __u16 target_offset;
+ __u16 next_offset;
+ unsigned int comefrom;
+ struct xt_counters counters;
+ unsigned char elems[0];
+};
+
+struct ip6t_entry {
+ struct ip6t_ip6 ipv6;
+ unsigned int nfcache;
+ __u16 target_offset;
+ __u16 next_offset;
+ unsigned int comefrom;
+ struct xt_counters counters;
+ unsigned char elems[0];
+};
+
+enum {
+ XT_CT_NOTRACK = 1,
+ XT_CT_NOTRACK_ALIAS = 2,
+ XT_CT_ZONE_DIR_ORIG = 4,
+ XT_CT_ZONE_DIR_REPL = 8,
+ XT_CT_ZONE_MARK = 16,
+ XT_CT_MASK = 31,
+};
+
+struct xt_ct_target_info {
+ __u16 flags;
+ __u16 zone;
+ __u32 ct_events;
+ __u32 exp_events;
+ char helper[16];
+ struct nf_conn *ct;
+};
+
+struct xt_ct_target_info_v1 {
+ __u16 flags;
+ __u16 zone;
+ __u32 ct_events;
+ __u32 exp_events;
+ char helper[16];
+ char timeout[32];
+ struct nf_conn *ct;
+};
+
+struct xt_DSCP_info {
+ __u8 dscp;
+};
+
+struct xt_tos_target_info {
+ __u8 tos_value;
+ __u8 tos_mask;
+};
+
+enum {
+ IPT_TTL_SET = 0,
+ IPT_TTL_INC = 1,
+ IPT_TTL_DEC = 2,
+};
+
+struct ipt_TTL_info {
+ __u8 mode;
+ __u8 ttl;
+};
+
+enum {
+ IP6T_HL_SET = 0,
+ IP6T_HL_INC = 1,
+ IP6T_HL_DEC = 2,
+};
+
+struct ip6t_HL_info {
+ __u8 mode;
+ __u8 hop_limit;
+};
+
+enum {
+ XT_HMARK_SADDR_MASK = 0,
+ XT_HMARK_DADDR_MASK = 1,
+ XT_HMARK_SPI = 2,
+ XT_HMARK_SPI_MASK = 3,
+ XT_HMARK_SPORT = 4,
+ XT_HMARK_DPORT = 5,
+ XT_HMARK_SPORT_MASK = 6,
+ XT_HMARK_DPORT_MASK = 7,
+ XT_HMARK_PROTO_MASK = 8,
+ XT_HMARK_RND = 9,
+ XT_HMARK_MODULUS = 10,
+ XT_HMARK_OFFSET = 11,
+ XT_HMARK_CT = 12,
+ XT_HMARK_METHOD_L3 = 13,
+ XT_HMARK_METHOD_L3_4 = 14,
+};
+
+union hmark_ports {
+ struct {
+ __u16 src;
+ __u16 dst;
+ } p16;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } b16;
+ __u32 v32;
+ __be32 b32;
+};
+
+struct xt_hmark_info {
+ union nf_inet_addr src_mask;
+ union nf_inet_addr dst_mask;
+ union hmark_ports port_mask;
+ union hmark_ports port_set;
+ __u32 flags;
+ __u16 proto_mask;
+ __u32 hashrnd;
+ __u32 hmodulus;
+ __u32 hoffset;
+};
+
+struct hmark_tuple {
+ __be32 src;
+ __be32 dst;
+ union hmark_ports uports;
+ u8 proto;
+};
+
+struct xt_log_info {
+ unsigned char level;
+ unsigned char logflags;
+ char prefix[30];
+};
+
+struct xt_nflog_info {
+ __u32 len;
+ __u16 group;
+ __u16 threshold;
+ __u16 flags;
+ __u16 pad;
+ char prefix[64];
+};
+
+struct xt_NFQ_info {
+ __u16 queuenum;
+};
+
+struct xt_NFQ_info_v1 {
+ __u16 queuenum;
+ __u16 queues_total;
+};
+
+struct xt_NFQ_info_v2 {
+ __u16 queuenum;
+ __u16 queues_total;
+ __u16 bypass;
+};
+
+struct xt_NFQ_info_v3 {
+ __u16 queuenum;
+ __u16 queues_total;
+ __u16 flags;
+};
+
+struct xt_rateest;
+
+struct xt_rateest_target_info {
+ char name[16];
+ __s8 interval;
+ __u8 ewma_log;
+ struct xt_rateest *est;
+};
+
+struct xt_rateest {
+ struct gnet_stats_basic_packed bstats;
+ spinlock_t lock;
+ unsigned int refcnt;
+ struct hlist_node list;
+ char name[16];
+ struct gnet_estimator params;
+ struct callback_head rcu;
+ struct net_rate_estimator *rate_est;
+};
+
+struct xt_rateest_net {
+ struct mutex hash_lock;
+ struct hlist_head hash[16];
+};
+
+struct inet_timewait_sock {
+ struct sock_common __tw_common;
+ __u32 tw_mark;
+ volatile unsigned char tw_substate;
+ unsigned char tw_rcv_wscale;
+ __be16 tw_sport;
+ unsigned int tw_kill: 1;
+ unsigned int tw_transparent: 1;
+ unsigned int tw_flowlabel: 20;
+ unsigned int tw_pad: 2;
+ unsigned int tw_tos: 8;
+ u32 tw_txhash;
+ u32 tw_priority;
+ struct timer_list tw_timer;
+ struct inet_bind_bucket *tw_tb;
+};
+
+enum nf_tproxy_lookup_t {
+ NF_TPROXY_LOOKUP_LISTENER = 0,
+ NF_TPROXY_LOOKUP_ESTABLISHED = 1,
+};
+
+struct xt_tproxy_target_info {
+ __u32 mark_mask;
+ __u32 mark_value;
+ __be32 laddr;
+ __be16 lport;
+};
+
+struct xt_tproxy_target_info_v1 {
+ __u32 mark_mask;
+ __u32 mark_value;
+ union nf_inet_addr laddr;
+ __be16 lport;
+};
+
+struct xt_tcpmss_info {
+ __u16 mss;
+};
+
+struct xt_tcpoptstrip_target_info {
+ __u32 strip_bmap[8];
+};
+
+struct xt_tee_priv;
+
+struct xt_tee_tginfo {
+ union nf_inet_addr gw;
+ char oif[16];
+ struct xt_tee_priv *priv;
+};
+
+struct xt_tee_priv {
+ struct list_head list;
+ struct xt_tee_tginfo *tginfo;
+ int oif;
+};
+
+struct tee_net {
+ struct list_head priv_list;
+ struct mutex lock;
+};
+
+struct idletimer_tg;
+
+struct idletimer_tg_info {
+ __u32 timeout;
+ char label[28];
+ struct idletimer_tg *timer;
+};
+
+struct idletimer_tg {
+ struct list_head entry;
+ struct alarm alarm;
+ struct timer_list timer;
+ struct work_struct work;
+ struct kobject *kobj;
+ struct device_attribute attr;
+ unsigned int refcnt;
+ u8 timer_type;
+};
+
+struct idletimer_tg_info_v1 {
+ __u32 timeout;
+ char label[28];
+ __u8 send_nl_msg;
+ __u8 timer_type;
+ struct idletimer_tg *timer;
+};
+
+enum {
+ XT_ADDRTYPE_INVERT_SOURCE = 1,
+ XT_ADDRTYPE_INVERT_DEST = 2,
+ XT_ADDRTYPE_LIMIT_IFACE_IN = 4,
+ XT_ADDRTYPE_LIMIT_IFACE_OUT = 8,
+};
+
+struct xt_addrtype_info_v1 {
+ __u16 source;
+ __u16 dest;
+ __u32 flags;
+};
+
+struct xt_addrtype_info {
+ __u16 source;
+ __u16 dest;
+ __u32 invert_source;
+ __u32 invert_dest;
+};
+
+struct xt_bpf_info {
+ __u16 bpf_program_num_elem;
+ struct sock_filter bpf_program[64];
+ struct bpf_prog *filter;
+};
+
+enum xt_bpf_modes {
+ XT_BPF_MODE_BYTECODE = 0,
+ XT_BPF_MODE_FD_PINNED = 1,
+ XT_BPF_MODE_FD_ELF = 2,
+};
+
+struct xt_bpf_info_v1 {
+ __u16 mode;
+ __u16 bpf_program_num_elem;
+ __s32 fd;
+ union {
+ struct sock_filter bpf_program[64];
+ char path[512];
+ };
+ struct bpf_prog *filter;
+};
+
+enum xt_cluster_flags {
+ XT_CLUSTER_F_INV = 1,
+};
+
+struct xt_cluster_match_info {
+ __u32 total_nodes;
+ __u32 node_mask;
+ __u32 hash_seed;
+ __u32 flags;
+};
+
+enum xt_connbytes_what {
+ XT_CONNBYTES_PKTS = 0,
+ XT_CONNBYTES_BYTES = 1,
+ XT_CONNBYTES_AVGPKT = 2,
+};
+
+enum xt_connbytes_direction {
+ XT_CONNBYTES_DIR_ORIGINAL = 0,
+ XT_CONNBYTES_DIR_REPLY = 1,
+ XT_CONNBYTES_DIR_BOTH = 2,
+};
+
+struct xt_connbytes_info {
+ struct {
+ __u64 from;
+ __u64 to;
+ } count;
+ __u8 what;
+ __u8 direction;
+};
+
+enum xt_connlabel_mtopts {
+ XT_CONNLABEL_OP_INVERT = 1,
+ XT_CONNLABEL_OP_SET = 2,
+};
+
+struct xt_connlabel_mtinfo {
+ __u16 bit;
+ __u16 options;
+};
+
+enum {
+ XT_CONNLIMIT_INVERT = 1,
+ XT_CONNLIMIT_DADDR = 2,
+};
+
+struct nf_conncount_data___2;
+
+struct xt_connlimit_info {
+ union {
+ union nf_inet_addr mask;
+ };
+ unsigned int limit;
+ __u32 flags;
+ struct nf_conncount_data___2 *data;
+};
+
+enum {
+ XT_CONNTRACK_STATE = 1,
+ XT_CONNTRACK_PROTO = 2,
+ XT_CONNTRACK_ORIGSRC = 4,
+ XT_CONNTRACK_ORIGDST = 8,
+ XT_CONNTRACK_REPLSRC = 16,
+ XT_CONNTRACK_REPLDST = 32,
+ XT_CONNTRACK_STATUS = 64,
+ XT_CONNTRACK_EXPIRES = 128,
+ XT_CONNTRACK_ORIGSRC_PORT = 256,
+ XT_CONNTRACK_ORIGDST_PORT = 512,
+ XT_CONNTRACK_REPLSRC_PORT = 1024,
+ XT_CONNTRACK_REPLDST_PORT = 2048,
+ XT_CONNTRACK_DIRECTION = 4096,
+ XT_CONNTRACK_STATE_ALIAS = 8192,
+};
+
+struct xt_conntrack_mtinfo1 {
+ union nf_inet_addr origsrc_addr;
+ union nf_inet_addr origsrc_mask;
+ union nf_inet_addr origdst_addr;
+ union nf_inet_addr origdst_mask;
+ union nf_inet_addr replsrc_addr;
+ union nf_inet_addr replsrc_mask;
+ union nf_inet_addr repldst_addr;
+ union nf_inet_addr repldst_mask;
+ __u32 expires_min;
+ __u32 expires_max;
+ __u16 l4proto;
+ __be16 origsrc_port;
+ __be16 origdst_port;
+ __be16 replsrc_port;
+ __be16 repldst_port;
+ __u16 match_flags;
+ __u16 invert_flags;
+ __u8 state_mask;
+ __u8 status_mask;
+};
+
+struct xt_conntrack_mtinfo2 {
+ union nf_inet_addr origsrc_addr;
+ union nf_inet_addr origsrc_mask;
+ union nf_inet_addr origdst_addr;
+ union nf_inet_addr origdst_mask;
+ union nf_inet_addr replsrc_addr;
+ union nf_inet_addr replsrc_mask;
+ union nf_inet_addr repldst_addr;
+ union nf_inet_addr repldst_mask;
+ __u32 expires_min;
+ __u32 expires_max;
+ __u16 l4proto;
+ __be16 origsrc_port;
+ __be16 origdst_port;
+ __be16 replsrc_port;
+ __be16 repldst_port;
+ __u16 match_flags;
+ __u16 invert_flags;
+ __u16 state_mask;
+ __u16 status_mask;
+};
+
+struct xt_conntrack_mtinfo3 {
+ union nf_inet_addr origsrc_addr;
+ union nf_inet_addr origsrc_mask;
+ union nf_inet_addr origdst_addr;
+ union nf_inet_addr origdst_mask;
+ union nf_inet_addr replsrc_addr;
+ union nf_inet_addr replsrc_mask;
+ union nf_inet_addr repldst_addr;
+ union nf_inet_addr repldst_mask;
+ __u32 expires_min;
+ __u32 expires_max;
+ __u16 l4proto;
+ __u16 origsrc_port;
+ __u16 origdst_port;
+ __u16 replsrc_port;
+ __u16 repldst_port;
+ __u16 match_flags;
+ __u16 invert_flags;
+ __u16 state_mask;
+ __u16 status_mask;
+ __u16 origsrc_port_high;
+ __u16 origdst_port_high;
+ __u16 replsrc_port_high;
+ __u16 repldst_port_high;
+};
+
+struct xt_cpu_info {
+ __u32 cpu;
+ __u32 invert;
+};
+
+struct xt_dccp_info {
+ __u16 dpts[2];
+ __u16 spts[2];
+ __u16 flags;
+ __u16 invflags;
+ __u16 typemask;
+ __u8 option;
+};
+
+enum xt_devgroup_flags {
+ XT_DEVGROUP_MATCH_SRC = 1,
+ XT_DEVGROUP_INVERT_SRC = 2,
+ XT_DEVGROUP_MATCH_DST = 4,
+ XT_DEVGROUP_INVERT_DST = 8,
+};
+
+struct xt_devgroup_info {
+ __u32 flags;
+ __u32 src_group;
+ __u32 src_mask;
+ __u32 dst_group;
+ __u32 dst_mask;
+};
+
+struct xt_dscp_info {
+ __u8 dscp;
+ __u8 invert;
+};
+
+struct xt_tos_match_info {
+ __u8 tos_mask;
+ __u8 tos_value;
+ __u8 invert;
+};
+
+struct xt_ecn_info {
+ __u8 operation;
+ __u8 invert;
+ __u8 ip_ect;
+ union {
+ struct {
+ __u8 ect;
+ } tcp;
+ } proto;
+};
+
+struct ip_esp_hdr {
+ __be32 spi;
+ __be32 seq_no;
+ __u8 enc_data[0];
+};
+
+struct xt_esp {
+ __u32 spis[2];
+ __u8 invflags;
+};
+
+enum {
+ XT_HASHLIMIT_HASH_DIP = 1,
+ XT_HASHLIMIT_HASH_DPT = 2,
+ XT_HASHLIMIT_HASH_SIP = 4,
+ XT_HASHLIMIT_HASH_SPT = 8,
+ XT_HASHLIMIT_INVERT = 16,
+ XT_HASHLIMIT_BYTES = 32,
+ XT_HASHLIMIT_RATE_MATCH = 64,
+};
+
+struct hashlimit_cfg3 {
+ __u64 avg;
+ __u64 burst;
+ __u32 mode;
+ __u32 size;
+ __u32 max;
+ __u32 gc_interval;
+ __u32 expire;
+ __u32 interval;
+ __u8 srcmask;
+ __u8 dstmask;
+};
+
+struct xt_hashlimit_htable {
+ struct hlist_node node;
+ refcount_t use;
+ u_int8_t family;
+ bool rnd_initialized;
+ struct hashlimit_cfg3 cfg;
+ spinlock_t lock;
+ u_int32_t rnd;
+ unsigned int count;
+ struct delayed_work gc_work;
+ struct proc_dir_entry *pde;
+ const char *name;
+ struct net *net;
+ struct hlist_head hash[0];
+};
+
+struct hashlimit_cfg1 {
+ __u32 mode;
+ __u32 avg;
+ __u32 burst;
+ __u32 size;
+ __u32 max;
+ __u32 gc_interval;
+ __u32 expire;
+ __u8 srcmask;
+ __u8 dstmask;
+};
+
+struct hashlimit_cfg2 {
+ __u64 avg;
+ __u64 burst;
+ __u32 mode;
+ __u32 size;
+ __u32 max;
+ __u32 gc_interval;
+ __u32 expire;
+ __u8 srcmask;
+ __u8 dstmask;
+};
+
+struct xt_hashlimit_mtinfo1 {
+ char name[16];
+ struct hashlimit_cfg1 cfg;
+ struct xt_hashlimit_htable *hinfo;
+};
+
+struct xt_hashlimit_mtinfo2 {
+ char name[255];
+ struct hashlimit_cfg2 cfg;
+ struct xt_hashlimit_htable *hinfo;
+};
+
+struct xt_hashlimit_mtinfo3 {
+ char name[255];
+ struct hashlimit_cfg3 cfg;
+ struct xt_hashlimit_htable *hinfo;
+};
+
+struct hashlimit_net {
+ struct hlist_head htables;
+ struct proc_dir_entry *ipt_hashlimit;
+ struct proc_dir_entry *ip6t_hashlimit;
+};
+
+struct dsthash_dst {
+ union {
+ struct {
+ __be32 src;
+ __be32 dst;
+ } ip;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+struct dsthash_ent {
+ struct hlist_node node;
+ struct dsthash_dst dst;
+ spinlock_t lock;
+ long unsigned int expires;
+ struct {
+ long unsigned int prev;
+ union {
+ struct {
+ u_int64_t credit;
+ u_int64_t credit_cap;
+ u_int64_t cost;
+ };
+ struct {
+ u_int32_t interval;
+ u_int32_t prev_window;
+ u_int64_t current_rate;
+ u_int64_t rate;
+ int64_t burst;
+ };
+ };
+ } rateinfo;
+ struct callback_head rcu;
+};
+
+struct xt_helper_info {
+ int invert;
+ char name[30];
+};
+
+enum {
+ IPT_TTL_EQ = 0,
+ IPT_TTL_NE = 1,
+ IPT_TTL_LT = 2,
+ IPT_TTL_GT = 3,
+};
+
+struct ipt_ttl_info {
+ __u8 mode;
+ __u8 ttl;
+};
+
+enum {
+ IP6T_HL_EQ = 0,
+ IP6T_HL_NE = 1,
+ IP6T_HL_LT = 2,
+ IP6T_HL_GT = 3,
+};
+
+struct ip6t_hl_info {
+ __u8 mode;
+ __u8 hop_limit;
+};
+
+struct ip_comp_hdr {
+ __u8 nexthdr;
+ __u8 flags;
+ __be16 cpi;
+};
+
+struct xt_ipcomp {
+ __u32 spis[2];
+ __u8 invflags;
+ __u8 hdrres;
+};
+
+enum {
+ IPRANGE_SRC = 1,
+ IPRANGE_DST = 2,
+ IPRANGE_SRC_INV = 16,
+ IPRANGE_DST_INV = 32,
+};
+
+struct xt_iprange_mtinfo {
+ union nf_inet_addr src_min;
+ union nf_inet_addr src_max;
+ union nf_inet_addr dst_min;
+ union nf_inet_addr dst_max;
+ __u8 flags;
+};
+
+struct ip_vs_kstats {
+ u64 conns;
+ u64 inpkts;
+ u64 outpkts;
+ u64 inbytes;
+ u64 outbytes;
+ u64 cps;
+ u64 inpps;
+ u64 outpps;
+ u64 inbps;
+ u64 outbps;
+};
+
+struct ip_vs_estimator {
+ struct list_head list;
+ u64 last_inbytes;
+ u64 last_outbytes;
+ u64 last_conns;
+ u64 last_inpkts;
+ u64 last_outpkts;
+ u64 cps;
+ u64 inpps;
+ u64 outpps;
+ u64 inbps;
+ u64 outbps;
+};
+
+struct ip_vs_cpu_stats;
+
+struct ip_vs_stats {
+ struct ip_vs_kstats kstats;
+ struct ip_vs_estimator est;
+ struct ip_vs_cpu_stats *cpustats;
+ spinlock_t lock;
+ struct ip_vs_kstats kstats0;
+};
+
+struct ipvs_sync_daemon_cfg {
+ union nf_inet_addr mcast_group;
+ int syncid;
+ u16 sync_maxlen;
+ u16 mcast_port;
+ u8 mcast_af;
+ u8 mcast_ttl;
+ char mcast_ifn[16];
+};
+
+struct ip_vs_proto_data;
+
+struct ipvs_master_sync_state;
+
+struct ip_vs_sync_thread_data;
+
+struct netns_ipvs {
+ int gen;
+ int enable;
+ struct hlist_head rs_table[16];
+ struct list_head app_list;
+ struct ip_vs_proto_data *proto_data_table[32];
+ struct list_head tcp_apps[16];
+ struct list_head udp_apps[16];
+ struct list_head sctp_apps[16];
+ atomic_t conn_count;
+ struct ip_vs_stats tot_stats;
+ int num_services;
+ struct list_head dest_trash;
+ spinlock_t dest_trash_lock;
+ struct timer_list dest_trash_timer;
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+ atomic_t conn_out_counter;
+ struct delayed_work defense_work;
+ int drop_rate;
+ int drop_counter;
+ int old_secure_tcp;
+ atomic_t dropentry;
+ spinlock_t dropentry_lock;
+ spinlock_t droppacket_lock;
+ spinlock_t securetcp_lock;
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+ int sysctl_conntrack;
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_sync_ports;
+ int sysctl_sync_persist_mode;
+ long unsigned int sysctl_sync_qlen_max;
+ int sysctl_sync_sock_size;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_sloppy_tcp;
+ int sysctl_sloppy_sctp;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ unsigned int sysctl_sync_refresh_period;
+ int sysctl_sync_retries;
+ int sysctl_nat_icmp_send;
+ int sysctl_pmtu_disc;
+ int sysctl_backup_only;
+ int sysctl_conn_reuse_mode;
+ int sysctl_schedule_icmp;
+ int sysctl_ignore_tunneled;
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ struct list_head est_list;
+ spinlock_t est_lock;
+ struct timer_list est_timer;
+ spinlock_t sync_lock;
+ struct ipvs_master_sync_state *ms;
+ spinlock_t sync_buff_lock;
+ struct ip_vs_sync_thread_data *master_tinfo;
+ struct ip_vs_sync_thread_data *backup_tinfo;
+ int threads_mask;
+ volatile int sync_state;
+ struct mutex sync_mutex;
+ struct ipvs_sync_daemon_cfg mcfg;
+ struct ipvs_sync_daemon_cfg bcfg;
+ struct net *net;
+ unsigned int mixed_address_family_dests;
+};
+
+enum {
+ XT_IPVS_IPVS_PROPERTY = 1,
+ XT_IPVS_PROTO = 2,
+ XT_IPVS_VADDR = 4,
+ XT_IPVS_VPORT = 8,
+ XT_IPVS_DIR = 16,
+ XT_IPVS_METHOD = 32,
+ XT_IPVS_VPORTCTL = 64,
+ XT_IPVS_MASK = 127,
+ XT_IPVS_ONCE_MASK = 126,
+};
+
+struct xt_ipvs_mtinfo {
+ union nf_inet_addr vaddr;
+ union nf_inet_addr vmask;
+ __be16 vport;
+ __u8 l4proto;
+ __u8 fwd_method;
+ __be16 vportctl;
+ __u8 invert;
+ __u8 bitmask;
+};
+
+struct ip_vs_iphdr {
+ int hdr_flags;
+ __u32 off;
+ __u32 len;
+ __u16 fragoffs;
+ __s16 protocol;
+ __s32 flags;
+ union nf_inet_addr saddr;
+ union nf_inet_addr daddr;
+};
+
+struct ip_vs_seq {
+ __u32 init_seq;
+ __u32 delta;
+ __u32 previous_delta;
+};
+
+struct ip_vs_counters {
+ __u64 conns;
+ __u64 inpkts;
+ __u64 outpkts;
+ __u64 inbytes;
+ __u64 outbytes;
+};
+
+struct ip_vs_cpu_stats {
+ struct ip_vs_counters cnt;
+ struct u64_stats_sync syncp;
+};
+
+struct ip_vs_conn;
+
+struct ip_vs_app;
+
+struct ip_vs_protocol {
+ struct ip_vs_protocol *next;
+ char *name;
+ u16 protocol;
+ u16 num_states;
+ int dont_defrag;
+ void (*init)(struct ip_vs_protocol *);
+ void (*exit)(struct ip_vs_protocol *);
+ int (*init_netns)(struct netns_ipvs *, struct ip_vs_proto_data *);
+ void (*exit_netns)(struct netns_ipvs *, struct ip_vs_proto_data *);
+ int (*conn_schedule)(struct netns_ipvs *, int, struct sk_buff *, struct ip_vs_proto_data *, int *, struct ip_vs_conn **, struct ip_vs_iphdr *);
+ struct ip_vs_conn * (*conn_in_get)(struct netns_ipvs *, int, const struct sk_buff *, const struct ip_vs_iphdr *);
+ struct ip_vs_conn * (*conn_out_get)(struct netns_ipvs *, int, const struct sk_buff *, const struct ip_vs_iphdr *);
+ int (*snat_handler)(struct sk_buff *, struct ip_vs_protocol *, struct ip_vs_conn *, struct ip_vs_iphdr *);
+ int (*dnat_handler)(struct sk_buff *, struct ip_vs_protocol *, struct ip_vs_conn *, struct ip_vs_iphdr *);
+ const char * (*state_name)(int);
+ void (*state_transition)(struct ip_vs_conn *, int, const struct sk_buff *, struct ip_vs_proto_data *);
+ int (*register_app)(struct netns_ipvs *, struct ip_vs_app *);
+ void (*unregister_app)(struct netns_ipvs *, struct ip_vs_app *);
+ int (*app_conn_bind)(struct ip_vs_conn *);
+ void (*debug_packet)(int, struct ip_vs_protocol *, const struct sk_buff *, int, const char *);
+ void (*timeout_change)(struct ip_vs_proto_data *, int);
+};
+
+struct tcp_states_t;
+
+struct ip_vs_proto_data {
+ struct ip_vs_proto_data *next;
+ struct ip_vs_protocol *pp;
+ int *timeout_table;
+ atomic_t appcnt;
+ struct tcp_states_t *tcp_state_table;
+};
+
+struct ip_vs_dest;
+
+struct ip_vs_pe;
+
+struct ip_vs_conn {
+ struct hlist_node c_list;
+ __be16 cport;
+ __be16 dport;
+ __be16 vport;
+ u16 af;
+ union nf_inet_addr caddr;
+ union nf_inet_addr vaddr;
+ union nf_inet_addr daddr;
+ volatile __u32 flags;
+ __u16 protocol;
+ __u16 daf;
+ struct netns_ipvs *ipvs;
+ refcount_t refcnt;
+ struct timer_list timer;
+ volatile long unsigned int timeout;
+ spinlock_t lock;
+ volatile __u16 state;
+ volatile __u16 old_state;
+ __u32 fwmark;
+ long unsigned int sync_endtime;
+ struct ip_vs_conn *control;
+ atomic_t n_control;
+ struct ip_vs_dest *dest;
+ atomic_t in_pkts;
+ int (*packet_xmit)(struct sk_buff *, struct ip_vs_conn *, struct ip_vs_protocol *, struct ip_vs_iphdr *);
+ struct ip_vs_app *app;
+ void *app_data;
+ struct ip_vs_seq in_seq;
+ struct ip_vs_seq out_seq;
+ const struct ip_vs_pe *pe;
+ char *pe_data;
+ __u8 pe_data_len;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_app {
+ struct list_head a_list;
+ int type;
+ char *name;
+ __u16 protocol;
+ struct module *module;
+ struct list_head incs_list;
+ struct list_head p_list;
+ struct ip_vs_app *app;
+ __be16 port;
+ atomic_t usecnt;
+ struct callback_head callback_head;
+ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *, struct ip_vs_iphdr *);
+ int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *, struct ip_vs_iphdr *);
+ int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+ int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+ int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *, struct ip_vs_protocol *);
+ void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);
+ int *timeout_table;
+ int *timeouts;
+ int timeouts_size;
+ int (*conn_schedule)(struct sk_buff *, struct ip_vs_app *, int *, struct ip_vs_conn **);
+ struct ip_vs_conn * (*conn_in_get)(const struct sk_buff *, struct ip_vs_app *, const struct iphdr *, int);
+ struct ip_vs_conn * (*conn_out_get)(const struct sk_buff *, struct ip_vs_app *, const struct iphdr *, int);
+ int (*state_transition)(struct ip_vs_conn *, int, const struct sk_buff *, struct ip_vs_app *);
+ void (*timeout_change)(struct ip_vs_app *, int);
+};
+
+struct ip_vs_conn_param {
+ struct netns_ipvs *ipvs;
+ const union nf_inet_addr *caddr;
+ const union nf_inet_addr *vaddr;
+ __be16 cport;
+ __be16 vport;
+ __u16 protocol;
+ u16 af;
+ const struct ip_vs_pe *pe;
+ char *pe_data;
+ __u8 pe_data_len;
+};
+
+struct ip_vs_service;
+
+struct ip_vs_pe {
+ struct list_head n_list;
+ char *name;
+ atomic_t refcnt;
+ struct module *module;
+ int (*fill_param)(struct ip_vs_conn_param *, struct sk_buff *);
+ bool (*ct_match)(const struct ip_vs_conn_param *, struct ip_vs_conn *);
+ u32 (*hashkey_raw)(const struct ip_vs_conn_param *, u32, bool);
+ int (*show_pe_data)(const struct ip_vs_conn *, char *);
+ struct ip_vs_conn * (*conn_out)(struct ip_vs_service *, struct ip_vs_dest *, struct sk_buff *, const struct ip_vs_iphdr *, __be16, __be16);
+};
+
+struct ip_vs_dest_dst;
+
+struct ip_vs_dest {
+ struct list_head n_list;
+ struct hlist_node d_list;
+ u16 af;
+ __be16 port;
+ union nf_inet_addr addr;
+ volatile unsigned int flags;
+ atomic_t conn_flags;
+ atomic_t weight;
+ atomic_t last_weight;
+ __u16 tun_type;
+ __be16 tun_port;
+ __u16 tun_flags;
+ refcount_t refcnt;
+ struct ip_vs_stats stats;
+ long unsigned int idle_start;
+ atomic_t activeconns;
+ atomic_t inactconns;
+ atomic_t persistconns;
+ __u32 u_threshold;
+ __u32 l_threshold;
+ spinlock_t dst_lock;
+ struct ip_vs_dest_dst *dest_dst;
+ struct ip_vs_service *svc;
+ __u16 protocol;
+ __be16 vport;
+ union nf_inet_addr vaddr;
+ __u32 vfwmark;
+ struct list_head t_list;
+ unsigned int in_rs_table: 1;
+};
+
+struct ip_vs_scheduler;
+
+struct ip_vs_service {
+ struct hlist_node s_list;
+ struct hlist_node f_list;
+ atomic_t refcnt;
+ u16 af;
+ __u16 protocol;
+ union nf_inet_addr addr;
+ __be16 port;
+ __u32 fwmark;
+ unsigned int flags;
+ unsigned int timeout;
+ __be32 netmask;
+ struct netns_ipvs *ipvs;
+ struct list_head destinations;
+ __u32 num_dests;
+ struct ip_vs_stats stats;
+ struct ip_vs_scheduler *scheduler;
+ spinlock_t sched_lock;
+ void *sched_data;
+ struct ip_vs_pe *pe;
+ int conntrack_afmask;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_scheduler {
+ struct list_head n_list;
+ char *name;
+ atomic_t refcnt;
+ struct module *module;
+ int (*init_service)(struct ip_vs_service *);
+ void (*done_service)(struct ip_vs_service *);
+ int (*add_dest)(struct ip_vs_service *, struct ip_vs_dest *);
+ int (*del_dest)(struct ip_vs_service *, struct ip_vs_dest *);
+ int (*upd_dest)(struct ip_vs_service *, struct ip_vs_dest *);
+ struct ip_vs_dest * (*schedule)(struct ip_vs_service *, const struct sk_buff *, struct ip_vs_iphdr *);
+};
+
+struct ip_vs_dest_dst {
+ struct dst_entry *dst_cache;
+ u32 dst_cookie;
+ union nf_inet_addr dst_saddr;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_sync_buff;
+
+struct ipvs_master_sync_state {
+ struct list_head sync_queue;
+ struct ip_vs_sync_buff *sync_buff;
+ long unsigned int sync_queue_len;
+ unsigned int sync_queue_delay;
+ struct delayed_work master_wakeup_work;
+ struct netns_ipvs *ipvs;
+};
+
+enum xt_l2tp_type {
+ XT_L2TP_TYPE_CONTROL = 0,
+ XT_L2TP_TYPE_DATA = 1,
+};
+
+struct xt_l2tp_info {
+ __u32 tid;
+ __u32 sid;
+ __u8 version;
+ __u8 type;
+ __u8 flags;
+};
+
+enum {
+ XT_L2TP_TID = 1,
+ XT_L2TP_SID = 2,
+ XT_L2TP_VERSION = 4,
+ XT_L2TP_TYPE = 8,
+};
+
+struct l2tp_data {
+ u32 tid;
+ u32 sid;
+ u8 type;
+ u8 version;
+};
+
+union l2tp_val {
+ __be16 val16[2];
+ __be32 val32;
+};
+
+struct xt_length_info {
+ __u16 min;
+ __u16 max;
+ __u8 invert;
+};
+
+struct xt_limit_priv;
+
+struct xt_rateinfo {
+ __u32 avg;
+ __u32 burst;
+ long unsigned int prev;
+ __u32 credit;
+ __u32 credit_cap;
+ __u32 cost;
+ struct xt_limit_priv *master;
+};
+
+struct xt_limit_priv {
+ spinlock_t lock;
+ long unsigned int prev;
+ uint32_t credit;
+};
+
+struct xt_mac_info {
+ unsigned char srcaddr[6];
+ int invert;
+};
+
+enum xt_multiport_flags {
+ XT_MULTIPORT_SOURCE = 0,
+ XT_MULTIPORT_DESTINATION = 1,
+ XT_MULTIPORT_EITHER = 2,
+};
+
+struct xt_multiport_v1 {
+ __u8 flags;
+ __u8 count;
+ __u16 ports[15];
+ __u8 pflags[15];
+ __u8 invert;
+};
+
+struct nf_acct___2;
+
+struct xt_nfacct_match_info {
+ char name[32];
+ struct nf_acct___2 *nfacct;
+};
+
+enum {
+ XT_OWNER_UID = 1,
+ XT_OWNER_GID = 2,
+ XT_OWNER_SOCKET = 4,
+ XT_OWNER_SUPPL_GROUPS = 8,
+};
+
+struct xt_owner_match_info {
+ __u32 uid_min;
+ __u32 uid_max;
+ __u32 gid_min;
+ __u32 gid_max;
+ __u8 match;
+ __u8 invert;
+};
+
+struct xt_cgroup_info_v0 {
+ __u32 id;
+ __u32 invert;
+};
+
+struct xt_cgroup_info_v1 {
+ __u8 has_path;
+ __u8 has_classid;
+ __u8 invert_path;
+ __u8 invert_classid;
+ char path[4096];
+ __u32 classid;
+ void *priv;
+};
+
+struct xt_cgroup_info_v2 {
+ __u8 has_path;
+ __u8 has_classid;
+ __u8 invert_path;
+ __u8 invert_classid;
+ union {
+ char path[512];
+ __u32 classid;
+ };
+ void *priv;
+};
+
+struct xt_pkttype_info {
+ int pkttype;
+ int invert;
+};
+
+enum xt_policy_flags {
+ XT_POLICY_MATCH_IN = 1,
+ XT_POLICY_MATCH_OUT = 2,
+ XT_POLICY_MATCH_NONE = 4,
+ XT_POLICY_MATCH_STRICT = 8,
+};
+
+struct xt_policy_spec {
+ __u8 saddr: 1;
+ __u8 daddr: 1;
+ __u8 proto: 1;
+ __u8 mode: 1;
+ __u8 spi: 1;
+ __u8 reqid: 1;
+};
+
+struct xt_policy_elem {
+ union {
+ struct {
+ union nf_inet_addr saddr;
+ union nf_inet_addr smask;
+ union nf_inet_addr daddr;
+ union nf_inet_addr dmask;
+ };
+ };
+ __be32 spi;
+ __u32 reqid;
+ __u8 proto;
+ __u8 mode;
+ struct xt_policy_spec match;
+ struct xt_policy_spec invert;
+};
+
+struct xt_policy_info {
+ struct xt_policy_elem pol[4];
+ __u16 flags;
+ __u16 len;
+};
+
+enum xt_quota_flags {
+ XT_QUOTA_INVERT = 1,
+};
+
+struct xt_quota_priv;
+
+struct xt_quota_info {
+ __u32 flags;
+ __u32 pad;
+ __u64 quota;
+ struct xt_quota_priv *master;
+};
+
+struct xt_quota_priv {
+ spinlock_t lock;
+ uint64_t quota;
+};
+
+enum xt_rateest_match_flags {
+ XT_RATEEST_MATCH_INVERT = 1,
+ XT_RATEEST_MATCH_ABS = 2,
+ XT_RATEEST_MATCH_REL = 4,
+ XT_RATEEST_MATCH_DELTA = 8,
+ XT_RATEEST_MATCH_BPS = 16,
+ XT_RATEEST_MATCH_PPS = 32,
+};
+
+enum xt_rateest_match_mode {
+ XT_RATEEST_MATCH_NONE = 0,
+ XT_RATEEST_MATCH_EQ = 1,
+ XT_RATEEST_MATCH_LT = 2,
+ XT_RATEEST_MATCH_GT = 3,
+};
+
+struct xt_rateest_match_info {
+ char name1[16];
+ char name2[16];
+ __u16 flags;
+ __u16 mode;
+ __u32 bps1;
+ __u32 pps1;
+ __u32 bps2;
+ __u32 pps2;
+ struct xt_rateest *est1;
+ struct xt_rateest *est2;
+};
+
+struct xt_realm_info {
+ __u32 id;
+ __u32 mask;
+ __u8 invert;
+};
+
+enum {
+ XT_RECENT_CHECK = 1,
+ XT_RECENT_SET = 2,
+ XT_RECENT_UPDATE = 4,
+ XT_RECENT_REMOVE = 8,
+ XT_RECENT_TTL = 16,
+ XT_RECENT_REAP = 32,
+ XT_RECENT_SOURCE = 0,
+ XT_RECENT_DEST = 1,
+ XT_RECENT_NAME_LEN = 200,
+};
+
+struct xt_recent_mtinfo_v1 {
+ __u32 seconds;
+ __u32 hit_count;
+ __u8 check_set;
+ __u8 invert;
+ char name[200];
+ __u8 side;
+ union nf_inet_addr mask;
+};
+
+struct recent_entry {
+ struct list_head list;
+ struct list_head lru_list;
+ union nf_inet_addr addr;
+ u_int16_t family;
+ u_int8_t ttl;
+ u_int8_t index;
+ u_int16_t nstamps;
+ long unsigned int stamps[0];
+};
+
+struct recent_table {
+ struct list_head list;
+ char name[200];
+ union nf_inet_addr mask;
+ unsigned int refcnt;
+ unsigned int entries;
+ u8 nstamps_max_mask;
+ struct list_head lru_list;
+ struct list_head iphash[0];
+};
+
+struct recent_net {
+ struct list_head tables;
+ struct proc_dir_entry *xt_recent;
+};
+
+struct recent_iter_state {
+ const struct recent_table *table;
+ unsigned int bucket;
+};
+
+struct xt_recent_mtinfo_v0;
+
+struct xt_sctp_flag_info {
+ __u8 chunktype;
+ __u8 flag;
+ __u8 flag_mask;
+};
+
+struct xt_sctp_info {
+ __u16 dpts[2];
+ __u16 spts[2];
+ __u32 chunkmap[64];
+ __u32 chunk_match_type;
+ struct xt_sctp_flag_info flag_info[4];
+ int flag_count;
+ __u32 flags;
+ __u32 invflags;
+};
+
+struct xt_state_info {
+ unsigned int statemask;
+};
+
+enum xt_statistic_mode {
+ XT_STATISTIC_MODE_RANDOM = 0,
+ XT_STATISTIC_MODE_NTH = 1,
+ __XT_STATISTIC_MODE_MAX = 2,
+};
+
+enum xt_statistic_flags {
+ XT_STATISTIC_INVERT = 1,
+};
+
+struct xt_statistic_priv;
+
+struct xt_statistic_info {
+ __u16 mode;
+ __u16 flags;
+ union {
+ struct {
+ __u32 probability;
+ } random;
+ struct {
+ __u32 every;
+ __u32 packet;
+ __u32 count;
+ } nth;
+ } u;
+ struct xt_statistic_priv *master;
+};
+
+struct xt_statistic_priv {
+ atomic_t count;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+enum {
+ XT_STRING_FLAG_INVERT = 1,
+ XT_STRING_FLAG_IGNORECASE = 2,
+};
+
+struct xt_string_info {
+ __u16 from_offset;
+ __u16 to_offset;
+ char algo[16];
+ char pattern[128];
+ __u8 patlen;
+ union {
+ struct {
+ __u8 invert;
+ } v0;
+ struct {
+ __u8 flags;
+ } v1;
+ } u;
+ struct ts_config *config;
+};
+
+struct xt_tcpmss_match_info {
+ __u16 mss_min;
+ __u16 mss_max;
+ __u8 invert;
+};
+
+struct xt_time_info {
+ __u32 date_start;
+ __u32 date_stop;
+ __u32 daytime_start;
+ __u32 daytime_stop;
+ __u32 monthdays_match;
+ __u8 weekdays_match;
+ __u8 flags;
+};
+
+enum {
+ XT_TIME_LOCAL_TZ = 1,
+ XT_TIME_CONTIGUOUS = 2,
+ XT_TIME_ALL_MONTHDAYS = -2,
+ XT_TIME_ALL_WEEKDAYS = 254,
+ XT_TIME_MIN_DAYTIME = 0,
+ XT_TIME_MAX_DAYTIME = 86399,
+};
+
+struct xtm {
+ u_int8_t month;
+ u_int8_t monthday;
+ u_int8_t weekday;
+ u_int8_t hour;
+ u_int8_t minute;
+ u_int8_t second;
+ unsigned int dse;
+};
+
+enum {
+ DSE_FIRST = 2039,
+ SECONDS_PER_DAY = 86400,
+};
+
+enum xt_u32_ops {
+ XT_U32_AND = 0,
+ XT_U32_LEFTSH = 1,
+ XT_U32_RIGHTSH = 2,
+ XT_U32_AT = 3,
+};
+
+struct xt_u32_location_element {
+ __u32 number;
+ __u8 nextop;
+};
+
+struct xt_u32_value_element {
+ __u32 min;
+ __u32 max;
+};
+
+struct xt_u32_test {
+ struct xt_u32_location_element location[11];
+ struct xt_u32_value_element value[11];
+ __u8 nnums;
+ __u8 nvalues;
+};
+
+struct xt_u32 {
+ struct xt_u32_test tests[11];
+ __u8 ntests;
+ __u8 invert;
+};
+
+enum ipset_cmd {
+ IPSET_CMD_NONE = 0,
+ IPSET_CMD_PROTOCOL = 1,
+ IPSET_CMD_CREATE = 2,
+ IPSET_CMD_DESTROY = 3,
+ IPSET_CMD_FLUSH = 4,
+ IPSET_CMD_RENAME = 5,
+ IPSET_CMD_SWAP = 6,
+ IPSET_CMD_LIST = 7,
+ IPSET_CMD_SAVE = 8,
+ IPSET_CMD_ADD = 9,
+ IPSET_CMD_DEL = 10,
+ IPSET_CMD_TEST = 11,
+ IPSET_CMD_HEADER = 12,
+ IPSET_CMD_TYPE = 13,
+ IPSET_CMD_GET_BYNAME = 14,
+ IPSET_CMD_GET_BYINDEX = 15,
+ IPSET_MSG_MAX = 16,
+ IPSET_CMD_RESTORE = 16,
+ IPSET_CMD_HELP = 17,
+ IPSET_CMD_VERSION = 18,
+ IPSET_CMD_QUIT = 19,
+ IPSET_CMD_MAX = 20,
+ IPSET_CMD_COMMIT = 20,
+};
+
+enum {
+ IPSET_ATTR_IPADDR_IPV4 = 1,
+ IPSET_ATTR_IPADDR_IPV6 = 2,
+ __IPSET_ATTR_IPADDR_MAX = 3,
+};
+
+enum ipset_errno {
+ IPSET_ERR_PRIVATE = 4096,
+ IPSET_ERR_PROTOCOL = 4097,
+ IPSET_ERR_FIND_TYPE = 4098,
+ IPSET_ERR_MAX_SETS = 4099,
+ IPSET_ERR_BUSY = 4100,
+ IPSET_ERR_EXIST_SETNAME2 = 4101,
+ IPSET_ERR_TYPE_MISMATCH = 4102,
+ IPSET_ERR_EXIST = 4103,
+ IPSET_ERR_INVALID_CIDR = 4104,
+ IPSET_ERR_INVALID_NETMASK = 4105,
+ IPSET_ERR_INVALID_FAMILY = 4106,
+ IPSET_ERR_TIMEOUT = 4107,
+ IPSET_ERR_REFERENCED = 4108,
+ IPSET_ERR_IPADDR_IPV4 = 4109,
+ IPSET_ERR_IPADDR_IPV6 = 4110,
+ IPSET_ERR_COUNTER = 4111,
+ IPSET_ERR_COMMENT = 4112,
+ IPSET_ERR_INVALID_MARKMASK = 4113,
+ IPSET_ERR_SKBINFO = 4114,
+ IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+enum ipset_create_flags {
+ IPSET_CREATE_FLAG_BIT_FORCEADD = 0,
+ IPSET_CREATE_FLAG_FORCEADD = 1,
+ IPSET_CREATE_FLAG_BIT_MAX = 7,
+};
+
+union ip_set_name_index {
+ char name[32];
+ ip_set_id_t index;
+};
+
+struct ip_set_req_get_set {
+ unsigned int op;
+ unsigned int version;
+ union ip_set_name_index set;
+};
+
+struct ip_set_req_get_set_family {
+ unsigned int op;
+ unsigned int version;
+ unsigned int family;
+ union ip_set_name_index set;
+};
+
+struct ip_set_req_version {
+ unsigned int op;
+ unsigned int version;
+};
+
+enum ip_set_feature {
+ IPSET_TYPE_IP_FLAG = 0,
+ IPSET_TYPE_IP = 1,
+ IPSET_TYPE_PORT_FLAG = 1,
+ IPSET_TYPE_PORT = 2,
+ IPSET_TYPE_MAC_FLAG = 2,
+ IPSET_TYPE_MAC = 4,
+ IPSET_TYPE_IP2_FLAG = 3,
+ IPSET_TYPE_IP2 = 8,
+ IPSET_TYPE_NAME_FLAG = 4,
+ IPSET_TYPE_NAME = 16,
+ IPSET_TYPE_IFACE_FLAG = 5,
+ IPSET_TYPE_IFACE = 32,
+ IPSET_TYPE_MARK_FLAG = 6,
+ IPSET_TYPE_MARK = 64,
+ IPSET_TYPE_NOMATCH_FLAG = 7,
+ IPSET_TYPE_NOMATCH = 128,
+ IPSET_DUMP_LAST_FLAG = 8,
+ IPSET_DUMP_LAST = 256,
+};
+
+struct ip_set_counter {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+struct ip_set_comment_rcu {
+ struct callback_head rcu;
+ char str[0];
+};
+
+struct ip_set_comment {
+ struct ip_set_comment_rcu *c;
+};
+
+enum {
+ IPSET_CB_NET = 0,
+ IPSET_CB_PROTO = 1,
+ IPSET_CB_DUMP = 2,
+ IPSET_CB_INDEX = 3,
+ IPSET_CB_PRIVATE = 4,
+ IPSET_CB_ARG0 = 5,
+};
+
+struct ip_set_net {
+ struct ip_set **ip_set_list;
+ ip_set_id_t ip_set_max;
+ bool is_deleted;
+ bool is_destroyed;
+};
+
+enum {
+ IPSET_ERR_BITMAP_RANGE = 4352,
+ IPSET_ERR_BITMAP_RANGE_SIZE = 4353,
+};
+
+enum {
+ IPSET_ADD_STORE_PLAIN_TIMEOUT = -1,
+ IPSET_ADD_FAILED = 1,
+ IPSET_ADD_START_STORED_TIMEOUT = 2,
+};
+
+struct bitmap_ip {
+ long unsigned int *members;
+ u32 first_ip;
+ u32 last_ip;
+ u32 elements;
+ u32 hosts;
+ size_t memsize;
+ u8 netmask;
+ struct timer_list gc;
+ struct ip_set *set;
+ unsigned char extensions[0];
+};
+
+struct bitmap_ip_adt_elem {
+ u16 id;
+};
+
+struct bitmap_ip_elem {};
+
+enum {
+ MAC_UNSET = 0,
+ MAC_FILLED = 1,
+};
+
+struct bitmap_ipmac {
+ long unsigned int *members;
+ u32 first_ip;
+ u32 last_ip;
+ u32 elements;
+ size_t memsize;
+ struct timer_list gc;
+ struct ip_set *set;
+ unsigned char extensions[0];
+};
+
+struct bitmap_ipmac_adt_elem {
+ unsigned char ether[6];
+ u16 id;
+ u16 add_mac;
+};
+
+struct bitmap_ipmac_elem {
+ unsigned char ether[6];
+ unsigned char filled;
+ char: 8;
+};
+
+struct bitmap_port {
+ long unsigned int *members;
+ u16 first_port;
+ u16 last_port;
+ u32 elements;
+ size_t memsize;
+ struct timer_list gc;
+ struct ip_set *set;
+ unsigned char extensions[0];
+};
+
+struct bitmap_port_adt_elem {
+ u16 id;
+};
+
+struct bitmap_port_elem {};
+
+struct ip_set_region {
+ spinlock_t lock;
+ size_t ext_size;
+ u32 elements;
+};
+
+enum {
+ IPSET_ERR_HASH_FULL = 4352,
+ IPSET_ERR_HASH_ELEM = 4353,
+ IPSET_ERR_INVALID_PROTO = 4354,
+ IPSET_ERR_MISSING_PROTO = 4355,
+ IPSET_ERR_HASH_RANGE_UNSUPPORTED = 4356,
+ IPSET_ERR_HASH_RANGE = 4357,
+};
+
+struct hash_ip4_elem {
+ __be32 ip;
+};
+
+struct hbucket {
+ struct callback_head rcu;
+ long unsigned int used[1];
+ u8 size;
+ u8 pos;
+ long: 48;
+ unsigned char value[0];
+};
+
+struct htable_gc {
+ struct delayed_work dwork;
+ struct ip_set *set;
+ u32 region;
+};
+
+struct htable {
+ atomic_t ref;
+ atomic_t uref;
+ u8 htable_bits;
+ u32 maxelem;
+ struct ip_set_region *hregion;
+ struct hbucket *bucket[0];
+};
+
+struct hash_ip4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ u8 netmask;
+ struct list_head ad;
+ struct hash_ip4_elem next;
+};
+
+struct hash_ip4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ip4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ip6_elem {
+ union nf_inet_addr ip;
+};
+
+struct hash_ip6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ u8 netmask;
+ struct list_head ad;
+ struct hash_ip6_elem next;
+};
+
+struct hash_ip6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ip6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipmark4_elem {
+ __be32 ip;
+ __u32 mark;
+};
+
+struct hash_ipmark4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ u32 markmask;
+ struct list_head ad;
+ struct hash_ipmark4_elem next;
+};
+
+struct hash_ipmark4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipmark4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipmark6_elem {
+ union nf_inet_addr ip;
+ __u32 mark;
+};
+
+struct hash_ipmark6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ u32 markmask;
+ struct list_head ad;
+ struct hash_ipmark6_elem next;
+};
+
+struct hash_ipmark6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipmark6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipport4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_ipport4_elem next;
+};
+
+struct hash_ipport4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipport4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipport6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_ipport6_elem next;
+};
+
+struct hash_ipport6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipport6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipportip4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipportip4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_ipportip4_elem next;
+};
+
+struct hash_ipportip4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipportip4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipportip6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipportip6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_ipportip6_elem next;
+};
+
+struct hash_ipportip6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipportip6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipportnet4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr: 7;
+ u8 nomatch: 1;
+ u8 proto;
+};
+
+struct net_prefixes {
+ u32 nets[1];
+ u8 cidr[1];
+};
+
+struct hash_ipportnet4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_ipportnet4_elem next;
+ struct net_prefixes nets[32];
+};
+
+struct hash_ipportnet4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipportnet4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_ipportnet6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr: 7;
+ u8 nomatch: 1;
+ u8 proto;
+};
+
+struct hash_ipportnet6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_ipportnet6_elem next;
+ struct net_prefixes nets[128];
+};
+
+struct hash_ipportnet6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_ipportnet6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_mac4_elem {
+ union {
+ unsigned char ether[6];
+ __be32 foo[2];
+ };
+};
+
+struct hash_mac4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_mac4_elem next;
+};
+
+struct hash_mac4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_mac4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_net4_elem {
+ __be32 ip;
+ u16 padding0;
+ u8 nomatch;
+ u8 cidr;
+};
+
+struct hash_net4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_net4_elem next;
+ struct net_prefixes nets[32];
+};
+
+struct hash_net4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_net4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_net6_elem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 nomatch;
+ u8 cidr;
+};
+
+struct hash_net6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_net6_elem next;
+ struct net_prefixes nets[128];
+};
+
+struct hash_net6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_net6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr: 7;
+ u8 nomatch: 1;
+};
+
+struct hash_netport4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_netport4_elem next;
+ struct net_prefixes nets[32];
+};
+
+struct hash_netport4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netport4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr: 7;
+ u8 nomatch: 1;
+};
+
+struct hash_netport6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_netport6_elem next;
+ struct net_prefixes nets[128];
+};
+
+struct hash_netport6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netport6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netiface4_elem {
+ __be32 ip;
+ u8 physdev;
+ u8 cidr;
+ u8 nomatch;
+ u8 elem;
+ u8 wildcard;
+ char iface[16];
+};
+
+struct hash_netiface4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ u8 ahash_max;
+ struct list_head ad;
+ struct hash_netiface4_elem next;
+ struct net_prefixes nets[33];
+};
+
+struct hash_netiface4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netiface4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netiface6_elem {
+ union nf_inet_addr ip;
+ u8 physdev;
+ u8 cidr;
+ u8 nomatch;
+ u8 elem;
+ u8 wildcard;
+ char iface[16];
+};
+
+struct hash_netiface6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ u8 ahash_max;
+ struct list_head ad;
+ struct hash_netiface6_elem next;
+ struct net_prefixes nets[129];
+};
+
+struct hash_netiface6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netiface6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netnet4_elem {
+ union {
+ __be32 ip[2];
+ __be64 ipcmp;
+ };
+ u8 nomatch;
+ u8 padding;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+};
+
+struct net_prefixes___2 {
+ u32 nets[2];
+ u8 cidr[2];
+};
+
+struct hash_netnet4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_netnet4_elem next;
+ struct net_prefixes___2 nets[32];
+};
+
+struct hash_netnet4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netnet4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netnet6_elem {
+ union nf_inet_addr ip[2];
+ u8 nomatch;
+ u8 padding;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+};
+
+struct hash_netnet6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_netnet6_elem next;
+ struct net_prefixes___2 nets[128];
+};
+
+struct hash_netnet6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netnet6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netportnet4_elem {
+ union {
+ __be32 ip[2];
+ __be64 ipcmp;
+ };
+ __be16 port;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+ u16 padding;
+ u8 nomatch;
+ u8 proto;
+};
+
+struct hash_netportnet4 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_netportnet4_elem next;
+ struct net_prefixes___2 nets[32];
+};
+
+struct hash_netportnet4_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netportnet4_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+struct hash_netportnet6_elem {
+ union nf_inet_addr ip[2];
+ __be16 port;
+ union {
+ u8 cidr[2];
+ u16 ccmp;
+ };
+ u16 padding;
+ u8 nomatch;
+ u8 proto;
+};
+
+struct hash_netportnet6 {
+ struct htable *table;
+ struct htable_gc gc;
+ u32 maxelem;
+ u32 initval;
+ struct list_head ad;
+ struct hash_netportnet6_elem next;
+ struct net_prefixes___2 nets[128];
+};
+
+struct hash_netportnet6_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad;
+ struct hash_netportnet6_elem d;
+ struct ip_set_ext ext;
+ struct ip_set_ext mext;
+ u32 flags;
+};
+
+enum {
+ IPSET_ERR_NAME = 4352,
+ IPSET_ERR_LOOP = 4353,
+ IPSET_ERR_BEFORE = 4354,
+ IPSET_ERR_NAMEREF = 4355,
+ IPSET_ERR_LIST_FULL = 4356,
+ IPSET_ERR_REF_EXIST = 4357,
+};
+
+struct set_elem {
+ struct callback_head rcu;
+ struct list_head list;
+ struct ip_set *set;
+ ip_set_id_t id;
+};
+
+struct set_adt_elem {
+ ip_set_id_t id;
+ ip_set_id_t refid;
+ int before;
+};
+
+struct list_set {
+ u32 size;
+ struct timer_list gc;
+ struct ip_set *set;
+ struct net *net;
+ struct list_head members;
+};
+
+enum {
+ IP_VS_TCP_S_NONE = 0,
+ IP_VS_TCP_S_ESTABLISHED = 1,
+ IP_VS_TCP_S_SYN_SENT = 2,
+ IP_VS_TCP_S_SYN_RECV = 3,
+ IP_VS_TCP_S_FIN_WAIT = 4,
+ IP_VS_TCP_S_TIME_WAIT = 5,
+ IP_VS_TCP_S_CLOSE = 6,
+ IP_VS_TCP_S_CLOSE_WAIT = 7,
+ IP_VS_TCP_S_LAST_ACK = 8,
+ IP_VS_TCP_S_LISTEN = 9,
+ IP_VS_TCP_S_SYNACK = 10,
+ IP_VS_TCP_S_LAST = 11,
+};
+
+enum ip_vs_sctp_states {
+ IP_VS_SCTP_S_NONE = 0,
+ IP_VS_SCTP_S_INIT1 = 1,
+ IP_VS_SCTP_S_INIT = 2,
+ IP_VS_SCTP_S_COOKIE_SENT = 3,
+ IP_VS_SCTP_S_COOKIE_REPLIED = 4,
+ IP_VS_SCTP_S_COOKIE_WAIT = 5,
+ IP_VS_SCTP_S_COOKIE = 6,
+ IP_VS_SCTP_S_COOKIE_ECHOED = 7,
+ IP_VS_SCTP_S_ESTABLISHED = 8,
+ IP_VS_SCTP_S_SHUTDOWN_SENT = 9,
+ IP_VS_SCTP_S_SHUTDOWN_RECEIVED = 10,
+ IP_VS_SCTP_S_SHUTDOWN_ACK_SENT = 11,
+ IP_VS_SCTP_S_REJECTED = 12,
+ IP_VS_SCTP_S_CLOSED = 13,
+ IP_VS_SCTP_S_LAST = 14,
+};
+
+struct ip_vs_aligned_lock {
+ spinlock_t l;
+};
+
+struct ip_vs_iter_state {
+ struct seq_net_private p;
+ struct hlist_head *l;
+};
+
+enum ip_defrag_users {
+ IP_DEFRAG_LOCAL_DELIVER = 0,
+ IP_DEFRAG_CALL_RA_CHAIN = 1,
+ IP_DEFRAG_CONNTRACK_IN = 2,
+ __IP_DEFRAG_CONNTRACK_IN_END = 65537,
+ IP_DEFRAG_CONNTRACK_OUT = 65538,
+ __IP_DEFRAG_CONNTRACK_OUT_END = 131073,
+ IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074,
+ __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609,
+ IP_DEFRAG_VS_IN = 196610,
+ IP_DEFRAG_VS_OUT = 196611,
+ IP_DEFRAG_VS_FWD = 196612,
+ IP_DEFRAG_AF_PACKET = 196613,
+ IP_DEFRAG_MACVLAN = 196614,
+};
+
+struct guehdr {
+ union {
+ struct {
+ __u8 hlen: 5;
+ __u8 control: 1;
+ __u8 version: 2;
+ __u8 proto_ctype;
+ __be16 flags;
+ };
+ __be32 word;
+ };
+};
+
+enum {
+ IP_VS_CONN_F_TUNNEL_TYPE_IPIP = 0,
+ IP_VS_CONN_F_TUNNEL_TYPE_GUE = 1,
+ IP_VS_CONN_F_TUNNEL_TYPE_GRE = 2,
+ IP_VS_CONN_F_TUNNEL_TYPE_MAX = 3,
+};
+
+enum {
+ IP_VS_DIR_INPUT = 0,
+ IP_VS_DIR_OUTPUT = 1,
+ IP_VS_DIR_INPUT_ONLY = 2,
+ IP_VS_DIR_LAST = 3,
+};
+
+struct ip_vs_service_user {
+ __u16 protocol;
+ __be32 addr;
+ __be16 port;
+ __u32 fwmark;
+ char sched_name[16];
+ unsigned int flags;
+ unsigned int timeout;
+ __be32 netmask;
+};
+
+struct ip_vs_dest_user {
+ __be32 addr;
+ __be16 port;
+ unsigned int conn_flags;
+ int weight;
+ __u32 u_threshold;
+ __u32 l_threshold;
+};
+
+struct ip_vs_stats_user {
+ __u32 conns;
+ __u32 inpkts;
+ __u32 outpkts;
+ __u64 inbytes;
+ __u64 outbytes;
+ __u32 cps;
+ __u32 inpps;
+ __u32 outpps;
+ __u32 inbps;
+ __u32 outbps;
+};
+
+struct ip_vs_getinfo {
+ unsigned int version;
+ unsigned int size;
+ unsigned int num_services;
+};
+
+struct ip_vs_service_entry {
+ __u16 protocol;
+ __be32 addr;
+ __be16 port;
+ __u32 fwmark;
+ char sched_name[16];
+ unsigned int flags;
+ unsigned int timeout;
+ __be32 netmask;
+ unsigned int num_dests;
+ struct ip_vs_stats_user stats;
+};
+
+struct ip_vs_dest_entry {
+ __be32 addr;
+ __be16 port;
+ unsigned int conn_flags;
+ int weight;
+ __u32 u_threshold;
+ __u32 l_threshold;
+ __u32 activeconns;
+ __u32 inactconns;
+ __u32 persistconns;
+ struct ip_vs_stats_user stats;
+};
+
+struct ip_vs_get_dests {
+ __u16 protocol;
+ __be32 addr;
+ __be16 port;
+ __u32 fwmark;
+ unsigned int num_dests;
+ struct ip_vs_dest_entry entrytable[0];
+};
+
+struct ip_vs_get_services {
+ unsigned int num_services;
+ struct ip_vs_service_entry entrytable[0];
+};
+
+struct ip_vs_timeout_user {
+ int tcp_timeout;
+ int tcp_fin_timeout;
+ int udp_timeout;
+};
+
+struct ip_vs_daemon_user {
+ int state;
+ char mcast_ifn[16];
+ int syncid;
+};
+
+struct ip_vs_flags {
+ __u32 flags;
+ __u32 mask;
+};
+
+enum {
+ IPVS_CMD_UNSPEC = 0,
+ IPVS_CMD_NEW_SERVICE = 1,
+ IPVS_CMD_SET_SERVICE = 2,
+ IPVS_CMD_DEL_SERVICE = 3,
+ IPVS_CMD_GET_SERVICE = 4,
+ IPVS_CMD_NEW_DEST = 5,
+ IPVS_CMD_SET_DEST = 6,
+ IPVS_CMD_DEL_DEST = 7,
+ IPVS_CMD_GET_DEST = 8,
+ IPVS_CMD_NEW_DAEMON = 9,
+ IPVS_CMD_DEL_DAEMON = 10,
+ IPVS_CMD_GET_DAEMON = 11,
+ IPVS_CMD_SET_CONFIG = 12,
+ IPVS_CMD_GET_CONFIG = 13,
+ IPVS_CMD_SET_INFO = 14,
+ IPVS_CMD_GET_INFO = 15,
+ IPVS_CMD_ZERO = 16,
+ IPVS_CMD_FLUSH = 17,
+ __IPVS_CMD_MAX = 18,
+};
+
+enum {
+ IPVS_CMD_ATTR_UNSPEC = 0,
+ IPVS_CMD_ATTR_SERVICE = 1,
+ IPVS_CMD_ATTR_DEST = 2,
+ IPVS_CMD_ATTR_DAEMON = 3,
+ IPVS_CMD_ATTR_TIMEOUT_TCP = 4,
+ IPVS_CMD_ATTR_TIMEOUT_TCP_FIN = 5,
+ IPVS_CMD_ATTR_TIMEOUT_UDP = 6,
+ __IPVS_CMD_ATTR_MAX = 7,
+};
+
+enum {
+ IPVS_SVC_ATTR_UNSPEC = 0,
+ IPVS_SVC_ATTR_AF = 1,
+ IPVS_SVC_ATTR_PROTOCOL = 2,
+ IPVS_SVC_ATTR_ADDR = 3,
+ IPVS_SVC_ATTR_PORT = 4,
+ IPVS_SVC_ATTR_FWMARK = 5,
+ IPVS_SVC_ATTR_SCHED_NAME = 6,
+ IPVS_SVC_ATTR_FLAGS = 7,
+ IPVS_SVC_ATTR_TIMEOUT = 8,
+ IPVS_SVC_ATTR_NETMASK = 9,
+ IPVS_SVC_ATTR_STATS = 10,
+ IPVS_SVC_ATTR_PE_NAME = 11,
+ IPVS_SVC_ATTR_STATS64 = 12,
+ __IPVS_SVC_ATTR_MAX = 13,
+};
+
+enum {
+ IPVS_DEST_ATTR_UNSPEC = 0,
+ IPVS_DEST_ATTR_ADDR = 1,
+ IPVS_DEST_ATTR_PORT = 2,
+ IPVS_DEST_ATTR_FWD_METHOD = 3,
+ IPVS_DEST_ATTR_WEIGHT = 4,
+ IPVS_DEST_ATTR_U_THRESH = 5,
+ IPVS_DEST_ATTR_L_THRESH = 6,
+ IPVS_DEST_ATTR_ACTIVE_CONNS = 7,
+ IPVS_DEST_ATTR_INACT_CONNS = 8,
+ IPVS_DEST_ATTR_PERSIST_CONNS = 9,
+ IPVS_DEST_ATTR_STATS = 10,
+ IPVS_DEST_ATTR_ADDR_FAMILY = 11,
+ IPVS_DEST_ATTR_STATS64 = 12,
+ IPVS_DEST_ATTR_TUN_TYPE = 13,
+ IPVS_DEST_ATTR_TUN_PORT = 14,
+ IPVS_DEST_ATTR_TUN_FLAGS = 15,
+ __IPVS_DEST_ATTR_MAX = 16,
+};
+
+enum {
+ IPVS_DAEMON_ATTR_UNSPEC = 0,
+ IPVS_DAEMON_ATTR_STATE = 1,
+ IPVS_DAEMON_ATTR_MCAST_IFN = 2,
+ IPVS_DAEMON_ATTR_SYNC_ID = 3,
+ IPVS_DAEMON_ATTR_SYNC_MAXLEN = 4,
+ IPVS_DAEMON_ATTR_MCAST_GROUP = 5,
+ IPVS_DAEMON_ATTR_MCAST_GROUP6 = 6,
+ IPVS_DAEMON_ATTR_MCAST_PORT = 7,
+ IPVS_DAEMON_ATTR_MCAST_TTL = 8,
+ __IPVS_DAEMON_ATTR_MAX = 9,
+};
+
+enum {
+ IPVS_STATS_ATTR_UNSPEC = 0,
+ IPVS_STATS_ATTR_CONNS = 1,
+ IPVS_STATS_ATTR_INPKTS = 2,
+ IPVS_STATS_ATTR_OUTPKTS = 3,
+ IPVS_STATS_ATTR_INBYTES = 4,
+ IPVS_STATS_ATTR_OUTBYTES = 5,
+ IPVS_STATS_ATTR_CPS = 6,
+ IPVS_STATS_ATTR_INPPS = 7,
+ IPVS_STATS_ATTR_OUTPPS = 8,
+ IPVS_STATS_ATTR_INBPS = 9,
+ IPVS_STATS_ATTR_OUTBPS = 10,
+ IPVS_STATS_ATTR_PAD = 11,
+ __IPVS_STATS_ATTR_MAX = 12,
+};
+
+enum {
+ IPVS_INFO_ATTR_UNSPEC = 0,
+ IPVS_INFO_ATTR_VERSION = 1,
+ IPVS_INFO_ATTR_CONN_TAB_SIZE = 2,
+ __IPVS_INFO_ATTR_MAX = 3,
+};
+
+enum {
+ IP_VS_UDP_S_NORMAL = 0,
+ IP_VS_UDP_S_LAST = 1,
+};
+
+struct ip_vs_service_user_kern {
+ u16 af;
+ u16 protocol;
+ union nf_inet_addr addr;
+ __be16 port;
+ u32 fwmark;
+ char *sched_name;
+ char *pe_name;
+ unsigned int flags;
+ unsigned int timeout;
+ __be32 netmask;
+};
+
+struct ip_vs_dest_user_kern {
+ union nf_inet_addr addr;
+ __be16 port;
+ unsigned int conn_flags;
+ int weight;
+ u32 u_threshold;
+ u32 l_threshold;
+ u16 af;
+ u16 tun_type;
+ __be16 tun_port;
+ u16 tun_flags;
+};
+
+struct ip_vs_iter {
+ struct seq_net_private p;
+ struct hlist_head *table;
+ int bucket;
+};
+
+enum {
+ IP_VS_RT_MODE_LOCAL = 1,
+ IP_VS_RT_MODE_NON_LOCAL = 2,
+ IP_VS_RT_MODE_RDR = 4,
+ IP_VS_RT_MODE_CONNECT = 8,
+ IP_VS_RT_MODE_KNOWN_NH = 16,
+ IP_VS_RT_MODE_TUNNEL = 32,
+};
+
+struct ip_mreqn {
+ struct in_addr imr_multiaddr;
+ struct in_addr imr_address;
+ int imr_ifindex;
+};
+
+struct ip_sf_list;
+
+struct ip_mc_list {
+ struct in_device *interface;
+ __be32 multiaddr;
+ unsigned int sfmode;
+ struct ip_sf_list *sources;
+ struct ip_sf_list *tomb;
+ long unsigned int sfcount[2];
+ union {
+ struct ip_mc_list *next;
+ struct ip_mc_list *next_rcu;
+ };
+ struct ip_mc_list *next_hash;
+ struct timer_list timer;
+ int users;
+ refcount_t refcnt;
+ spinlock_t lock;
+ char tm_running;
+ char reporter;
+ char unsolicit_count;
+ char loaded;
+ unsigned char gsquery;
+ unsigned char crcount;
+ struct callback_head rcu;
+};
+
+struct ip_sf_socklist {
+ unsigned int sl_max;
+ unsigned int sl_count;
+ struct callback_head rcu;
+ __be32 sl_addr[0];
+};
+
+struct ip_mc_socklist {
+ struct ip_mc_socklist *next_rcu;
+ struct ip_mreqn multi;
+ unsigned int sfmode;
+ struct ip_sf_socklist *sflist;
+ struct callback_head rcu;
+};
+
+struct ip_sf_list {
+ struct ip_sf_list *sf_next;
+ long unsigned int sf_count[2];
+ __be32 sf_inaddr;
+ unsigned char sf_gsresp;
+ unsigned char sf_oldin;
+ unsigned char sf_crcount;
+};
+
+struct ip_vs_sync_mesg;
+
+struct ip_vs_sync_buff {
+ struct list_head list;
+ long unsigned int firstuse;
+ struct ip_vs_sync_mesg *mesg;
+ unsigned char *head;
+ unsigned char *end;
+};
+
+struct ip_vs_sync_thread_data {
+ struct task_struct *task;
+ struct netns_ipvs *ipvs;
+ struct socket *sock;
+ char *buf;
+ int id;
+};
+
+struct ip_vs_sync_conn_v0 {
+ __u8 reserved;
+ __u8 protocol;
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 caddr;
+ __be32 vaddr;
+ __be32 daddr;
+ __be16 flags;
+ __be16 state;
+};
+
+struct ip_vs_sync_conn_options {
+ struct ip_vs_seq in_seq;
+ struct ip_vs_seq out_seq;
+};
+
+struct ip_vs_sync_v4 {
+ __u8 type;
+ __u8 protocol;
+ __be16 ver_size;
+ __be32 flags;
+ __be16 state;
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark;
+ __be32 timeout;
+ __be32 caddr;
+ __be32 vaddr;
+ __be32 daddr;
+};
+
+struct ip_vs_sync_v6 {
+ __u8 type;
+ __u8 protocol;
+ __be16 ver_size;
+ __be32 flags;
+ __be16 state;
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark;
+ __be32 timeout;
+ struct in6_addr caddr;
+ struct in6_addr vaddr;
+ struct in6_addr daddr;
+};
+
+union ip_vs_sync_conn {
+ struct ip_vs_sync_v4 v4;
+ struct ip_vs_sync_v6 v6;
+};
+
+struct ip_vs_sync_mesg_v0 {
+ __u8 nr_conns;
+ __u8 syncid;
+ __be16 size;
+};
+
+struct ip_vs_sync_mesg {
+ __u8 reserved;
+ __u8 syncid;
+ __be16 size;
+ __u8 nr_conns;
+ __s8 version;
+ __u16 spare;
+};
+
+union ipvs_sockaddr {
+ struct sockaddr_in in;
+ struct sockaddr_in6 in6;
+};
+
+struct tcp_states_t {
+ int next_state[11];
+};
+
+enum ipvs_sctp_event_t {
+ IP_VS_SCTP_DATA = 0,
+ IP_VS_SCTP_INIT = 1,
+ IP_VS_SCTP_INIT_ACK = 2,
+ IP_VS_SCTP_COOKIE_ECHO = 3,
+ IP_VS_SCTP_COOKIE_ACK = 4,
+ IP_VS_SCTP_SHUTDOWN = 5,
+ IP_VS_SCTP_SHUTDOWN_ACK = 6,
+ IP_VS_SCTP_SHUTDOWN_COMPLETE = 7,
+ IP_VS_SCTP_ERROR = 8,
+ IP_VS_SCTP_ABORT = 9,
+ IP_VS_SCTP_EVENT_LAST = 10,
+};
+
+struct ip_vs_wrr_mark {
+ struct ip_vs_dest *cl;
+ int cw;
+ int mw;
+ int di;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_lblc_entry {
+ struct hlist_node list;
+ int af;
+ union nf_inet_addr addr;
+ struct ip_vs_dest *dest;
+ long unsigned int lastuse;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_lblc_table {
+ struct callback_head callback_head;
+ struct hlist_head bucket[1024];
+ struct timer_list periodic_timer;
+ struct ip_vs_service *svc;
+ atomic_t entries;
+ int max_size;
+ int rover;
+ int counter;
+ bool dead;
+};
+
+struct ip_vs_dest_set_elem {
+ struct list_head list;
+ struct ip_vs_dest *dest;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_dest_set {
+ atomic_t size;
+ long unsigned int lastmod;
+ struct list_head list;
+};
+
+struct ip_vs_lblcr_entry {
+ struct hlist_node list;
+ int af;
+ union nf_inet_addr addr;
+ struct ip_vs_dest_set set;
+ long unsigned int lastuse;
+ struct callback_head callback_head;
+};
+
+struct ip_vs_lblcr_table {
+ struct callback_head callback_head;
+ struct hlist_head bucket[1024];
+ atomic_t entries;
+ int max_size;
+ struct timer_list periodic_timer;
+ struct ip_vs_service *svc;
+ int rover;
+ int counter;
+ bool dead;
+};
+
+struct ip_vs_dh_bucket {
+ struct ip_vs_dest *dest;
+};
+
+struct ip_vs_dh_state {
+ struct ip_vs_dh_bucket buckets[256];
+ struct callback_head callback_head;
+};
+
+struct ip_vs_sh_bucket {
+ struct ip_vs_dest *dest;
+};
+
+struct ip_vs_sh_state {
+ struct callback_head callback_head;
+ struct ip_vs_sh_bucket buckets[256];
+};
+
+enum {
+ IP_VS_FTP_ACTIVE = 0,
+ IP_VS_FTP_PORT = 0,
+ IP_VS_FTP_PASV = 1,
+ IP_VS_FTP_EPRT = 2,
+ IP_VS_FTP_EPSV = 3,
+};
+
+struct rtmsg {
+ unsigned char rtm_family;
+ unsigned char rtm_dst_len;
+ unsigned char rtm_src_len;
+ unsigned char rtm_tos;
+ unsigned char rtm_table;
+ unsigned char rtm_protocol;
+ unsigned char rtm_scope;
+ unsigned char rtm_type;
+ unsigned int rtm_flags;
+};
+
+struct rtvia {
+ __kernel_sa_family_t rtvia_family;
+ __u8 rtvia_addr[0];
+};
+
+struct ipv4_addr_key {
+ __be32 addr;
+ int vif;
+};
+
+struct inetpeer_addr {
+ union {
+ struct ipv4_addr_key a4;
+ struct in6_addr a6;
+ u32 key[4];
+ };
+ __u16 family;
+};
+
+struct inet_peer {
+ struct rb_node rb_node;
+ struct inetpeer_addr daddr;
+ u32 metrics[17];
+ u32 rate_tokens;
+ u32 n_redirects;
+ long unsigned int rate_last;
+ union {
+ struct {
+ atomic_t rid;
+ };
+ struct callback_head rcu;
+ };
+ __u32 dtime;
+ refcount_t refcnt;
+};
+
+struct fib_rt_info {
+ struct fib_info *fi;
+ u32 tb_id;
+ __be32 dst;
+ int dst_len;
+ u8 tos;
+ u8 type;
+ u8 offload: 1;
+ u8 trap: 1;
+ u8 unused: 6;
+};
+
+struct uncached_list {
+ spinlock_t lock;
+ struct list_head head;
+};
+
+struct rt_cache_stat {
+ unsigned int in_slow_tot;
+ unsigned int in_slow_mc;
+ unsigned int in_no_route;
+ unsigned int in_brd;
+ unsigned int in_martian_dst;
+ unsigned int in_martian_src;
+ unsigned int out_slow_tot;
+ unsigned int out_slow_mc;
+};
+
+struct fib_alias {
+ struct hlist_node fa_list;
+ struct fib_info *fa_info;
+ u8 fa_tos;
+ u8 fa_type;
+ u8 fa_state;
+ u8 fa_slen;
+ u32 tb_id;
+ s16 fa_default;
+ u8 offload: 1;
+ u8 trap: 1;
+ u8 unused: 6;
+ struct callback_head rcu;
+};
+
+struct fib_prop {
+ int error;
+ u8 scope;
+};
+
+struct raw_hashinfo {
+ rwlock_t lock;
+ struct hlist_head ht[256];
+};
+
+enum {
+ INET_FRAG_FIRST_IN = 1,
+ INET_FRAG_LAST_IN = 2,
+ INET_FRAG_COMPLETE = 4,
+ INET_FRAG_HASH_DEAD = 8,
+};
+
+struct ipq {
+ struct inet_frag_queue q;
+ u8 ecn;
+ u16 max_df_size;
+ int iif;
+ unsigned int rid;
+ struct inet_peer *peer;
+};
+
+struct ip_options_data {
+ struct ip_options_rcu opt;
+ char data[40];
+};
+
+struct ipcm_cookie {
+ struct sockcm_cookie sockc;
+ __be32 addr;
+ int oif;
+ struct ip_options_rcu *opt;
+ __u8 ttl;
+ __s16 tos;
+ char priority;
+ __u16 gso_size;
+};
+
+struct ip_fraglist_iter {
+ struct sk_buff *frag;
+ struct iphdr *iph;
+ int offset;
+ unsigned int hlen;
+};
+
+struct ip_frag_state {
+ bool DF;
+ unsigned int hlen;
+ unsigned int ll_rs;
+ unsigned int mtu;
+ unsigned int left;
+ int offset;
+ int ptr;
+ __be16 not_last_frag;
+};
+
+struct ip_reply_arg {
+ struct kvec iov[1];
+ int flags;
+ __wsum csum;
+ int csumoffset;
+ int bound_dev_if;
+ u8 tos;
+ kuid_t uid;
+};
+
+enum {
+ LWTUNNEL_XMIT_DONE = 0,
+ LWTUNNEL_XMIT_CONTINUE = 1,
+};
+
+struct ip_mreq_source {
+ __be32 imr_multiaddr;
+ __be32 imr_interface;
+ __be32 imr_sourceaddr;
+};
+
+struct ip_msfilter {
+ __be32 imsf_multiaddr;
+ __be32 imsf_interface;
+ __u32 imsf_fmode;
+ __u32 imsf_numsrc;
+ __be32 imsf_slist[1];
+};
+
+struct group_req {
+ __u32 gr_interface;
+ struct __kernel_sockaddr_storage gr_group;
+};
+
+struct group_source_req {
+ __u32 gsr_interface;
+ struct __kernel_sockaddr_storage gsr_group;
+ struct __kernel_sockaddr_storage gsr_source;
+};
+
+struct group_filter {
+ __u32 gf_interface;
+ struct __kernel_sockaddr_storage gf_group;
+ __u32 gf_fmode;
+ __u32 gf_numsrc;
+ struct __kernel_sockaddr_storage gf_slist[1];
+};
+
+struct in_pktinfo {
+ int ipi_ifindex;
+ struct in_addr ipi_spec_dst;
+ struct in_addr ipi_addr;
+};
+
+struct bpfilter_umh_ops {
+ struct umh_info info;
+ struct mutex lock;
+ int (*sockopt)(struct sock *, int, char *, unsigned int, bool);
+ int (*start)();
+ bool stop;
+};
+
+struct tcpvegas_info {
+ __u32 tcpv_enabled;
+ __u32 tcpv_rttcnt;
+ __u32 tcpv_rtt;
+ __u32 tcpv_minrtt;
+};
+
+struct tcp_dctcp_info {
+ __u16 dctcp_enabled;
+ __u16 dctcp_ce_state;
+ __u32 dctcp_alpha;
+ __u32 dctcp_ab_ecn;
+ __u32 dctcp_ab_tot;
+};
+
+struct tcp_bbr_info {
+ __u32 bbr_bw_lo;
+ __u32 bbr_bw_hi;
+ __u32 bbr_min_rtt;
+ __u32 bbr_pacing_gain;
+ __u32 bbr_cwnd_gain;
+};
+
+union tcp_cc_info {
+ struct tcpvegas_info vegas;
+ struct tcp_dctcp_info dctcp;
+ struct tcp_bbr_info bbr;
+};
+
+enum {
+ BPF_TCP_ESTABLISHED = 1,
+ BPF_TCP_SYN_SENT = 2,
+ BPF_TCP_SYN_RECV = 3,
+ BPF_TCP_FIN_WAIT1 = 4,
+ BPF_TCP_FIN_WAIT2 = 5,
+ BPF_TCP_TIME_WAIT = 6,
+ BPF_TCP_CLOSE = 7,
+ BPF_TCP_CLOSE_WAIT = 8,
+ BPF_TCP_LAST_ACK = 9,
+ BPF_TCP_LISTEN = 10,
+ BPF_TCP_CLOSING = 11,
+ BPF_TCP_NEW_SYN_RECV = 12,
+ BPF_TCP_MAX_STATES = 13,
+};
+
+enum inet_csk_ack_state_t {
+ ICSK_ACK_SCHED = 1,
+ ICSK_ACK_TIMER = 2,
+ ICSK_ACK_PUSHED = 4,
+ ICSK_ACK_PUSHED2 = 8,
+ ICSK_ACK_NOW = 16,
+};
+
+struct tcp_repair_opt {
+ __u32 opt_code;
+ __u32 opt_val;
+};
+
+struct tcp_repair_window {
+ __u32 snd_wl1;
+ __u32 snd_wnd;
+ __u32 max_window;
+ __u32 rcv_wnd;
+ __u32 rcv_wup;
+};
+
+enum {
+ TCP_NO_QUEUE = 0,
+ TCP_RECV_QUEUE = 1,
+ TCP_SEND_QUEUE = 2,
+ TCP_QUEUES_NR = 3,
+};
+
+struct tcp_info {
+ __u8 tcpi_state;
+ __u8 tcpi_ca_state;
+ __u8 tcpi_retransmits;
+ __u8 tcpi_probes;
+ __u8 tcpi_backoff;
+ __u8 tcpi_options;
+ __u8 tcpi_snd_wscale: 4;
+ __u8 tcpi_rcv_wscale: 4;
+ __u8 tcpi_delivery_rate_app_limited: 1;
+ __u8 tcpi_fastopen_client_fail: 2;
+ __u32 tcpi_rto;
+ __u32 tcpi_ato;
+ __u32 tcpi_snd_mss;
+ __u32 tcpi_rcv_mss;
+ __u32 tcpi_unacked;
+ __u32 tcpi_sacked;
+ __u32 tcpi_lost;
+ __u32 tcpi_retrans;
+ __u32 tcpi_fackets;
+ __u32 tcpi_last_data_sent;
+ __u32 tcpi_last_ack_sent;
+ __u32 tcpi_last_data_recv;
+ __u32 tcpi_last_ack_recv;
+ __u32 tcpi_pmtu;
+ __u32 tcpi_rcv_ssthresh;
+ __u32 tcpi_rtt;
+ __u32 tcpi_rttvar;
+ __u32 tcpi_snd_ssthresh;
+ __u32 tcpi_snd_cwnd;
+ __u32 tcpi_advmss;
+ __u32 tcpi_reordering;
+ __u32 tcpi_rcv_rtt;
+ __u32 tcpi_rcv_space;
+ __u32 tcpi_total_retrans;
+ __u64 tcpi_pacing_rate;
+ __u64 tcpi_max_pacing_rate;
+ __u64 tcpi_bytes_acked;
+ __u64 tcpi_bytes_received;
+ __u32 tcpi_segs_out;
+ __u32 tcpi_segs_in;
+ __u32 tcpi_notsent_bytes;
+ __u32 tcpi_min_rtt;
+ __u32 tcpi_data_segs_in;
+ __u32 tcpi_data_segs_out;
+ __u64 tcpi_delivery_rate;
+ __u64 tcpi_busy_time;
+ __u64 tcpi_rwnd_limited;
+ __u64 tcpi_sndbuf_limited;
+ __u32 tcpi_delivered;
+ __u32 tcpi_delivered_ce;
+ __u64 tcpi_bytes_sent;
+ __u64 tcpi_bytes_retrans;
+ __u32 tcpi_dsack_dups;
+ __u32 tcpi_reord_seen;
+ __u32 tcpi_rcv_ooopack;
+ __u32 tcpi_snd_wnd;
+};
+
+enum {
+ TCP_NLA_PAD = 0,
+ TCP_NLA_BUSY = 1,
+ TCP_NLA_RWND_LIMITED = 2,
+ TCP_NLA_SNDBUF_LIMITED = 3,
+ TCP_NLA_DATA_SEGS_OUT = 4,
+ TCP_NLA_TOTAL_RETRANS = 5,
+ TCP_NLA_PACING_RATE = 6,
+ TCP_NLA_DELIVERY_RATE = 7,
+ TCP_NLA_SND_CWND = 8,
+ TCP_NLA_REORDERING = 9,
+ TCP_NLA_MIN_RTT = 10,
+ TCP_NLA_RECUR_RETRANS = 11,
+ TCP_NLA_DELIVERY_RATE_APP_LMT = 12,
+ TCP_NLA_SNDQ_SIZE = 13,
+ TCP_NLA_CA_STATE = 14,
+ TCP_NLA_SND_SSTHRESH = 15,
+ TCP_NLA_DELIVERED = 16,
+ TCP_NLA_DELIVERED_CE = 17,
+ TCP_NLA_BYTES_SENT = 18,
+ TCP_NLA_BYTES_RETRANS = 19,
+ TCP_NLA_DSACK_DUPS = 20,
+ TCP_NLA_REORD_SEEN = 21,
+ TCP_NLA_SRTT = 22,
+ TCP_NLA_TIMEOUT_REHASH = 23,
+ TCP_NLA_BYTES_NOTSENT = 24,
+};
+
+struct tcp_zerocopy_receive {
+ __u64 address;
+ __u32 length;
+ __u32 recv_skip_hint;
+ __u32 inq;
+ __s32 err;
+};
+
+struct tcp_md5sig_pool {
+ struct ahash_request *md5_req;
+ void *scratch;
+};
+
+enum tcp_chrono {
+ TCP_CHRONO_UNSPEC = 0,
+ TCP_CHRONO_BUSY = 1,
+ TCP_CHRONO_RWND_LIMITED = 2,
+ TCP_CHRONO_SNDBUF_LIMITED = 3,
+ __TCP_CHRONO_MAX = 4,
+};
+
+struct tcp_splice_state {
+ struct pipe_inode_info *pipe;
+ size_t len;
+ unsigned int flags;
+};
+
+enum tcp_fastopen_client_fail {
+ TFO_STATUS_UNSPEC = 0,
+ TFO_COOKIE_UNAVAILABLE = 1,
+ TFO_DATA_NOT_ACKED = 2,
+ TFO_SYN_RETRANSMITTED = 3,
+};
+
+enum tcp_queue {
+ TCP_FRAG_IN_WRITE_QUEUE = 0,
+ TCP_FRAG_IN_RTX_QUEUE = 1,
+};
+
+enum tcp_ca_ack_event_flags {
+ CA_ACK_SLOWPATH = 1,
+ CA_ACK_WIN_UPDATE = 2,
+ CA_ACK_ECE = 4,
+};
+
+struct tcp_sacktag_state {
+ u32 reord;
+ u64 first_sackt;
+ u64 last_sackt;
+ struct rate_sample *rate;
+ int flag;
+ unsigned int mss_now;
+};
+
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE = 0,
+ PKT_HASH_TYPE_L2 = 1,
+ PKT_HASH_TYPE_L3 = 2,
+ PKT_HASH_TYPE_L4 = 3,
+};
+
+enum tsq_flags {
+ TSQF_THROTTLED = 1,
+ TSQF_QUEUED = 2,
+ TCPF_TSQ_DEFERRED = 4,
+ TCPF_WRITE_TIMER_DEFERRED = 8,
+ TCPF_DELACK_TIMER_DEFERRED = 16,
+ TCPF_MTU_REDUCED_DEFERRED = 32,
+};
+
+struct mptcp_out_options {};
+
+struct tcp_out_options {
+ u16 options;
+ u16 mss;
+ u8 ws;
+ u8 num_sack_blocks;
+ u8 hash_size;
+ __u8 *hash_location;
+ __u32 tsval;
+ __u32 tsecr;
+ struct tcp_fastopen_cookie *fastopen_cookie;
+ struct mptcp_out_options mptcp;
+};
+
+struct tsq_tasklet {
+ struct tasklet_struct tasklet;
+ struct list_head head;
+};
+
+struct tcp_md5sig {
+ struct __kernel_sockaddr_storage tcpm_addr;
+ __u8 tcpm_flags;
+ __u8 tcpm_prefixlen;
+ __u16 tcpm_keylen;
+ int tcpm_ifindex;
+ __u8 tcpm_key[80];
+};
+
+struct tcp_timewait_sock {
+ struct inet_timewait_sock tw_sk;
+ u32 tw_rcv_wnd;
+ u32 tw_ts_offset;
+ u32 tw_ts_recent;
+ u32 tw_last_oow_ack_time;
+ int tw_ts_recent_stamp;
+ u32 tw_tx_delay;
+ struct tcp_md5sig_key *tw_md5_key;
+};
+
+enum tcp_tw_status {
+ TCP_TW_SUCCESS = 0,
+ TCP_TW_RST = 1,
+ TCP_TW_ACK = 2,
+ TCP_TW_SYN = 3,
+};
+
+struct tcp4_pseudohdr {
+ __be32 saddr;
+ __be32 daddr;
+ __u8 pad;
+ __u8 protocol;
+ __be16 len;
+};
+
+enum tcp_seq_states {
+ TCP_SEQ_STATE_LISTENING = 0,
+ TCP_SEQ_STATE_ESTABLISHED = 1,
+};
+
+struct tcp_seq_afinfo {
+ sa_family_t family;
+};
+
+struct tcp_iter_state {
+ struct seq_net_private p;
+ enum tcp_seq_states state;
+ struct sock *syn_wait_sk;
+ int bucket;
+ int offset;
+ int sbucket;
+ int num;
+ loff_t last_pos;
+};
+
+enum tcp_metric_index {
+ TCP_METRIC_RTT = 0,
+ TCP_METRIC_RTTVAR = 1,
+ TCP_METRIC_SSTHRESH = 2,
+ TCP_METRIC_CWND = 3,
+ TCP_METRIC_REORDERING = 4,
+ TCP_METRIC_RTT_US = 5,
+ TCP_METRIC_RTTVAR_US = 6,
+ __TCP_METRIC_MAX = 7,
+};
+
+enum {
+ TCP_METRICS_ATTR_UNSPEC = 0,
+ TCP_METRICS_ATTR_ADDR_IPV4 = 1,
+ TCP_METRICS_ATTR_ADDR_IPV6 = 2,
+ TCP_METRICS_ATTR_AGE = 3,
+ TCP_METRICS_ATTR_TW_TSVAL = 4,
+ TCP_METRICS_ATTR_TW_TS_STAMP = 5,
+ TCP_METRICS_ATTR_VALS = 6,
+ TCP_METRICS_ATTR_FOPEN_MSS = 7,
+ TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8,
+ TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9,
+ TCP_METRICS_ATTR_FOPEN_COOKIE = 10,
+ TCP_METRICS_ATTR_SADDR_IPV4 = 11,
+ TCP_METRICS_ATTR_SADDR_IPV6 = 12,
+ TCP_METRICS_ATTR_PAD = 13,
+ __TCP_METRICS_ATTR_MAX = 14,
+};
+
+enum {
+ TCP_METRICS_CMD_UNSPEC = 0,
+ TCP_METRICS_CMD_GET = 1,
+ TCP_METRICS_CMD_DEL = 2,
+ __TCP_METRICS_CMD_MAX = 3,
+};
+
+struct tcp_fastopen_metrics {
+ u16 mss;
+ u16 syn_loss: 10;
+ u16 try_exp: 2;
+ long unsigned int last_syn_loss;
+ struct tcp_fastopen_cookie cookie;
+};
+
+struct tcp_metrics_block {
+ struct tcp_metrics_block *tcpm_next;
+ possible_net_t tcpm_net;
+ struct inetpeer_addr tcpm_saddr;
+ struct inetpeer_addr tcpm_daddr;
+ long unsigned int tcpm_stamp;
+ u32 tcpm_lock;
+ u32 tcpm_vals[5];
+ struct tcp_fastopen_metrics tcpm_fastopen;
+ struct callback_head callback_head;
+};
+
+struct tcpm_hash_bucket {
+ struct tcp_metrics_block *chain;
+};
+
+struct icmp_filter {
+ __u32 data;
+};
+
+struct raw_iter_state {
+ struct seq_net_private p;
+ int bucket;
+};
+
+struct raw_sock {
+ struct inet_sock inet;
+ struct icmp_filter filter;
+ u32 ipmr_table;
+};
+
+struct raw_frag_vec {
+ struct msghdr *msg;
+ union {
+ struct icmphdr icmph;
+ char c[1];
+ } hdr;
+ int hlen;
+};
+
+struct udp_sock {
+ struct inet_sock inet;
+ int pending;
+ unsigned int corkflag;
+ __u8 encap_type;
+ unsigned char no_check6_tx: 1;
+ unsigned char no_check6_rx: 1;
+ unsigned char encap_enabled: 1;
+ unsigned char gro_enabled: 1;
+ __u16 len;
+ __u16 gso_size;
+ __u16 pcslen;
+ __u16 pcrlen;
+ __u8 pcflag;
+ __u8 unused[3];
+ int (*encap_rcv)(struct sock *, struct sk_buff *);
+ int (*encap_err_lookup)(struct sock *, struct sk_buff *);
+ void (*encap_destroy)(struct sock *);
+ struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *);
+ int (*gro_complete)(struct sock *, struct sk_buff *, int);
+ struct sk_buff_head reader_queue;
+ int forward_deficit;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct udp_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+ __u16 cscov;
+ __u8 partial_cov;
+};
+
+struct udp_dev_scratch {
+ u32 _tsize_state;
+ u16 len;
+ bool is_linear;
+ bool csum_unnecessary;
+};
+
+struct udp_seq_afinfo {
+ sa_family_t family;
+ struct udp_table *udp_table;
+};
+
+struct udp_iter_state {
+ struct seq_net_private p;
+ int bucket;
+};
+
+struct inet_protosw {
+ struct list_head list;
+ short unsigned int type;
+ short unsigned int protocol;
+ struct proto *prot;
+ const struct proto_ops *ops;
+ unsigned char flags;
+};
+
+typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *);
+
+typedef struct sock * (*udp_lookup_t)(struct sk_buff *, __be16, __be16);
+
+struct arpreq {
+ struct sockaddr arp_pa;
+ struct sockaddr arp_ha;
+ int arp_flags;
+ struct sockaddr arp_netmask;
+ char arp_dev[16];
+};
+
+typedef struct {
+ char ax25_call[7];
+} ax25_address;
+
+enum {
+ AX25_VALUES_IPDEFMODE = 0,
+ AX25_VALUES_AXDEFMODE = 1,
+ AX25_VALUES_BACKOFF = 2,
+ AX25_VALUES_CONMODE = 3,
+ AX25_VALUES_WINDOW = 4,
+ AX25_VALUES_EWINDOW = 5,
+ AX25_VALUES_T1 = 6,
+ AX25_VALUES_T2 = 7,
+ AX25_VALUES_T3 = 8,
+ AX25_VALUES_IDLE = 9,
+ AX25_VALUES_N2 = 10,
+ AX25_VALUES_PACLEN = 11,
+ AX25_VALUES_PROTOCOL = 12,
+ AX25_VALUES_DS_TIMEOUT = 13,
+ AX25_MAX_VALUES = 14,
+};
+
+struct ax25_dev {
+ struct ax25_dev *next;
+ struct net_device *dev;
+ struct net_device *forward;
+ struct ctl_table_header *sysheader;
+ int values[14];
+};
+
+typedef struct ax25_dev ax25_dev;
+
+enum {
+ XFRM_LOOKUP_ICMP = 1,
+ XFRM_LOOKUP_QUEUE = 2,
+ XFRM_LOOKUP_KEEP_DST_REF = 4,
+};
+
+struct pingv6_ops {
+ int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *);
+ void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *);
+ void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *);
+ int (*icmpv6_err_convert)(u8, u8, int *);
+ void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *);
+ int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int);
+};
+
+struct icmp_bxm {
+ struct sk_buff *skb;
+ int offset;
+ int data_len;
+ struct {
+ struct icmphdr icmph;
+ __be32 times[3];
+ } data;
+ int head_len;
+ struct ip_options_data replyopts;
+};
+
+struct icmp_control {
+ bool (*handler)(struct sk_buff *);
+ short int error;
+};
+
+struct ifaddrmsg {
+ __u8 ifa_family;
+ __u8 ifa_prefixlen;
+ __u8 ifa_flags;
+ __u8 ifa_scope;
+ __u32 ifa_index;
+};
+
+enum {
+ IFA_UNSPEC = 0,
+ IFA_ADDRESS = 1,
+ IFA_LOCAL = 2,
+ IFA_LABEL = 3,
+ IFA_BROADCAST = 4,
+ IFA_ANYCAST = 5,
+ IFA_CACHEINFO = 6,
+ IFA_MULTICAST = 7,
+ IFA_FLAGS = 8,
+ IFA_RT_PRIORITY = 9,
+ IFA_TARGET_NETNSID = 10,
+ __IFA_MAX = 11,
+};
+
+struct ifa_cacheinfo {
+ __u32 ifa_prefered;
+ __u32 ifa_valid;
+ __u32 cstamp;
+ __u32 tstamp;
+};
+
+enum {
+ IFLA_INET_UNSPEC = 0,
+ IFLA_INET_CONF = 1,
+ __IFLA_INET_MAX = 2,
+};
+
+struct in_validator_info {
+ __be32 ivi_addr;
+ struct in_device *ivi_dev;
+ struct netlink_ext_ack *extack;
+};
+
+struct netconfmsg {
+ __u8 ncm_family;
+};
+
+enum {
+ NETCONFA_UNSPEC = 0,
+ NETCONFA_IFINDEX = 1,
+ NETCONFA_FORWARDING = 2,
+ NETCONFA_RP_FILTER = 3,
+ NETCONFA_MC_FORWARDING = 4,
+ NETCONFA_PROXY_NEIGH = 5,
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6,
+ NETCONFA_INPUT = 7,
+ NETCONFA_BC_FORWARDING = 8,
+ __NETCONFA_MAX = 9,
+};
+
+struct inet_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+ int ifindex;
+};
+
+struct devinet_sysctl_table {
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table devinet_vars[33];
+};
+
+struct rtentry {
+ long unsigned int rt_pad1;
+ struct sockaddr rt_dst;
+ struct sockaddr rt_gateway;
+ struct sockaddr rt_genmask;
+ short unsigned int rt_flags;
+ short int rt_pad2;
+ long unsigned int rt_pad3;
+ void *rt_pad4;
+ short int rt_metric;
+ char *rt_dev;
+ long unsigned int rt_mtu;
+ long unsigned int rt_window;
+ short unsigned int rt_irtt;
+};
+
+struct igmphdr {
+ __u8 type;
+ __u8 code;
+ __sum16 csum;
+ __be32 group;
+};
+
+struct igmpv3_grec {
+ __u8 grec_type;
+ __u8 grec_auxwords;
+ __be16 grec_nsrcs;
+ __be32 grec_mca;
+ __be32 grec_src[0];
+};
+
+struct igmpv3_report {
+ __u8 type;
+ __u8 resv1;
+ __sum16 csum;
+ __be16 resv2;
+ __be16 ngrec;
+ struct igmpv3_grec grec[0];
+};
+
+struct igmpv3_query {
+ __u8 type;
+ __u8 code;
+ __sum16 csum;
+ __be32 group;
+ __u8 qrv: 3;
+ __u8 suppress: 1;
+ __u8 resv: 4;
+ __u8 qqic;
+ __be16 nsrcs;
+ __be32 srcs[0];
+};
+
+struct igmp_mc_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev;
+ struct in_device *in_dev;
+};
+
+struct igmp_mcf_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev;
+ struct in_device *idev;
+ struct ip_mc_list *im;
+};
+
+struct rtnexthop {
+ short unsigned int rtnh_len;
+ unsigned char rtnh_flags;
+ unsigned char rtnh_hops;
+ int rtnh_ifindex;
+};
+
+struct fib_config {
+ u8 fc_dst_len;
+ u8 fc_tos;
+ u8 fc_protocol;
+ u8 fc_scope;
+ u8 fc_type;
+ u8 fc_gw_family;
+ u32 fc_table;
+ __be32 fc_dst;
+ union {
+ __be32 fc_gw4;
+ struct in6_addr fc_gw6;
+ };
+ int fc_oif;
+ u32 fc_flags;
+ u32 fc_priority;
+ __be32 fc_prefsrc;
+ u32 fc_nh_id;
+ struct nlattr *fc_mx;
+ struct rtnexthop *fc_mp;
+ int fc_mx_len;
+ int fc_mp_len;
+ u32 fc_flow;
+ u32 fc_nlflags;
+ struct nl_info fc_nlinfo;
+ struct nlattr *fc_encap;
+ u16 fc_encap_type;
+};
+
+struct fib_result_nl {
+ __be32 fl_addr;
+ u32 fl_mark;
+ unsigned char fl_tos;
+ unsigned char fl_scope;
+ unsigned char tb_id_in;
+ unsigned char tb_id;
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+ int err;
+};
+
+struct fib_dump_filter {
+ u32 table_id;
+ bool filter_set;
+ bool dump_routes;
+ bool dump_exceptions;
+ unsigned char protocol;
+ unsigned char rt_type;
+ unsigned int flags;
+ struct net_device *dev;
+};
+
+struct fib_nh_notifier_info {
+ struct fib_notifier_info info;
+ struct fib_nh *fib_nh;
+};
+
+enum lwtunnel_encap_types {
+ LWTUNNEL_ENCAP_NONE = 0,
+ LWTUNNEL_ENCAP_MPLS = 1,
+ LWTUNNEL_ENCAP_IP = 2,
+ LWTUNNEL_ENCAP_ILA = 3,
+ LWTUNNEL_ENCAP_IP6 = 4,
+ LWTUNNEL_ENCAP_SEG6 = 5,
+ LWTUNNEL_ENCAP_BPF = 6,
+ LWTUNNEL_ENCAP_SEG6_LOCAL = 7,
+ LWTUNNEL_ENCAP_RPL = 8,
+ __LWTUNNEL_ENCAP_MAX = 9,
+};
+
+struct fib_entry_notifier_info {
+ struct fib_notifier_info info;
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 tb_id;
+};
+
+typedef unsigned int t_key;
+
+struct key_vector {
+ t_key key;
+ unsigned char pos;
+ unsigned char bits;
+ unsigned char slen;
+ union {
+ struct hlist_head leaf;
+ struct key_vector *tnode[0];
+ };
+};
+
+struct tnode {
+ struct callback_head rcu;
+ t_key empty_children;
+ t_key full_children;
+ struct key_vector *parent;
+ struct key_vector kv[1];
+};
+
+struct trie_stat {
+ unsigned int totdepth;
+ unsigned int maxdepth;
+ unsigned int tnodes;
+ unsigned int leaves;
+ unsigned int nullpointers;
+ unsigned int prefixes;
+ unsigned int nodesizes[32];
+};
+
+struct trie {
+ struct key_vector kv[1];
+};
+
+struct fib_trie_iter {
+ struct seq_net_private p;
+ struct fib_table *tb;
+ struct key_vector *tnode;
+ unsigned int index;
+ unsigned int depth;
+};
+
+struct fib_route_iter {
+ struct seq_net_private p;
+ struct fib_table *main_tb;
+ struct key_vector *tnode;
+ loff_t pos;
+ t_key key;
+};
+
+struct ipfrag_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ };
+ struct sk_buff *next_frag;
+ int frag_run_len;
+};
+
+struct ping_iter_state {
+ struct seq_net_private p;
+ int bucket;
+ sa_family_t family;
+};
+
+struct pingfakehdr {
+ struct icmphdr icmph;
+ struct msghdr *msg;
+ sa_family_t family;
+ __wsum wcheck;
+};
+
+struct ping_table {
+ struct hlist_nulls_head hash[64];
+ rwlock_t lock;
+};
+
+enum lwtunnel_ip_t {
+ LWTUNNEL_IP_UNSPEC = 0,
+ LWTUNNEL_IP_ID = 1,
+ LWTUNNEL_IP_DST = 2,
+ LWTUNNEL_IP_SRC = 3,
+ LWTUNNEL_IP_TTL = 4,
+ LWTUNNEL_IP_TOS = 5,
+ LWTUNNEL_IP_FLAGS = 6,
+ LWTUNNEL_IP_PAD = 7,
+ LWTUNNEL_IP_OPTS = 8,
+ __LWTUNNEL_IP_MAX = 9,
+};
+
+enum lwtunnel_ip6_t {
+ LWTUNNEL_IP6_UNSPEC = 0,
+ LWTUNNEL_IP6_ID = 1,
+ LWTUNNEL_IP6_DST = 2,
+ LWTUNNEL_IP6_SRC = 3,
+ LWTUNNEL_IP6_HOPLIMIT = 4,
+ LWTUNNEL_IP6_TC = 5,
+ LWTUNNEL_IP6_FLAGS = 6,
+ LWTUNNEL_IP6_PAD = 7,
+ LWTUNNEL_IP6_OPTS = 8,
+ __LWTUNNEL_IP6_MAX = 9,
+};
+
+enum {
+ LWTUNNEL_IP_OPTS_UNSPEC = 0,
+ LWTUNNEL_IP_OPTS_GENEVE = 1,
+ LWTUNNEL_IP_OPTS_VXLAN = 2,
+ LWTUNNEL_IP_OPTS_ERSPAN = 3,
+ __LWTUNNEL_IP_OPTS_MAX = 4,
+};
+
+enum {
+ LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0,
+ LWTUNNEL_IP_OPT_GENEVE_CLASS = 1,
+ LWTUNNEL_IP_OPT_GENEVE_TYPE = 2,
+ LWTUNNEL_IP_OPT_GENEVE_DATA = 3,
+ __LWTUNNEL_IP_OPT_GENEVE_MAX = 4,
+};
+
+enum {
+ LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0,
+ LWTUNNEL_IP_OPT_VXLAN_GBP = 1,
+ __LWTUNNEL_IP_OPT_VXLAN_MAX = 2,
+};
+
+enum {
+ LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0,
+ LWTUNNEL_IP_OPT_ERSPAN_VER = 1,
+ LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2,
+ LWTUNNEL_IP_OPT_ERSPAN_DIR = 3,
+ LWTUNNEL_IP_OPT_ERSPAN_HWID = 4,
+ __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5,
+};
+
+struct lwtunnel_encap_ops {
+ int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *);
+ void (*destroy_state)(struct lwtunnel_state *);
+ int (*output)(struct net *, struct sock *, struct sk_buff *);
+ int (*input)(struct sk_buff *);
+ int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *);
+ int (*get_encap_size)(struct lwtunnel_state *);
+ int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *);
+ int (*xmit)(struct sk_buff *);
+ struct module *owner;
+};
+
+struct ip6_tnl_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *);
+ int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *);
+ int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32);
+};
+
+struct geneve_opt {
+ __be16 opt_class;
+ u8 type;
+ u8 length: 5;
+ u8 r3: 1;
+ u8 r2: 1;
+ u8 r1: 1;
+ u8 opt_data[0];
+};
+
+struct vxlan_metadata {
+ u32 gbp;
+};
+
+struct erspan_md2 {
+ __be32 timestamp;
+ __be16 sgt;
+ __u8 hwid_upper: 2;
+ __u8 ft: 5;
+ __u8 p: 1;
+ __u8 o: 1;
+ __u8 gra: 2;
+ __u8 dir: 1;
+ __u8 hwid: 4;
+};
+
+struct erspan_metadata {
+ int version;
+ union {
+ __be32 index;
+ struct erspan_md2 md2;
+ } u;
+};
+
+struct nhmsg {
+ unsigned char nh_family;
+ unsigned char nh_scope;
+ unsigned char nh_protocol;
+ unsigned char resvd;
+ unsigned int nh_flags;
+};
+
+struct nexthop_grp {
+ __u32 id;
+ __u8 weight;
+ __u8 resvd1;
+ __u16 resvd2;
+};
+
+enum {
+ NEXTHOP_GRP_TYPE_MPATH = 0,
+ __NEXTHOP_GRP_TYPE_MAX = 1,
+};
+
+enum {
+ NHA_UNSPEC = 0,
+ NHA_ID = 1,
+ NHA_GROUP = 2,
+ NHA_GROUP_TYPE = 3,
+ NHA_BLACKHOLE = 4,
+ NHA_OIF = 5,
+ NHA_GATEWAY = 6,
+ NHA_ENCAP_TYPE = 7,
+ NHA_ENCAP = 8,
+ NHA_GROUPS = 9,
+ NHA_MASTER = 10,
+ NHA_FDB = 11,
+ __NHA_MAX = 12,
+};
+
+struct nh_config {
+ u32 nh_id;
+ u8 nh_family;
+ u8 nh_protocol;
+ u8 nh_blackhole;
+ u8 nh_fdb;
+ u32 nh_flags;
+ int nh_ifindex;
+ struct net_device *dev;
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } gw;
+ struct nlattr *nh_grp;
+ u16 nh_grp_type;
+ struct nlattr *nh_encap;
+ u16 nh_encap_type;
+ u32 nlflags;
+ struct nl_info nlinfo;
+};
+
+enum nexthop_event_type {
+ NEXTHOP_EVENT_ADD = 0,
+ NEXTHOP_EVENT_DEL = 1,
+};
+
+enum tunnel_encap_types {
+ TUNNEL_ENCAP_NONE = 0,
+ TUNNEL_ENCAP_FOU = 1,
+ TUNNEL_ENCAP_GUE = 2,
+ TUNNEL_ENCAP_MPLS = 3,
+};
+
+struct ip_tunnel_prl_entry {
+ struct ip_tunnel_prl_entry *next;
+ __be32 addr;
+ u16 flags;
+ struct callback_head callback_head;
+};
+
+struct ip_tunnel {
+ struct ip_tunnel *next;
+ struct hlist_node hash_node;
+ struct net_device *dev;
+ struct net *net;
+ long unsigned int err_time;
+ int err_count;
+ u32 i_seqno;
+ u32 o_seqno;
+ int tun_hlen;
+ u32 index;
+ u8 erspan_ver;
+ u8 dir;
+ u16 hwid;
+ struct dst_cache dst_cache;
+ struct ip_tunnel_parm parms;
+ int mlink;
+ int encap_hlen;
+ int hlen;
+ struct ip_tunnel_encap encap;
+ struct ip_tunnel_prl_entry *prl;
+ unsigned int prl_count;
+ unsigned int ip_tnl_net_id;
+ struct gro_cells gro_cells;
+ __u32 fwmark;
+ bool collect_md;
+ bool ignore_df;
+};
+
+struct tnl_ptk_info {
+ __be16 flags;
+ __be16 proto;
+ __be32 key;
+ __be32 seq;
+ int hdr_len;
+};
+
+struct ip_tunnel_net {
+ struct net_device *fb_tunnel_dev;
+ struct rtnl_link_ops *rtnl_link_ops;
+ struct hlist_head tunnels[128];
+ struct ip_tunnel *collect_md_tun;
+ int type;
+};
+
+struct snmp_mib {
+ const char *name;
+ int entry;
+};
+
+struct xfrm_tunnel {
+ int (*handler)(struct sk_buff *);
+ int (*err_handler)(struct sk_buff *, u32);
+ struct xfrm_tunnel *next;
+ int priority;
+};
+
+enum rpc_display_format_t {
+ RPC_DISPLAY_ADDR = 0,
+ RPC_DISPLAY_PORT = 1,
+ RPC_DISPLAY_PROTO = 2,
+ RPC_DISPLAY_HEX_ADDR = 3,
+ RPC_DISPLAY_HEX_PORT = 4,
+ RPC_DISPLAY_NETID = 5,
+ RPC_DISPLAY_MAX = 6,
+};
+
+struct ic_device {
+ struct ic_device *next;
+ struct net_device *dev;
+ short unsigned int flags;
+ short int able;
+ __be32 xid;
+};
+
+struct bootp_pkt {
+ struct iphdr iph;
+ struct udphdr udph;
+ u8 op;
+ u8 htype;
+ u8 hlen;
+ u8 hops;
+ __be32 xid;
+ __be16 secs;
+ __be16 flags;
+ __be32 client_ip;
+ __be32 your_ip;
+ __be32 server_ip;
+ __be32 relay_ip;
+ u8 hw_addr[16];
+ u8 serv_name[64];
+ u8 boot_file[128];
+ u8 exten[312];
+};
+
+enum asn1_class {
+ ASN1_UNIV = 0,
+ ASN1_APPL = 1,
+ ASN1_CONT = 2,
+ ASN1_PRIV = 3,
+};
+
+enum nf_nat_snmp_basic_actions {
+ ACT_snmp_helper = 0,
+ ACT_snmp_version = 1,
+ NR__nf_nat_snmp_basic_actions = 2,
+};
+
+struct asn1_decoder___2;
+
+struct snmp_ctx {
+ unsigned char *begin;
+ __sum16 *check;
+ __be32 from;
+ __be32 to;
+};
+
+struct xt_get_revision {
+ char name[29];
+ __u8 revision;
+};
+
+struct ipt_icmp {
+ __u8 type;
+ __u8 code[2];
+ __u8 invflags;
+};
+
+struct ipt_getinfo {
+ char name[32];
+ unsigned int valid_hooks;
+ unsigned int hook_entry[5];
+ unsigned int underflow[5];
+ unsigned int num_entries;
+ unsigned int size;
+};
+
+struct ipt_replace {
+ char name[32];
+ unsigned int valid_hooks;
+ unsigned int num_entries;
+ unsigned int size;
+ unsigned int hook_entry[5];
+ unsigned int underflow[5];
+ unsigned int num_counters;
+ struct xt_counters *counters;
+ struct ipt_entry entries[0];
+};
+
+struct ipt_get_entries {
+ char name[32];
+ unsigned int size;
+ struct ipt_entry entrytable[0];
+};
+
+struct ipt_standard {
+ struct ipt_entry entry;
+ struct xt_standard_target target;
+};
+
+struct ipt_error {
+ struct ipt_entry entry;
+ struct xt_error_target target;
+};
+
+enum nf_ip_trace_comments {
+ NF_IP_TRACE_COMMENT_RULE = 0,
+ NF_IP_TRACE_COMMENT_RETURN = 1,
+ NF_IP_TRACE_COMMENT_POLICY = 2,
+};
+
+struct ipt_ah {
+ __u32 spis[2];
+ __u8 invflags;
+};
+
+enum {
+ XT_RPFILTER_LOOSE = 1,
+ XT_RPFILTER_VALID_MARK = 2,
+ XT_RPFILTER_ACCEPT_LOCAL = 4,
+ XT_RPFILTER_INVERT = 8,
+ XT_RPFILTER_OPTION_MASK = 15,
+};
+
+struct xt_rpfilter_info {
+ __u8 flags;
+};
+
+enum clusterip_hashmode {
+ CLUSTERIP_HASHMODE_SIP = 0,
+ CLUSTERIP_HASHMODE_SIP_SPT = 1,
+ CLUSTERIP_HASHMODE_SIP_SPT_DPT = 2,
+};
+
+struct clusterip_config;
+
+struct ipt_clusterip_tgt_info {
+ __u32 flags;
+ __u8 clustermac[6];
+ __u16 num_total_nodes;
+ __u16 num_local_nodes;
+ __u16 local_nodes[16];
+ __u32 hash_mode;
+ __u32 hash_initval;
+ struct clusterip_config *config;
+};
+
+struct clusterip_config {
+ struct list_head list;
+ refcount_t refcount;
+ refcount_t entries;
+ __be32 clusterip;
+ u_int8_t clustermac[6];
+ int ifindex;
+ u_int16_t num_total_nodes;
+ long unsigned int local_nodes;
+ struct proc_dir_entry *pde;
+ enum clusterip_hashmode hash_mode;
+ u_int32_t hash_initval;
+ struct callback_head rcu;
+ struct net *net;
+ char ifname[16];
+};
+
+struct clusterip_net {
+ struct list_head configs;
+ spinlock_t lock;
+ struct proc_dir_entry *procdir;
+ struct mutex mutex;
+};
+
+struct arp_payload {
+ u_int8_t src_hw[6];
+ __be32 src_ip;
+ u_int8_t dst_hw[6];
+ __be32 dst_ip;
+} __attribute__((packed));
+
+struct clusterip_seq_position {
+ unsigned int pos;
+ unsigned int weight;
+ unsigned int bit;
+ long unsigned int val;
+};
+
+struct ipt_ECN_info {
+ __u8 operation;
+ __u8 ip_ect;
+ union {
+ struct {
+ __u8 ece: 1;
+ __u8 cwr: 1;
+ } tcp;
+ } proto;
+};
+
+enum ipt_reject_with {
+ IPT_ICMP_NET_UNREACHABLE = 0,
+ IPT_ICMP_HOST_UNREACHABLE = 1,
+ IPT_ICMP_PROT_UNREACHABLE = 2,
+ IPT_ICMP_PORT_UNREACHABLE = 3,
+ IPT_ICMP_ECHOREPLY = 4,
+ IPT_ICMP_NET_PROHIBITED = 5,
+ IPT_ICMP_HOST_PROHIBITED = 6,
+ IPT_TCP_RESET = 7,
+ IPT_ICMP_ADMIN_PROHIBITED = 8,
+};
+
+struct ipt_reject_info {
+ enum ipt_reject_with with;
+};
+
+struct arpt_devaddr_info {
+ char addr[16];
+ char mask[16];
+};
+
+struct arpt_arp {
+ struct in_addr src;
+ struct in_addr tgt;
+ struct in_addr smsk;
+ struct in_addr tmsk;
+ __u8 arhln;
+ __u8 arhln_mask;
+ struct arpt_devaddr_info src_devaddr;
+ struct arpt_devaddr_info tgt_devaddr;
+ __be16 arpop;
+ __be16 arpop_mask;
+ __be16 arhrd;
+ __be16 arhrd_mask;
+ __be16 arpro;
+ __be16 arpro_mask;
+ char iniface[16];
+ char outiface[16];
+ unsigned char iniface_mask[16];
+ unsigned char outiface_mask[16];
+ __u8 flags;
+ __u16 invflags;
+};
+
+struct arpt_entry {
+ struct arpt_arp arp;
+ __u16 target_offset;
+ __u16 next_offset;
+ unsigned int comefrom;
+ struct xt_counters counters;
+ unsigned char elems[0];
+};
+
+struct arpt_getinfo {
+ char name[32];
+ unsigned int valid_hooks;
+ unsigned int hook_entry[3];
+ unsigned int underflow[3];
+ unsigned int num_entries;
+ unsigned int size;
+};
+
+struct arpt_replace {
+ char name[32];
+ unsigned int valid_hooks;
+ unsigned int num_entries;
+ unsigned int size;
+ unsigned int hook_entry[3];
+ unsigned int underflow[3];
+ unsigned int num_counters;
+ struct xt_counters *counters;
+ struct arpt_entry entries[0];
+};
+
+struct arpt_get_entries {
+ char name[32];
+ unsigned int size;
+ struct arpt_entry entrytable[0];
+};
+
+struct arpt_standard {
+ struct arpt_entry entry;
+ struct xt_standard_target target;
+};
+
+struct arpt_error {
+ struct arpt_entry entry;
+ struct xt_error_target target;
+};
+
+struct arpt_mangle {
+ char src_devaddr[16];
+ char tgt_devaddr[16];
+ union {
+ struct in_addr src_ip;
+ } u_s;
+ union {
+ struct in_addr tgt_ip;
+ } u_t;
+ __u8 flags;
+ int target;
+};
+
+enum {
+ INET_DIAG_NONE = 0,
+ INET_DIAG_MEMINFO = 1,
+ INET_DIAG_INFO = 2,
+ INET_DIAG_VEGASINFO = 3,
+ INET_DIAG_CONG = 4,
+ INET_DIAG_TOS = 5,
+ INET_DIAG_TCLASS = 6,
+ INET_DIAG_SKMEMINFO = 7,
+ INET_DIAG_SHUTDOWN = 8,
+ INET_DIAG_DCTCPINFO = 9,
+ INET_DIAG_PROTOCOL = 10,
+ INET_DIAG_SKV6ONLY = 11,
+ INET_DIAG_LOCALS = 12,
+ INET_DIAG_PEERS = 13,
+ INET_DIAG_PAD = 14,
+ INET_DIAG_MARK = 15,
+ INET_DIAG_BBRINFO = 16,
+ INET_DIAG_CLASS_ID = 17,
+ INET_DIAG_MD5SIG = 18,
+ INET_DIAG_ULP_INFO = 19,
+ INET_DIAG_SK_BPF_STORAGES = 20,
+ INET_DIAG_CGROUP_ID = 21,
+ __INET_DIAG_MAX = 22,
+};
+
+enum bbr_mode {
+ BBR_STARTUP = 0,
+ BBR_DRAIN = 1,
+ BBR_PROBE_BW = 2,
+ BBR_PROBE_RTT = 3,
+};
+
+struct bbr {
+ u32 min_rtt_us;
+ u32 min_rtt_stamp;
+ u32 probe_rtt_done_stamp;
+ struct minmax bw;
+ u32 rtt_cnt;
+ u32 next_rtt_delivered;
+ u64 cycle_mstamp;
+ u32 mode: 3;
+ u32 prev_ca_state: 3;
+ u32 packet_conservation: 1;
+ u32 round_start: 1;
+ u32 idle_restart: 1;
+ u32 probe_rtt_round_done: 1;
+ u32 unused: 13;
+ u32 lt_is_sampling: 1;
+ u32 lt_rtt_cnt: 7;
+ u32 lt_use_bw: 1;
+ u32 lt_bw;
+ u32 lt_last_delivered;
+ u32 lt_last_stamp;
+ u32 lt_last_lost;
+ u32 pacing_gain: 10;
+ u32 cwnd_gain: 10;
+ u32 full_bw_reached: 1;
+ u32 full_bw_cnt: 2;
+ u32 cycle_idx: 3;
+ u32 has_seen_rtt: 1;
+ u32 unused_b: 5;
+ u32 prior_cwnd;
+ u32 full_bw;
+ u64 ack_epoch_mstamp;
+ u16 extra_acked[2];
+ u32 ack_epoch_acked: 20;
+ u32 extra_acked_win_rtts: 5;
+ u32 extra_acked_win_idx: 1;
+ u32 unused_c: 6;
+};
+
+struct tls_rec {
+ struct list_head list;
+ int tx_ready;
+ int tx_flags;
+ struct sk_msg msg_plaintext;
+ struct sk_msg msg_encrypted;
+ struct scatterlist sg_aead_in[2];
+ struct scatterlist sg_aead_out[2];
+ char content_type;
+ struct scatterlist sg_content_type;
+ char aad_space[13];
+ u8 iv_data[16];
+ struct aead_request aead_req;
+ u8 aead_req_ctx[0];
+};
+
+struct tx_work {
+ struct delayed_work work;
+ struct sock *sk;
+};
+
+struct tls_sw_context_tx {
+ struct crypto_aead *aead_send;
+ struct crypto_wait async_wait;
+ struct tx_work tx_work;
+ struct tls_rec *open_rec;
+ struct list_head tx_list;
+ atomic_t encrypt_pending;
+ spinlock_t encrypt_compl_lock;
+ int async_notify;
+ u8 async_capable: 1;
+ long unsigned int tx_bitmask;
+};
+
+enum {
+ TCP_BPF_IPV4 = 0,
+ TCP_BPF_IPV6 = 1,
+ TCP_BPF_NUM_PROTS = 2,
+};
+
+enum {
+ TCP_BPF_BASE = 0,
+ TCP_BPF_TX = 1,
+ TCP_BPF_NUM_CFGS = 2,
+};
+
+enum {
+ UDP_BPF_IPV4 = 0,
+ UDP_BPF_IPV6 = 1,
+ UDP_BPF_NUM_PROTS = 2,
+};
+
+struct xfrm_policy_afinfo {
+ struct dst_ops *dst_ops;
+ struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32);
+ int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32);
+ int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *);
+ struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *);
+};
+
+struct xfrm_state_afinfo {
+ u8 family;
+ u8 proto;
+ const struct xfrm_type_offload *type_offload_esp;
+ const struct xfrm_type *type_esp;
+ const struct xfrm_type *type_ipip;
+ const struct xfrm_type *type_ipip6;
+ const struct xfrm_type *type_comp;
+ const struct xfrm_type *type_ah;
+ const struct xfrm_type *type_routing;
+ const struct xfrm_type *type_dstopts;
+ int (*output)(struct net *, struct sock *, struct sk_buff *);
+ int (*transport_finish)(struct sk_buff *, int);
+ void (*local_error)(struct sk_buff *, u32);
+};
+
+struct ip6_tnl;
+
+struct xfrm_tunnel_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+ union {
+ struct ip_tunnel *ip4;
+ struct ip6_tnl *ip6;
+ } tunnel;
+};
+
+struct xfrm_mode_skb_cb {
+ struct xfrm_tunnel_skb_cb header;
+ __be16 id;
+ __be16 frag_off;
+ u8 ihl;
+ u8 tos;
+ u8 ttl;
+ u8 protocol;
+ u8 optlen;
+ u8 flow_lbl[3];
+};
+
+struct xfrm_spi_skb_cb {
+ struct xfrm_tunnel_skb_cb header;
+ unsigned int daddroff;
+ unsigned int family;
+ __be32 seq;
+};
+
+struct xfrm_input_afinfo {
+ unsigned int family;
+ int (*callback)(struct sk_buff *, u8, int);
+};
+
+struct xfrm4_protocol {
+ int (*handler)(struct sk_buff *);
+ int (*input_handler)(struct sk_buff *, int, __be32, int);
+ int (*cb_handler)(struct sk_buff *, int);
+ int (*err_handler)(struct sk_buff *, u32);
+ struct xfrm4_protocol *next;
+ int priority;
+};
+
+typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32);
+
+enum {
+ INET_ULP_INFO_UNSPEC = 0,
+ INET_ULP_INFO_NAME = 1,
+ INET_ULP_INFO_TLS = 2,
+ INET_ULP_INFO_MPTCP = 3,
+ __INET_ULP_INFO_MAX = 4,
+};
+
+enum {
+ TLS_INFO_UNSPEC = 0,
+ TLS_INFO_VERSION = 1,
+ TLS_INFO_CIPHER = 2,
+ TLS_INFO_TXCONF = 3,
+ TLS_INFO_RXCONF = 4,
+ __TLS_INFO_MAX = 5,
+};
+
+enum {
+ TLS_BASE = 0,
+ TLS_SW = 1,
+ TLS_HW = 2,
+ TLS_HW_RECORD = 3,
+ TLS_NUM_CONFIG = 4,
+};
+
+enum {
+ TLSV4 = 0,
+ TLSV6 = 1,
+ TLS_NUM_PROTS = 2,
+};
+
+struct strp_msg {
+ int full_len;
+ int offset;
+};
+
+struct tls12_crypto_info_aes_ccm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[8];
+ unsigned char key[16];
+ unsigned char salt[4];
+ unsigned char rec_seq[8];
+};
+
+struct tls_msg {
+ struct strp_msg rxm;
+ u8 control;
+};
+
+struct trace_event_raw_tls_device_offload_set {
+ struct trace_entry ent;
+ struct sock *sk;
+ u64 rec_no;
+ int dir;
+ u32 tcp_seq;
+ int ret;
+ char __data[0];
+};
+
+struct trace_event_raw_tls_device_decrypted {
+ struct trace_entry ent;
+ struct sock *sk;
+ u64 rec_no;
+ u32 tcp_seq;
+ u32 rec_len;
+ bool encrypted;
+ bool decrypted;
+ char __data[0];
+};
+
+struct trace_event_raw_tls_device_rx_resync_send {
+ struct trace_entry ent;
+ struct sock *sk;
+ u64 rec_no;
+ u32 tcp_seq;
+ int sync_type;
+ char __data[0];
+};
+
+struct trace_event_raw_tls_device_rx_resync_nh_schedule {
+ struct trace_entry ent;
+ struct sock *sk;
+ char __data[0];
+};
+
+struct trace_event_raw_tls_device_rx_resync_nh_delay {
+ struct trace_entry ent;
+ struct sock *sk;
+ u32 sock_data;
+ u32 rec_len;
+ char __data[0];
+};
+
+struct trace_event_raw_tls_device_tx_resync_req {
+ struct trace_entry ent;
+ struct sock *sk;
+ u32 tcp_seq;
+ u32 exp_tcp_seq;
+ char __data[0];
+};
+
+struct trace_event_raw_tls_device_tx_resync_send {
+ struct trace_entry ent;
+ struct sock *sk;
+ u64 rec_no;
+ u32 tcp_seq;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_tls_device_offload_set {};
+
+struct trace_event_data_offsets_tls_device_decrypted {};
+
+struct trace_event_data_offsets_tls_device_rx_resync_send {};
+
+struct trace_event_data_offsets_tls_device_rx_resync_nh_schedule {};
+
+struct trace_event_data_offsets_tls_device_rx_resync_nh_delay {};
+
+struct trace_event_data_offsets_tls_device_tx_resync_req {};
+
+struct trace_event_data_offsets_tls_device_tx_resync_send {};
+
+typedef void (*btf_trace_tls_device_offload_set)(void *, struct sock *, int, u32, u8 *, int);
+
+typedef void (*btf_trace_tls_device_decrypted)(void *, struct sock *, u32, u8 *, u32, bool, bool);
+
+typedef void (*btf_trace_tls_device_rx_resync_send)(void *, struct sock *, u32, u8 *, int);
+
+typedef void (*btf_trace_tls_device_rx_resync_nh_schedule)(void *, struct sock *);
+
+typedef void (*btf_trace_tls_device_rx_resync_nh_delay)(void *, struct sock *, u32, u32);
+
+typedef void (*btf_trace_tls_device_tx_resync_req)(void *, struct sock *, u32, u32);
+
+typedef void (*btf_trace_tls_device_tx_resync_send)(void *, struct sock *, u32, u8 *);
+
+enum {
+ XFRM_STATE_VOID = 0,
+ XFRM_STATE_ACQ = 1,
+ XFRM_STATE_VALID = 2,
+ XFRM_STATE_ERROR = 3,
+ XFRM_STATE_EXPIRED = 4,
+ XFRM_STATE_DEAD = 5,
+};
+
+struct xfrm_if;
+
+struct xfrm_if_cb {
+ struct xfrm_if * (*decode_session)(struct sk_buff *, short unsigned int);
+};
+
+struct xfrm_if_parms {
+ int link;
+ u32 if_id;
+};
+
+struct xfrm_if {
+ struct xfrm_if *next;
+ struct net_device *dev;
+ struct net *net;
+ struct xfrm_if_parms p;
+ struct gro_cells gro_cells;
+};
+
+struct xfrm_policy_walk {
+ struct xfrm_policy_walk_entry walk;
+ u8 type;
+ u32 seq;
+};
+
+struct xfrmk_spdinfo {
+ u32 incnt;
+ u32 outcnt;
+ u32 fwdcnt;
+ u32 inscnt;
+ u32 outscnt;
+ u32 fwdscnt;
+ u32 spdhcnt;
+ u32 spdhmcnt;
+};
+
+struct xfrm_flo {
+ struct dst_entry *dst_orig;
+ u8 flags;
+};
+
+struct xfrm_pol_inexact_node {
+ struct rb_node node;
+ union {
+ xfrm_address_t addr;
+ struct callback_head rcu;
+ };
+ u8 prefixlen;
+ struct rb_root root;
+ struct hlist_head hhead;
+};
+
+struct xfrm_pol_inexact_key {
+ possible_net_t net;
+ u32 if_id;
+ u16 family;
+ u8 dir;
+ u8 type;
+};
+
+struct xfrm_pol_inexact_bin {
+ struct xfrm_pol_inexact_key k;
+ struct rhash_head head;
+ struct hlist_head hhead;
+ seqcount_t count;
+ struct rb_root root_d;
+ struct rb_root root_s;
+ struct list_head inexact_bins;
+ struct callback_head rcu;
+};
+
+enum xfrm_pol_inexact_candidate_type {
+ XFRM_POL_CAND_BOTH = 0,
+ XFRM_POL_CAND_SADDR = 1,
+ XFRM_POL_CAND_DADDR = 2,
+ XFRM_POL_CAND_ANY = 3,
+ XFRM_POL_CAND_MAX = 4,
+};
+
+struct xfrm_pol_inexact_candidates {
+ struct hlist_head *res[4];
+};
+
+enum {
+ XFRM_MSG_BASE = 16,
+ XFRM_MSG_NEWSA = 16,
+ XFRM_MSG_DELSA = 17,
+ XFRM_MSG_GETSA = 18,
+ XFRM_MSG_NEWPOLICY = 19,
+ XFRM_MSG_DELPOLICY = 20,
+ XFRM_MSG_GETPOLICY = 21,
+ XFRM_MSG_ALLOCSPI = 22,
+ XFRM_MSG_ACQUIRE = 23,
+ XFRM_MSG_EXPIRE = 24,
+ XFRM_MSG_UPDPOLICY = 25,
+ XFRM_MSG_UPDSA = 26,
+ XFRM_MSG_POLEXPIRE = 27,
+ XFRM_MSG_FLUSHSA = 28,
+ XFRM_MSG_FLUSHPOLICY = 29,
+ XFRM_MSG_NEWAE = 30,
+ XFRM_MSG_GETAE = 31,
+ XFRM_MSG_REPORT = 32,
+ XFRM_MSG_MIGRATE = 33,
+ XFRM_MSG_NEWSADINFO = 34,
+ XFRM_MSG_GETSADINFO = 35,
+ XFRM_MSG_NEWSPDINFO = 36,
+ XFRM_MSG_GETSPDINFO = 37,
+ XFRM_MSG_MAPPING = 38,
+ __XFRM_MSG_MAX = 39,
+};
+
+enum xfrm_ae_ftype_t {
+ XFRM_AE_UNSPEC = 0,
+ XFRM_AE_RTHR = 1,
+ XFRM_AE_RVAL = 2,
+ XFRM_AE_LVAL = 4,
+ XFRM_AE_ETHR = 8,
+ XFRM_AE_CR = 16,
+ XFRM_AE_CE = 32,
+ XFRM_AE_CU = 64,
+ __XFRM_AE_MAX = 65,
+};
+
+enum xfrm_nlgroups {
+ XFRMNLGRP_NONE = 0,
+ XFRMNLGRP_ACQUIRE = 1,
+ XFRMNLGRP_EXPIRE = 2,
+ XFRMNLGRP_SA = 3,
+ XFRMNLGRP_POLICY = 4,
+ XFRMNLGRP_AEVENTS = 5,
+ XFRMNLGRP_REPORT = 6,
+ XFRMNLGRP_MIGRATE = 7,
+ XFRMNLGRP_MAPPING = 8,
+ __XFRMNLGRP_MAX = 9,
+};
+
+enum {
+ XFRM_MODE_FLAG_TUNNEL = 1,
+};
+
+struct km_event {
+ union {
+ u32 hard;
+ u32 proto;
+ u32 byid;
+ u32 aevent;
+ u32 type;
+ } data;
+ u32 seq;
+ u32 portid;
+ u32 event;
+ struct net *net;
+};
+
+struct xfrm_kmaddress {
+ xfrm_address_t local;
+ xfrm_address_t remote;
+ u32 reserved;
+ u16 family;
+};
+
+struct xfrm_migrate {
+ xfrm_address_t old_daddr;
+ xfrm_address_t old_saddr;
+ xfrm_address_t new_daddr;
+ xfrm_address_t new_saddr;
+ u8 proto;
+ u8 mode;
+ u16 reserved;
+ u32 reqid;
+ u16 old_family;
+ u16 new_family;
+};
+
+struct xfrm_mgr {
+ struct list_head list;
+ int (*notify)(struct xfrm_state *, const struct km_event *);
+ int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *);
+ struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *);
+ int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16);
+ int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *);
+ int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *);
+ int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *);
+ bool (*is_alive)(const struct km_event *);
+};
+
+struct xfrmk_sadinfo {
+ u32 sadhcnt;
+ u32 sadhmcnt;
+ u32 sadcnt;
+};
+
+struct ip_beet_phdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 padlen;
+ __u8 reserved;
+};
+
+struct __ip6_tnl_parm {
+ char name[16];
+ int link;
+ __u8 proto;
+ __u8 encap_limit;
+ __u8 hop_limit;
+ bool collect_md;
+ __be32 flowinfo;
+ __u32 flags;
+ struct in6_addr laddr;
+ struct in6_addr raddr;
+ __be16 i_flags;
+ __be16 o_flags;
+ __be32 i_key;
+ __be32 o_key;
+ __u32 fwmark;
+ __u32 index;
+ __u8 erspan_ver;
+ __u8 dir;
+ __u16 hwid;
+};
+
+struct ip6_tnl {
+ struct ip6_tnl *next;
+ struct net_device *dev;
+ struct net *net;
+ struct __ip6_tnl_parm parms;
+ struct flowi fl;
+ struct dst_cache dst_cache;
+ struct gro_cells gro_cells;
+ int err_count;
+ long unsigned int err_time;
+ __u32 i_seqno;
+ __u32 o_seqno;
+ int hlen;
+ int tun_hlen;
+ int encap_hlen;
+ struct ip_tunnel_encap encap;
+ int mlink;
+};
+
+struct xfrm_skb_cb {
+ struct xfrm_tunnel_skb_cb header;
+ union {
+ struct {
+ __u32 low;
+ __u32 hi;
+ } output;
+ struct {
+ __be32 low;
+ __be32 hi;
+ } input;
+ } seq;
+};
+
+struct xfrm_trans_tasklet {
+ struct tasklet_struct tasklet;
+ struct sk_buff_head queue;
+};
+
+struct xfrm_trans_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+ int (*finish)(struct net *, struct sock *, struct sk_buff *);
+ struct net *net;
+};
+
+struct sadb_alg {
+ __u8 sadb_alg_id;
+ __u8 sadb_alg_ivlen;
+ __u16 sadb_alg_minbits;
+ __u16 sadb_alg_maxbits;
+ __u16 sadb_alg_reserved;
+};
+
+struct xfrm_algo_aead_info {
+ char *geniv;
+ u16 icv_truncbits;
+};
+
+struct xfrm_algo_auth_info {
+ u16 icv_truncbits;
+ u16 icv_fullbits;
+};
+
+struct xfrm_algo_encr_info {
+ char *geniv;
+ u16 blockbits;
+ u16 defkeybits;
+};
+
+struct xfrm_algo_comp_info {
+ u16 threshold;
+};
+
+struct xfrm_algo_desc {
+ char *name;
+ char *compat;
+ u8 available: 1;
+ u8 pfkey_supported: 1;
+ union {
+ struct xfrm_algo_aead_info aead;
+ struct xfrm_algo_auth_info auth;
+ struct xfrm_algo_encr_info encr;
+ struct xfrm_algo_comp_info comp;
+ } uinfo;
+ struct sadb_alg desc;
+};
+
+struct xfrm_algo_list {
+ struct xfrm_algo_desc *algs;
+ int entries;
+ u32 type;
+ u32 mask;
+};
+
+struct xfrm_aead_name {
+ const char *name;
+ int icvbits;
+};
+
+enum {
+ XFRM_SHARE_ANY = 0,
+ XFRM_SHARE_SESSION = 1,
+ XFRM_SHARE_USER = 2,
+ XFRM_SHARE_UNIQUE = 3,
+};
+
+struct xfrm_user_sec_ctx {
+ __u16 len;
+ __u16 exttype;
+ __u8 ctx_alg;
+ __u8 ctx_doi;
+ __u16 ctx_len;
+};
+
+struct xfrm_user_tmpl {
+ struct xfrm_id id;
+ __u16 family;
+ xfrm_address_t saddr;
+ __u32 reqid;
+ __u8 mode;
+ __u8 share;
+ __u8 optional;
+ __u32 aalgos;
+ __u32 ealgos;
+ __u32 calgos;
+};
+
+struct xfrm_userpolicy_type {
+ __u8 type;
+ __u16 reserved1;
+ __u8 reserved2;
+};
+
+enum xfrm_attr_type_t {
+ XFRMA_UNSPEC = 0,
+ XFRMA_ALG_AUTH = 1,
+ XFRMA_ALG_CRYPT = 2,
+ XFRMA_ALG_COMP = 3,
+ XFRMA_ENCAP = 4,
+ XFRMA_TMPL = 5,
+ XFRMA_SA = 6,
+ XFRMA_POLICY = 7,
+ XFRMA_SEC_CTX = 8,
+ XFRMA_LTIME_VAL = 9,
+ XFRMA_REPLAY_VAL = 10,
+ XFRMA_REPLAY_THRESH = 11,
+ XFRMA_ETIMER_THRESH = 12,
+ XFRMA_SRCADDR = 13,
+ XFRMA_COADDR = 14,
+ XFRMA_LASTUSED = 15,
+ XFRMA_POLICY_TYPE = 16,
+ XFRMA_MIGRATE = 17,
+ XFRMA_ALG_AEAD = 18,
+ XFRMA_KMADDRESS = 19,
+ XFRMA_ALG_AUTH_TRUNC = 20,
+ XFRMA_MARK = 21,
+ XFRMA_TFCPAD = 22,
+ XFRMA_REPLAY_ESN_VAL = 23,
+ XFRMA_SA_EXTRA_FLAGS = 24,
+ XFRMA_PROTO = 25,
+ XFRMA_ADDRESS_FILTER = 26,
+ XFRMA_PAD = 27,
+ XFRMA_OFFLOAD_DEV = 28,
+ XFRMA_SET_MARK = 29,
+ XFRMA_SET_MARK_MASK = 30,
+ XFRMA_IF_ID = 31,
+ __XFRMA_MAX = 32,
+};
+
+enum xfrm_sadattr_type_t {
+ XFRMA_SAD_UNSPEC = 0,
+ XFRMA_SAD_CNT = 1,
+ XFRMA_SAD_HINFO = 2,
+ __XFRMA_SAD_MAX = 3,
+};
+
+struct xfrmu_sadhinfo {
+ __u32 sadhcnt;
+ __u32 sadhmcnt;
+};
+
+enum xfrm_spdattr_type_t {
+ XFRMA_SPD_UNSPEC = 0,
+ XFRMA_SPD_INFO = 1,
+ XFRMA_SPD_HINFO = 2,
+ XFRMA_SPD_IPV4_HTHRESH = 3,
+ XFRMA_SPD_IPV6_HTHRESH = 4,
+ __XFRMA_SPD_MAX = 5,
+};
+
+struct xfrmu_spdinfo {
+ __u32 incnt;
+ __u32 outcnt;
+ __u32 fwdcnt;
+ __u32 inscnt;
+ __u32 outscnt;
+ __u32 fwdscnt;
+};
+
+struct xfrmu_spdhinfo {
+ __u32 spdhcnt;
+ __u32 spdhmcnt;
+};
+
+struct xfrmu_spdhthresh {
+ __u8 lbits;
+ __u8 rbits;
+};
+
+struct xfrm_usersa_info {
+ struct xfrm_selector sel;
+ struct xfrm_id id;
+ xfrm_address_t saddr;
+ struct xfrm_lifetime_cfg lft;
+ struct xfrm_lifetime_cur curlft;
+ struct xfrm_stats stats;
+ __u32 seq;
+ __u32 reqid;
+ __u16 family;
+ __u8 mode;
+ __u8 replay_window;
+ __u8 flags;
+};
+
+struct xfrm_usersa_id {
+ xfrm_address_t daddr;
+ __be32 spi;
+ __u16 family;
+ __u8 proto;
+};
+
+struct xfrm_aevent_id {
+ struct xfrm_usersa_id sa_id;
+ xfrm_address_t saddr;
+ __u32 flags;
+ __u32 reqid;
+};
+
+struct xfrm_userspi_info {
+ struct xfrm_usersa_info info;
+ __u32 min;
+ __u32 max;
+};
+
+struct xfrm_userpolicy_info {
+ struct xfrm_selector sel;
+ struct xfrm_lifetime_cfg lft;
+ struct xfrm_lifetime_cur curlft;
+ __u32 priority;
+ __u32 index;
+ __u8 dir;
+ __u8 action;
+ __u8 flags;
+ __u8 share;
+};
+
+struct xfrm_userpolicy_id {
+ struct xfrm_selector sel;
+ __u32 index;
+ __u8 dir;
+};
+
+struct xfrm_user_acquire {
+ struct xfrm_id id;
+ xfrm_address_t saddr;
+ struct xfrm_selector sel;
+ struct xfrm_userpolicy_info policy;
+ __u32 aalgos;
+ __u32 ealgos;
+ __u32 calgos;
+ __u32 seq;
+};
+
+struct xfrm_user_expire {
+ struct xfrm_usersa_info state;
+ __u8 hard;
+};
+
+struct xfrm_user_polexpire {
+ struct xfrm_userpolicy_info pol;
+ __u8 hard;
+};
+
+struct xfrm_usersa_flush {
+ __u8 proto;
+};
+
+struct xfrm_user_report {
+ __u8 proto;
+ struct xfrm_selector sel;
+};
+
+struct xfrm_user_mapping {
+ struct xfrm_usersa_id id;
+ __u32 reqid;
+ xfrm_address_t old_saddr;
+ xfrm_address_t new_saddr;
+ __be16 old_sport;
+ __be16 new_sport;
+};
+
+struct xfrm_user_offload {
+ int ifindex;
+ __u8 flags;
+};
+
+struct xfrm_dump_info {
+ struct sk_buff *in_skb;
+ struct sk_buff *out_skb;
+ u32 nlmsg_seq;
+ u16 nlmsg_flags;
+};
+
+struct xfrm_link {
+ int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*start)(struct netlink_callback *);
+ int (*dump)(struct sk_buff *, struct netlink_callback *);
+ int (*done)(struct netlink_callback *);
+ const struct nla_policy *nla_pol;
+ int nla_max;
+};
+
+struct unix_stream_read_state {
+ int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *);
+ struct socket *socket;
+ struct msghdr *msg;
+ struct pipe_inode_info *pipe;
+ size_t size;
+ int flags;
+ unsigned int splice_flags;
+};
+
+struct unix_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u16 pad;
+ __u32 udiag_states;
+ __u32 udiag_ino;
+ __u32 udiag_show;
+ __u32 udiag_cookie[2];
+};
+
+struct unix_diag_msg {
+ __u8 udiag_family;
+ __u8 udiag_type;
+ __u8 udiag_state;
+ __u8 pad;
+ __u32 udiag_ino;
+ __u32 udiag_cookie[2];
+};
+
+enum {
+ UNIX_DIAG_NAME = 0,
+ UNIX_DIAG_VFS = 1,
+ UNIX_DIAG_PEER = 2,
+ UNIX_DIAG_ICONS = 3,
+ UNIX_DIAG_RQLEN = 4,
+ UNIX_DIAG_MEMINFO = 5,
+ UNIX_DIAG_SHUTDOWN = 6,
+ UNIX_DIAG_UID = 7,
+ __UNIX_DIAG_MAX = 8,
+};
+
+struct unix_diag_vfs {
+ __u32 udiag_vfs_ino;
+ __u32 udiag_vfs_dev;
+};
+
+struct unix_diag_rqlen {
+ __u32 udiag_rqueue;
+ __u32 udiag_wqueue;
+};
+
+enum flowlabel_reflect {
+ FLOWLABEL_REFLECT_ESTABLISHED = 1,
+ FLOWLABEL_REFLECT_TCP_RESET = 2,
+ FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4,
+};
+
+struct in6_rtmsg {
+ struct in6_addr rtmsg_dst;
+ struct in6_addr rtmsg_src;
+ struct in6_addr rtmsg_gateway;
+ __u32 rtmsg_type;
+ __u16 rtmsg_dst_len;
+ __u16 rtmsg_src_len;
+ __u32 rtmsg_metric;
+ long unsigned int rtmsg_info;
+ __u32 rtmsg_flags;
+ int rtmsg_ifindex;
+};
+
+struct ac6_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev;
+ struct inet6_dev *idev;
+};
+
+struct ip6_fraglist_iter {
+ struct ipv6hdr *tmp_hdr;
+ struct sk_buff *frag;
+ int offset;
+ unsigned int hlen;
+ __be32 frag_id;
+ u8 nexthdr;
+};
+
+struct ip6_frag_state {
+ u8 *prevhdr;
+ unsigned int hlen;
+ unsigned int mtu;
+ unsigned int left;
+ int offset;
+ int ptr;
+ int hroom;
+ int troom;
+ __be32 frag_id;
+ u8 nexthdr;
+};
+
+struct ipcm6_cookie {
+ struct sockcm_cookie sockc;
+ __s16 hlimit;
+ __s16 tclass;
+ __s8 dontfrag;
+ struct ipv6_txoptions *opt;
+ __u16 gso_size;
+};
+
+enum {
+ IFLA_INET6_UNSPEC = 0,
+ IFLA_INET6_FLAGS = 1,
+ IFLA_INET6_CONF = 2,
+ IFLA_INET6_STATS = 3,
+ IFLA_INET6_MCAST = 4,
+ IFLA_INET6_CACHEINFO = 5,
+ IFLA_INET6_ICMP6STATS = 6,
+ IFLA_INET6_TOKEN = 7,
+ IFLA_INET6_ADDR_GEN_MODE = 8,
+ __IFLA_INET6_MAX = 9,
+};
+
+enum in6_addr_gen_mode {
+ IN6_ADDR_GEN_MODE_EUI64 = 0,
+ IN6_ADDR_GEN_MODE_NONE = 1,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2,
+ IN6_ADDR_GEN_MODE_RANDOM = 3,
+};
+
+struct ifla_cacheinfo {
+ __u32 max_reasm_len;
+ __u32 tstamp;
+ __u32 reachable_time;
+ __u32 retrans_time;
+};
+
+struct wpan_phy;
+
+struct wpan_dev_header_ops;
+
+struct wpan_dev {
+ struct wpan_phy *wpan_phy;
+ int iftype;
+ struct list_head list;
+ struct net_device *netdev;
+ const struct wpan_dev_header_ops *header_ops;
+ struct net_device *lowpan_dev;
+ u32 identifier;
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 extended_addr;
+ atomic_t bsn;
+ atomic_t dsn;
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+ bool lbt;
+ bool promiscuous_mode;
+ bool ackreq;
+};
+
+struct prefixmsg {
+ unsigned char prefix_family;
+ unsigned char prefix_pad1;
+ short unsigned int prefix_pad2;
+ int prefix_ifindex;
+ unsigned char prefix_type;
+ unsigned char prefix_len;
+ unsigned char prefix_flags;
+ unsigned char prefix_pad3;
+};
+
+enum {
+ PREFIX_UNSPEC = 0,
+ PREFIX_ADDRESS = 1,
+ PREFIX_CACHEINFO = 2,
+ __PREFIX_MAX = 3,
+};
+
+struct prefix_cacheinfo {
+ __u32 preferred_time;
+ __u32 valid_time;
+};
+
+struct in6_ifreq {
+ struct in6_addr ifr6_addr;
+ __u32 ifr6_prefixlen;
+ int ifr6_ifindex;
+};
+
+enum {
+ DEVCONF_FORWARDING = 0,
+ DEVCONF_HOPLIMIT = 1,
+ DEVCONF_MTU6 = 2,
+ DEVCONF_ACCEPT_RA = 3,
+ DEVCONF_ACCEPT_REDIRECTS = 4,
+ DEVCONF_AUTOCONF = 5,
+ DEVCONF_DAD_TRANSMITS = 6,
+ DEVCONF_RTR_SOLICITS = 7,
+ DEVCONF_RTR_SOLICIT_INTERVAL = 8,
+ DEVCONF_RTR_SOLICIT_DELAY = 9,
+ DEVCONF_USE_TEMPADDR = 10,
+ DEVCONF_TEMP_VALID_LFT = 11,
+ DEVCONF_TEMP_PREFERED_LFT = 12,
+ DEVCONF_REGEN_MAX_RETRY = 13,
+ DEVCONF_MAX_DESYNC_FACTOR = 14,
+ DEVCONF_MAX_ADDRESSES = 15,
+ DEVCONF_FORCE_MLD_VERSION = 16,
+ DEVCONF_ACCEPT_RA_DEFRTR = 17,
+ DEVCONF_ACCEPT_RA_PINFO = 18,
+ DEVCONF_ACCEPT_RA_RTR_PREF = 19,
+ DEVCONF_RTR_PROBE_INTERVAL = 20,
+ DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21,
+ DEVCONF_PROXY_NDP = 22,
+ DEVCONF_OPTIMISTIC_DAD = 23,
+ DEVCONF_ACCEPT_SOURCE_ROUTE = 24,
+ DEVCONF_MC_FORWARDING = 25,
+ DEVCONF_DISABLE_IPV6 = 26,
+ DEVCONF_ACCEPT_DAD = 27,
+ DEVCONF_FORCE_TLLAO = 28,
+ DEVCONF_NDISC_NOTIFY = 29,
+ DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30,
+ DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31,
+ DEVCONF_SUPPRESS_FRAG_NDISC = 32,
+ DEVCONF_ACCEPT_RA_FROM_LOCAL = 33,
+ DEVCONF_USE_OPTIMISTIC = 34,
+ DEVCONF_ACCEPT_RA_MTU = 35,
+ DEVCONF_STABLE_SECRET = 36,
+ DEVCONF_USE_OIF_ADDRS_ONLY = 37,
+ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38,
+ DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39,
+ DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40,
+ DEVCONF_DROP_UNSOLICITED_NA = 41,
+ DEVCONF_KEEP_ADDR_ON_DOWN = 42,
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43,
+ DEVCONF_SEG6_ENABLED = 44,
+ DEVCONF_SEG6_REQUIRE_HMAC = 45,
+ DEVCONF_ENHANCED_DAD = 46,
+ DEVCONF_ADDR_GEN_MODE = 47,
+ DEVCONF_DISABLE_POLICY = 48,
+ DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49,
+ DEVCONF_NDISC_TCLASS = 50,
+ DEVCONF_RPL_SEG_ENABLED = 51,
+ DEVCONF_MAX = 52,
+};
+
+enum {
+ INET6_IFADDR_STATE_PREDAD = 0,
+ INET6_IFADDR_STATE_DAD = 1,
+ INET6_IFADDR_STATE_POSTDAD = 2,
+ INET6_IFADDR_STATE_ERRDAD = 3,
+ INET6_IFADDR_STATE_DEAD = 4,
+};
+
+enum nl802154_cca_modes {
+ __NL802154_CCA_INVALID = 0,
+ NL802154_CCA_ENERGY = 1,
+ NL802154_CCA_CARRIER = 2,
+ NL802154_CCA_ENERGY_CARRIER = 3,
+ NL802154_CCA_ALOHA = 4,
+ NL802154_CCA_UWB_SHR = 5,
+ NL802154_CCA_UWB_MULTIPLEXED = 6,
+ __NL802154_CCA_ATTR_AFTER_LAST = 7,
+ NL802154_CCA_ATTR_MAX = 6,
+};
+
+enum nl802154_cca_opts {
+ NL802154_CCA_OPT_ENERGY_CARRIER_AND = 0,
+ NL802154_CCA_OPT_ENERGY_CARRIER_OR = 1,
+ __NL802154_CCA_OPT_ATTR_AFTER_LAST = 2,
+ NL802154_CCA_OPT_ATTR_MAX = 1,
+};
+
+enum nl802154_supported_bool_states {
+ NL802154_SUPPORTED_BOOL_FALSE = 0,
+ NL802154_SUPPORTED_BOOL_TRUE = 1,
+ __NL802154_SUPPORTED_BOOL_INVALD = 2,
+ NL802154_SUPPORTED_BOOL_BOTH = 3,
+ __NL802154_SUPPORTED_BOOL_AFTER_LAST = 4,
+ NL802154_SUPPORTED_BOOL_MAX = 3,
+};
+
+struct wpan_phy_supported {
+ u32 channels[32];
+ u32 cca_modes;
+ u32 cca_opts;
+ u32 iftypes;
+ enum nl802154_supported_bool_states lbt;
+ u8 min_minbe;
+ u8 max_minbe;
+ u8 min_maxbe;
+ u8 max_maxbe;
+ u8 min_csma_backoffs;
+ u8 max_csma_backoffs;
+ s8 min_frame_retries;
+ s8 max_frame_retries;
+ size_t tx_powers_size;
+ size_t cca_ed_levels_size;
+ const s32 *tx_powers;
+ const s32 *cca_ed_levels;
+};
+
+struct wpan_phy_cca {
+ enum nl802154_cca_modes mode;
+ enum nl802154_cca_opts opt;
+};
+
+struct wpan_phy {
+ const void *privid;
+ u32 flags;
+ u8 current_channel;
+ u8 current_page;
+ struct wpan_phy_supported supported;
+ s32 transmit_power;
+ struct wpan_phy_cca cca;
+ __le64 perm_extended_addr;
+ s32 cca_ed_level;
+ u8 symbol_duration;
+ u16 lifs_period;
+ u16 sifs_period;
+ struct device dev;
+ possible_net_t _net;
+ long: 64;
+ long: 64;
+ long: 64;
+ char priv[0];
+};
+
+struct ieee802154_addr {
+ u8 mode;
+ __le16 pan_id;
+ union {
+ __le16 short_addr;
+ __le64 extended_addr;
+ };
+};
+
+struct wpan_dev_header_ops {
+ int (*create)(struct sk_buff *, struct net_device *, const struct ieee802154_addr *, const struct ieee802154_addr *, unsigned int);
+};
+
+union fwnet_hwaddr {
+ u8 u[16];
+ struct {
+ __be64 uniq_id;
+ u8 max_rec;
+ u8 sspd;
+ __be16 fifo_hi;
+ __be32 fifo_lo;
+ } uc;
+};
+
+struct in6_validator_info {
+ struct in6_addr i6vi_addr;
+ struct inet6_dev *i6vi_dev;
+ struct netlink_ext_ack *extack;
+};
+
+struct ifa6_config {
+ const struct in6_addr *pfx;
+ unsigned int plen;
+ const struct in6_addr *peer_pfx;
+ u32 rt_priority;
+ u32 ifa_flags;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u16 scope;
+};
+
+enum cleanup_prefix_rt_t {
+ CLEANUP_PREFIX_RT_NOP = 0,
+ CLEANUP_PREFIX_RT_DEL = 1,
+ CLEANUP_PREFIX_RT_EXPIRE = 2,
+};
+
+enum {
+ IPV6_SADDR_RULE_INIT = 0,
+ IPV6_SADDR_RULE_LOCAL = 1,
+ IPV6_SADDR_RULE_SCOPE = 2,
+ IPV6_SADDR_RULE_PREFERRED = 3,
+ IPV6_SADDR_RULE_OIF = 4,
+ IPV6_SADDR_RULE_LABEL = 5,
+ IPV6_SADDR_RULE_PRIVACY = 6,
+ IPV6_SADDR_RULE_ORCHID = 7,
+ IPV6_SADDR_RULE_PREFIX = 8,
+ IPV6_SADDR_RULE_MAX = 9,
+};
+
+struct ipv6_saddr_score {
+ int rule;
+ int addr_type;
+ struct inet6_ifaddr *ifa;
+ long unsigned int scorebits[1];
+ int scopedist;
+ int matchlen;
+};
+
+struct ipv6_saddr_dst {
+ const struct in6_addr *addr;
+ int ifindex;
+ int scope;
+ int label;
+ unsigned int prefs;
+};
+
+struct if6_iter_state {
+ struct seq_net_private p;
+ int bucket;
+ int offset;
+};
+
+enum addr_type_t {
+ UNICAST_ADDR = 0,
+ MULTICAST_ADDR = 1,
+ ANYCAST_ADDR = 2,
+};
+
+struct inet6_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+ int ifindex;
+ enum addr_type_t type;
+};
+
+enum {
+ DAD_PROCESS = 0,
+ DAD_BEGIN = 1,
+ DAD_ABORT = 2,
+};
+
+struct ifaddrlblmsg {
+ __u8 ifal_family;
+ __u8 __ifal_reserved;
+ __u8 ifal_prefixlen;
+ __u8 ifal_flags;
+ __u32 ifal_index;
+ __u32 ifal_seq;
+};
+
+enum {
+ IFAL_ADDRESS = 1,
+ IFAL_LABEL = 2,
+ __IFAL_MAX = 3,
+};
+
+struct ip6addrlbl_entry {
+ struct in6_addr prefix;
+ int prefixlen;
+ int ifindex;
+ int addrtype;
+ u32 label;
+ struct hlist_node list;
+ struct callback_head rcu;
+};
+
+struct ip6addrlbl_init_table {
+ const struct in6_addr *prefix;
+ int prefixlen;
+ u32 label;
+};
+
+struct rd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ struct in6_addr dest;
+ __u8 opt[0];
+};
+
+struct fib6_gc_args {
+ int timeout;
+ int more;
+};
+
+struct rt6_exception {
+ struct hlist_node hlist;
+ struct rt6_info *rt6i;
+ long unsigned int stamp;
+ struct callback_head rcu;
+};
+
+struct rt6_rtnl_dump_arg {
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ struct net *net;
+ struct fib_dump_filter filter;
+};
+
+struct netevent_redirect {
+ struct dst_entry *old;
+ struct dst_entry *new;
+ struct neighbour *neigh;
+ const void *daddr;
+};
+
+struct trace_event_raw_fib6_table_lookup {
+ struct trace_entry ent;
+ u32 tb_id;
+ int err;
+ int oif;
+ int iif;
+ __u8 tos;
+ __u8 scope;
+ __u8 flags;
+ __u8 src[16];
+ __u8 dst[16];
+ u16 sport;
+ u16 dport;
+ u8 proto;
+ u8 rt_type;
+ u32 __data_loc_name;
+ __u8 gw[16];
+ char __data[0];
+};
+
+struct trace_event_data_offsets_fib6_table_lookup {
+ u32 name;
+};
+
+typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *);
+
+enum rt6_nud_state {
+ RT6_NUD_FAIL_HARD = -3,
+ RT6_NUD_FAIL_PROBE = -2,
+ RT6_NUD_FAIL_DO_RR = -1,
+ RT6_NUD_SUCCEED = 1,
+};
+
+struct fib6_nh_dm_arg {
+ struct net *net;
+ const struct in6_addr *saddr;
+ int oif;
+ int flags;
+ struct fib6_nh *nh;
+};
+
+struct fib6_nh_frl_arg {
+ u32 flags;
+ int oif;
+ int strict;
+ int *mpri;
+ bool *do_rr;
+ struct fib6_nh *nh;
+};
+
+struct fib6_nh_excptn_arg {
+ struct rt6_info *rt;
+ int plen;
+};
+
+struct fib6_nh_match_arg {
+ const struct net_device *dev;
+ const struct in6_addr *gw;
+ struct fib6_nh *match;
+};
+
+struct fib6_nh_age_excptn_arg {
+ struct fib6_gc_args *gc_args;
+ long unsigned int now;
+};
+
+struct fib6_nh_rd_arg {
+ struct fib6_result *res;
+ struct flowi6 *fl6;
+ const struct in6_addr *gw;
+ struct rt6_info **ret;
+};
+
+struct ip6rd_flowi {
+ struct flowi6 fl6;
+ struct in6_addr gateway;
+};
+
+struct fib6_nh_del_cached_rt_arg {
+ struct fib6_config *cfg;
+ struct fib6_info *f6i;
+};
+
+struct arg_dev_net_ip {
+ struct net_device *dev;
+ struct net *net;
+ struct in6_addr *addr;
+};
+
+struct arg_netdev_event {
+ const struct net_device *dev;
+ union {
+ unsigned char nh_flags;
+ long unsigned int event;
+ };
+};
+
+struct rt6_mtu_change_arg {
+ struct net_device *dev;
+ unsigned int mtu;
+ struct fib6_info *f6i;
+};
+
+struct rt6_nh {
+ struct fib6_info *fib6_info;
+ struct fib6_config r_cfg;
+ struct list_head next;
+};
+
+struct fib6_nh_exception_dump_walker {
+ struct rt6_rtnl_dump_arg *dump;
+ struct fib6_info *rt;
+ unsigned int flags;
+ unsigned int skip;
+ unsigned int count;
+};
+
+enum fib6_walk_state {
+ FWS_L = 0,
+ FWS_R = 1,
+ FWS_C = 2,
+ FWS_U = 3,
+};
+
+struct fib6_walker {
+ struct list_head lh;
+ struct fib6_node *root;
+ struct fib6_node *node;
+ struct fib6_info *leaf;
+ enum fib6_walk_state state;
+ unsigned int skip;
+ unsigned int count;
+ unsigned int skip_in_node;
+ int (*func)(struct fib6_walker *);
+ void *args;
+};
+
+typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int);
+
+struct fib6_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct fib6_info *rt;
+ unsigned int nsiblings;
+};
+
+struct ipv6_route_iter {
+ struct seq_net_private p;
+ struct fib6_walker w;
+ loff_t skip;
+ struct fib6_table *tbl;
+ int sernum;
+};
+
+struct bpf_iter__ipv6_route {
+ union {
+ struct bpf_iter_meta *meta;
+ };
+ union {
+ struct fib6_info *rt;
+ };
+};
+
+struct fib6_cleaner {
+ struct fib6_walker w;
+ struct net *net;
+ int (*func)(struct fib6_info *, void *);
+ int sernum;
+ void *arg;
+ bool skip_notify;
+};
+
+enum {
+ FIB6_NO_SERNUM_CHANGE = 0,
+};
+
+struct fib6_dump_arg {
+ struct net *net;
+ struct notifier_block *nb;
+ struct netlink_ext_ack *extack;
+};
+
+struct fib6_nh_pcpu_arg {
+ struct fib6_info *from;
+ const struct fib6_table *table;
+};
+
+struct lookup_args {
+ int offset;
+ const struct in6_addr *addr;
+};
+
+struct ipv6_mreq {
+ struct in6_addr ipv6mr_multiaddr;
+ int ipv6mr_ifindex;
+};
+
+struct in6_flowlabel_req {
+ struct in6_addr flr_dst;
+ __be32 flr_label;
+ __u8 flr_action;
+ __u8 flr_share;
+ __u16 flr_flags;
+ __u16 flr_expires;
+ __u16 flr_linger;
+ __u32 __flr_pad;
+};
+
+struct ip6_mtuinfo {
+ struct sockaddr_in6 ip6m_addr;
+ __u32 ip6m_mtu;
+};
+
+struct nduseroptmsg {
+ unsigned char nduseropt_family;
+ unsigned char nduseropt_pad1;
+ short unsigned int nduseropt_opts_len;
+ int nduseropt_ifindex;
+ __u8 nduseropt_icmp_type;
+ __u8 nduseropt_icmp_code;
+ short unsigned int nduseropt_pad2;
+ unsigned int nduseropt_pad3;
+};
+
+enum {
+ NDUSEROPT_UNSPEC = 0,
+ NDUSEROPT_SRCADDR = 1,
+ __NDUSEROPT_MAX = 2,
+};
+
+struct nd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ __u8 opt[0];
+};
+
+struct rs_msg {
+ struct icmp6hdr icmph;
+ __u8 opt[0];
+};
+
+struct ra_msg {
+ struct icmp6hdr icmph;
+ __be32 reachable_time;
+ __be32 retrans_timer;
+};
+
+struct icmp6_filter {
+ __u32 data[8];
+};
+
+struct raw6_sock {
+ struct inet_sock inet;
+ __u32 checksum;
+ __u32 offset;
+ struct icmp6_filter filter;
+ __u32 ip6mr_table;
+ struct ipv6_pinfo inet6;
+};
+
+struct raw6_frag_vec {
+ struct msghdr *msg;
+ int hlen;
+ char c[4];
+};
+
+struct icmpv6_msg {
+ struct sk_buff *skb;
+ int offset;
+ uint8_t type;
+};
+
+struct icmp6_err {
+ int err;
+ int fatal;
+};
+
+struct mld_msg {
+ struct icmp6hdr mld_hdr;
+ struct in6_addr mld_mca;
+};
+
+struct mld2_grec {
+ __u8 grec_type;
+ __u8 grec_auxwords;
+ __be16 grec_nsrcs;
+ struct in6_addr grec_mca;
+ struct in6_addr grec_src[0];
+};
+
+struct mld2_report {
+ struct icmp6hdr mld2r_hdr;
+ struct mld2_grec mld2r_grec[0];
+};
+
+struct mld2_query {
+ struct icmp6hdr mld2q_hdr;
+ struct in6_addr mld2q_mca;
+ __u8 mld2q_qrv: 3;
+ __u8 mld2q_suppress: 1;
+ __u8 mld2q_resv2: 4;
+ __u8 mld2q_qqic;
+ __be16 mld2q_nsrcs;
+ struct in6_addr mld2q_srcs[0];
+};
+
+struct igmp6_mc_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev;
+ struct inet6_dev *idev;
+};
+
+struct igmp6_mcf_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev;
+ struct inet6_dev *idev;
+ struct ifmcaddr6 *im;
+};
+
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER = 0,
+ IP6_DEFRAG_CONNTRACK_IN = 1,
+ __IP6_DEFRAG_CONNTRACK_IN = 65536,
+ IP6_DEFRAG_CONNTRACK_OUT = 65537,
+ __IP6_DEFRAG_CONNTRACK_OUT = 131072,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073,
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608,
+};
+
+struct frag_queue {
+ struct inet_frag_queue q;
+ int iif;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+struct tcp6_pseudohdr {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ __be32 len;
+ __be32 protocol;
+};
+
+struct rt0_hdr {
+ struct ipv6_rt_hdr rt_hdr;
+ __u32 reserved;
+ struct in6_addr addr[0];
+};
+
+struct ipv6_rpl_sr_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 type;
+ __u8 segments_left;
+ __u32 cmpre: 4;
+ __u32 cmpri: 4;
+ __u32 reserved: 4;
+ __u32 pad: 4;
+ __u32 reserved1: 16;
+ union {
+ struct in6_addr addr[0];
+ __u8 data[0];
+ } segments;
+};
+
+struct tlvtype_proc {
+ int type;
+ bool (*func)(struct sk_buff *, int);
+};
+
+struct ip6fl_iter_state {
+ struct seq_net_private p;
+ struct pid_namespace *pid_ns;
+ int bucket;
+};
+
+struct sr6_tlv {
+ __u8 type;
+ __u8 len;
+ __u8 data[0];
+};
+
+enum {
+ SEG6_ATTR_UNSPEC = 0,
+ SEG6_ATTR_DST = 1,
+ SEG6_ATTR_DSTLEN = 2,
+ SEG6_ATTR_HMACKEYID = 3,
+ SEG6_ATTR_SECRET = 4,
+ SEG6_ATTR_SECRETLEN = 5,
+ SEG6_ATTR_ALGID = 6,
+ SEG6_ATTR_HMACINFO = 7,
+ __SEG6_ATTR_MAX = 8,
+};
+
+enum {
+ SEG6_CMD_UNSPEC = 0,
+ SEG6_CMD_SETHMAC = 1,
+ SEG6_CMD_DUMPHMAC = 2,
+ SEG6_CMD_SET_TUNSRC = 3,
+ SEG6_CMD_GET_TUNSRC = 4,
+ __SEG6_CMD_MAX = 5,
+};
+
+struct xfrm6_protocol {
+ int (*handler)(struct sk_buff *);
+ int (*input_handler)(struct sk_buff *, int, __be32, int);
+ int (*cb_handler)(struct sk_buff *, int);
+ int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32);
+ struct xfrm6_protocol *next;
+ int priority;
+};
+
+struct br_input_skb_cb {
+ struct net_device *brdev;
+ u16 frag_max_size;
+ u8 igmp;
+ u8 mrouters_only: 1;
+ u8 proxyarp_replied: 1;
+ u8 src_port_isolated: 1;
+ int offload_fwd_mark;
+};
+
+struct nf_bridge_frag_data;
+
+enum {
+ IFLA_IPTUN_UNSPEC = 0,
+ IFLA_IPTUN_LINK = 1,
+ IFLA_IPTUN_LOCAL = 2,
+ IFLA_IPTUN_REMOTE = 3,
+ IFLA_IPTUN_TTL = 4,
+ IFLA_IPTUN_TOS = 5,
+ IFLA_IPTUN_ENCAP_LIMIT = 6,
+ IFLA_IPTUN_FLOWINFO = 7,
+ IFLA_IPTUN_FLAGS = 8,
+ IFLA_IPTUN_PROTO = 9,
+ IFLA_IPTUN_PMTUDISC = 10,
+ IFLA_IPTUN_6RD_PREFIX = 11,
+ IFLA_IPTUN_6RD_RELAY_PREFIX = 12,
+ IFLA_IPTUN_6RD_PREFIXLEN = 13,
+ IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14,
+ IFLA_IPTUN_ENCAP_TYPE = 15,
+ IFLA_IPTUN_ENCAP_FLAGS = 16,
+ IFLA_IPTUN_ENCAP_SPORT = 17,
+ IFLA_IPTUN_ENCAP_DPORT = 18,
+ IFLA_IPTUN_COLLECT_METADATA = 19,
+ IFLA_IPTUN_FWMARK = 20,
+ __IFLA_IPTUN_MAX = 21,
+};
+
+struct ip_tunnel_prl {
+ __be32 addr;
+ __u16 flags;
+ __u16 __reserved;
+ __u32 datalen;
+ __u32 __reserved2;
+};
+
+struct sit_net {
+ struct ip_tunnel *tunnels_r_l[16];
+ struct ip_tunnel *tunnels_r[16];
+ struct ip_tunnel *tunnels_l[16];
+ struct ip_tunnel *tunnels_wc[1];
+ struct ip_tunnel **tunnels[4];
+ struct net_device *fb_tunnel_dev;
+};
+
+enum {
+ IP6_FH_F_FRAG = 1,
+ IP6_FH_F_AUTH = 2,
+ IP6_FH_F_SKIP_RH = 4,
+};
+
+typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *);
+
+struct sockaddr_pkt {
+ short unsigned int spkt_family;
+ unsigned char spkt_device[14];
+ __be16 spkt_protocol;
+};
+
+struct sockaddr_ll {
+ short unsigned int sll_family;
+ __be16 sll_protocol;
+ int sll_ifindex;
+ short unsigned int sll_hatype;
+ unsigned char sll_pkttype;
+ unsigned char sll_halen;
+ unsigned char sll_addr[8];
+};
+
+struct tpacket_stats {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+};
+
+struct tpacket_stats_v3 {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+ unsigned int tp_freeze_q_cnt;
+};
+
+struct tpacket_rollover_stats {
+ __u64 tp_all;
+ __u64 tp_huge;
+ __u64 tp_failed;
+};
+
+union tpacket_stats_u {
+ struct tpacket_stats stats1;
+ struct tpacket_stats_v3 stats3;
+};
+
+struct tpacket_auxdata {
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u16 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+};
+
+struct tpacket_hdr {
+ long unsigned int tp_status;
+ unsigned int tp_len;
+ unsigned int tp_snaplen;
+ short unsigned int tp_mac;
+ short unsigned int tp_net;
+ unsigned int tp_sec;
+ unsigned int tp_usec;
+};
+
+struct tpacket2_hdr {
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u16 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u8 tp_padding[4];
+};
+
+struct tpacket_hdr_variant1 {
+ __u32 tp_rxhash;
+ __u32 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u16 tp_padding;
+};
+
+struct tpacket3_hdr {
+ __u32 tp_next_offset;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u32 tp_snaplen;
+ __u32 tp_len;
+ __u32 tp_status;
+ __u16 tp_mac;
+ __u16 tp_net;
+ union {
+ struct tpacket_hdr_variant1 hv1;
+ };
+ __u8 tp_padding[8];
+};
+
+struct tpacket_bd_ts {
+ unsigned int ts_sec;
+ union {
+ unsigned int ts_usec;
+ unsigned int ts_nsec;
+ };
+};
+
+struct tpacket_hdr_v1 {
+ __u32 block_status;
+ __u32 num_pkts;
+ __u32 offset_to_first_pkt;
+ __u32 blk_len;
+ __u64 seq_num;
+ struct tpacket_bd_ts ts_first_pkt;
+ struct tpacket_bd_ts ts_last_pkt;
+};
+
+union tpacket_bd_header_u {
+ struct tpacket_hdr_v1 bh1;
+};
+
+struct tpacket_block_desc {
+ __u32 version;
+ __u32 offset_to_priv;
+ union tpacket_bd_header_u hdr;
+};
+
+enum tpacket_versions {
+ TPACKET_V1 = 0,
+ TPACKET_V2 = 1,
+ TPACKET_V3 = 2,
+};
+
+struct tpacket_req {
+ unsigned int tp_block_size;
+ unsigned int tp_block_nr;
+ unsigned int tp_frame_size;
+ unsigned int tp_frame_nr;
+};
+
+struct tpacket_req3 {
+ unsigned int tp_block_size;
+ unsigned int tp_block_nr;
+ unsigned int tp_frame_size;
+ unsigned int tp_frame_nr;
+ unsigned int tp_retire_blk_tov;
+ unsigned int tp_sizeof_priv;
+ unsigned int tp_feature_req_word;
+};
+
+union tpacket_req_u {
+ struct tpacket_req req;
+ struct tpacket_req3 req3;
+};
+
+struct packet_mclist {
+ struct packet_mclist *next;
+ int ifindex;
+ int count;
+ short unsigned int type;
+ short unsigned int alen;
+ unsigned char addr[32];
+};
+
+struct pgv;
+
+struct tpacket_kbdq_core {
+ struct pgv *pkbdq;
+ unsigned int feature_req_word;
+ unsigned int hdrlen;
+ unsigned char reset_pending_on_curr_blk;
+ unsigned char delete_blk_timer;
+ short unsigned int kactive_blk_num;
+ short unsigned int blk_sizeof_priv;
+ short unsigned int last_kactive_blk_num;
+ char *pkblk_start;
+ char *pkblk_end;
+ int kblk_size;
+ unsigned int max_frame_len;
+ unsigned int knum_blocks;
+ uint64_t knxt_seq_num;
+ char *prev;
+ char *nxt_offset;
+ struct sk_buff *skb;
+ atomic_t blk_fill_in_prog;
+ short unsigned int retire_blk_tov;
+ short unsigned int version;
+ long unsigned int tov_in_jiffies;
+ struct timer_list retire_blk_timer;
+};
+
+struct pgv {
+ char *buffer;
+};
+
+struct packet_ring_buffer {
+ struct pgv *pg_vec;
+ unsigned int head;
+ unsigned int frames_per_block;
+ unsigned int frame_size;
+ unsigned int frame_max;
+ unsigned int pg_vec_order;
+ unsigned int pg_vec_pages;
+ unsigned int pg_vec_len;
+ unsigned int *pending_refcnt;
+ union {
+ long unsigned int *rx_owner_map;
+ struct tpacket_kbdq_core prb_bdqc;
+ };
+};
+
+struct packet_fanout {
+ possible_net_t net;
+ unsigned int num_members;
+ u16 id;
+ u8 type;
+ u8 flags;
+ union {
+ atomic_t rr_cur;
+ struct bpf_prog *bpf_prog;
+ };
+ struct list_head list;
+ struct sock *arr[256];
+ spinlock_t lock;
+ refcount_t sk_ref;
+ long: 32;
+ long: 64;
+ long: 64;
+ struct packet_type prot_hook;
+};
+
+struct packet_rollover {
+ int sock;
+ atomic_long_t num;
+ atomic_long_t num_huge;
+ atomic_long_t num_failed;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ u32 history[16];
+};
+
+struct packet_sock {
+ struct sock sk;
+ struct packet_fanout *fanout;
+ union tpacket_stats_u stats;
+ struct packet_ring_buffer rx_ring;
+ struct packet_ring_buffer tx_ring;
+ int copy_thresh;
+ spinlock_t bind_lock;
+ struct mutex pg_vec_lock;
+ unsigned int running;
+ unsigned int auxdata: 1;
+ unsigned int origdev: 1;
+ unsigned int has_vnet_hdr: 1;
+ unsigned int tp_loss: 1;
+ unsigned int tp_tx_has_off: 1;
+ int pressure;
+ int ifindex;
+ __be16 num;
+ struct packet_rollover *rollover;
+ struct packet_mclist *mclist;
+ atomic_t mapped;
+ enum tpacket_versions tp_version;
+ unsigned int tp_hdrlen;
+ unsigned int tp_reserve;
+ unsigned int tp_tstamp;
+ struct completion skb_completion;
+ struct net_device *cached_dev;
+ int (*xmit)(struct sk_buff *);
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ struct packet_type prot_hook;
+ atomic_t tp_drops;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct packet_mreq_max {
+ int mr_ifindex;
+ short unsigned int mr_type;
+ short unsigned int mr_alen;
+ unsigned char mr_address[32];
+};
+
+union tpacket_uhdr {
+ struct tpacket_hdr *h1;
+ struct tpacket2_hdr *h2;
+ struct tpacket3_hdr *h3;
+ void *raw;
+};
+
+struct packet_skb_cb {
+ union {
+ struct sockaddr_pkt pkt;
+ union {
+ unsigned int origlen;
+ struct sockaddr_ll ll;
+ };
+ } sa;
+};
+
+struct packet_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u16 pad;
+ __u32 pdiag_ino;
+ __u32 pdiag_show;
+ __u32 pdiag_cookie[2];
+};
+
+struct packet_diag_msg {
+ __u8 pdiag_family;
+ __u8 pdiag_type;
+ __u16 pdiag_num;
+ __u32 pdiag_ino;
+ __u32 pdiag_cookie[2];
+};
+
+enum {
+ PACKET_DIAG_INFO = 0,
+ PACKET_DIAG_MCLIST = 1,
+ PACKET_DIAG_RX_RING = 2,
+ PACKET_DIAG_TX_RING = 3,
+ PACKET_DIAG_FANOUT = 4,
+ PACKET_DIAG_UID = 5,
+ PACKET_DIAG_MEMINFO = 6,
+ PACKET_DIAG_FILTER = 7,
+ __PACKET_DIAG_MAX = 8,
+};
+
+struct packet_diag_info {
+ __u32 pdi_index;
+ __u32 pdi_version;
+ __u32 pdi_reserve;
+ __u32 pdi_copy_thresh;
+ __u32 pdi_tstamp;
+ __u32 pdi_flags;
+};
+
+struct packet_diag_mclist {
+ __u32 pdmc_index;
+ __u32 pdmc_count;
+ __u16 pdmc_type;
+ __u16 pdmc_alen;
+ __u8 pdmc_addr[32];
+};
+
+struct packet_diag_ring {
+ __u32 pdr_block_size;
+ __u32 pdr_block_nr;
+ __u32 pdr_frame_size;
+ __u32 pdr_frame_nr;
+ __u32 pdr_retire_tmo;
+ __u32 pdr_sizeof_priv;
+ __u32 pdr_features;
+};
+
+enum switchdev_notifier_type {
+ SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE = 2,
+ SWITCHDEV_FDB_ADD_TO_DEVICE = 3,
+ SWITCHDEV_FDB_DEL_TO_DEVICE = 4,
+ SWITCHDEV_FDB_OFFLOADED = 5,
+ SWITCHDEV_PORT_OBJ_ADD = 6,
+ SWITCHDEV_PORT_OBJ_DEL = 7,
+ SWITCHDEV_PORT_ATTR_SET = 8,
+ SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 9,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 10,
+ SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 11,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 12,
+ SWITCHDEV_VXLAN_FDB_OFFLOADED = 13,
+};
+
+struct switchdev_notifier_info {
+ struct net_device *dev;
+ struct netlink_ext_ack *extack;
+};
+
+struct switchdev_notifier_fdb_info {
+ struct switchdev_notifier_info info;
+ const unsigned char *addr;
+ u16 vid;
+ u8 added_by_user: 1;
+ u8 offloaded: 1;
+};
+
+enum br_boolopt_id {
+ BR_BOOLOPT_NO_LL_LEARN = 0,
+ BR_BOOLOPT_MAX = 1,
+};
+
+struct br_boolopt_multi {
+ __u32 optval;
+ __u32 optmask;
+};
+
+enum net_bridge_opts {
+ BROPT_VLAN_ENABLED = 0,
+ BROPT_VLAN_STATS_ENABLED = 1,
+ BROPT_NF_CALL_IPTABLES = 2,
+ BROPT_NF_CALL_IP6TABLES = 3,
+ BROPT_NF_CALL_ARPTABLES = 4,
+ BROPT_GROUP_ADDR_SET = 5,
+ BROPT_MULTICAST_ENABLED = 6,
+ BROPT_MULTICAST_QUERIER = 7,
+ BROPT_MULTICAST_QUERY_USE_IFADDR = 8,
+ BROPT_MULTICAST_STATS_ENABLED = 9,
+ BROPT_HAS_IPV6_ADDR = 10,
+ BROPT_NEIGH_SUPPRESS_ENABLED = 11,
+ BROPT_MTU_SET_BY_USER = 12,
+ BROPT_VLAN_STATS_PER_PORT = 13,
+ BROPT_NO_LL_LEARN = 14,
+ BROPT_VLAN_BRIDGE_BINDING = 15,
+};
+
+struct net_bridge_vlan_group {
+ struct rhashtable vlan_hash;
+ struct rhashtable tunnel_hash;
+ struct list_head vlan_list;
+ u16 num_vlans;
+ u16 pvid;
+ u8 pvid_state;
+};
+
+struct net_bridge_port_group {
+ struct net_bridge_port *port;
+ struct net_bridge_port_group *next;
+ struct hlist_node mglist;
+ struct callback_head rcu;
+ struct timer_list timer;
+ struct br_ip addr;
+ unsigned char flags;
+ unsigned char eth_addr[6];
+};
+
+struct net_bridge_mdb_entry {
+ struct rhash_head rhnode;
+ struct net_bridge *br;
+ struct net_bridge_port_group *ports;
+ struct callback_head rcu;
+ struct timer_list timer;
+ struct br_ip addr;
+ bool host_joined;
+ struct hlist_node mdb_node;
+};
+
+enum br_pkt_type {
+ BR_PKT_UNICAST = 0,
+ BR_PKT_MULTICAST = 1,
+ BR_PKT_BROADCAST = 2,
+};
+
+struct __fdb_entry {
+ __u8 mac_addr[6];
+ __u8 port_no;
+ __u8 is_local;
+ __u32 ageing_timer_value;
+ __u8 port_hi;
+ __u8 pad0;
+ __u16 unused;
+};
+
+struct br_vlan_stats {
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 tx_bytes;
+ u64 tx_packets;
+ struct u64_stats_sync syncp;
+};
+
+struct br_tunnel_info {
+ __be64 tunnel_id;
+ struct metadata_dst *tunnel_dst;
+};
+
+struct net_bridge_vlan {
+ struct rhash_head vnode;
+ struct rhash_head tnode;
+ u16 vid;
+ u16 flags;
+ u16 priv_flags;
+ u8 state;
+ struct br_vlan_stats *stats;
+ union {
+ struct net_bridge *br;
+ struct net_bridge_port *port;
+ };
+ union {
+ refcount_t refcnt;
+ struct net_bridge_vlan *brvlan;
+ };
+ struct br_tunnel_info tinfo;
+ struct list_head vlist;
+ struct callback_head rcu;
+};
+
+enum {
+ BR_FDB_LOCAL = 0,
+ BR_FDB_STATIC = 1,
+ BR_FDB_STICKY = 2,
+ BR_FDB_ADDED_BY_USER = 3,
+ BR_FDB_ADDED_BY_EXT_LEARN = 4,
+ BR_FDB_OFFLOADED = 5,
+};
+
+struct __bridge_info {
+ __u64 designated_root;
+ __u64 bridge_id;
+ __u32 root_path_cost;
+ __u32 max_age;
+ __u32 hello_time;
+ __u32 forward_delay;
+ __u32 bridge_max_age;
+ __u32 bridge_hello_time;
+ __u32 bridge_forward_delay;
+ __u8 topology_change;
+ __u8 topology_change_detected;
+ __u8 root_port;
+ __u8 stp_enabled;
+ __u32 ageing_time;
+ __u32 gc_interval;
+ __u32 hello_timer_value;
+ __u32 tcn_timer_value;
+ __u32 topology_change_timer_value;
+ __u32 gc_timer_value;
+};
+
+struct __port_info {
+ __u64 designated_root;
+ __u64 designated_bridge;
+ __u16 port_id;
+ __u16 designated_port;
+ __u32 path_cost;
+ __u32 designated_cost;
+ __u8 state;
+ __u8 top_change_ack;
+ __u8 config_pending;
+ __u8 unused0;
+ __u32 message_age_timer_value;
+ __u32 forward_delay_timer_value;
+ __u32 hold_timer_value;
+};
+
+enum switchdev_attr_id {
+ SWITCHDEV_ATTR_ID_UNDEFINED = 0,
+ SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1,
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 2,
+ SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 3,
+ SWITCHDEV_ATTR_ID_PORT_MROUTER = 4,
+ SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 5,
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 6,
+ SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 7,
+ SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 8,
+};
+
+struct switchdev_attr {
+ struct net_device *orig_dev;
+ enum switchdev_attr_id id;
+ u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *, int, void *);
+ union {
+ u8 stp_state;
+ long unsigned int brport_flags;
+ bool mrouter;
+ clock_t ageing_time;
+ bool vlan_filtering;
+ bool mc_disabled;
+ } u;
+};
+
+struct br_config_bpdu {
+ unsigned int topology_change: 1;
+ unsigned int topology_change_ack: 1;
+ bridge_id root;
+ int root_path_cost;
+ bridge_id bridge_id;
+ port_id port_id;
+ int message_age;
+ int max_age;
+ int hello_time;
+ int forward_delay;
+};
+
+enum {
+ IFLA_BR_UNSPEC = 0,
+ IFLA_BR_FORWARD_DELAY = 1,
+ IFLA_BR_HELLO_TIME = 2,
+ IFLA_BR_MAX_AGE = 3,
+ IFLA_BR_AGEING_TIME = 4,
+ IFLA_BR_STP_STATE = 5,
+ IFLA_BR_PRIORITY = 6,
+ IFLA_BR_VLAN_FILTERING = 7,
+ IFLA_BR_VLAN_PROTOCOL = 8,
+ IFLA_BR_GROUP_FWD_MASK = 9,
+ IFLA_BR_ROOT_ID = 10,
+ IFLA_BR_BRIDGE_ID = 11,
+ IFLA_BR_ROOT_PORT = 12,
+ IFLA_BR_ROOT_PATH_COST = 13,
+ IFLA_BR_TOPOLOGY_CHANGE = 14,
+ IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 15,
+ IFLA_BR_HELLO_TIMER = 16,
+ IFLA_BR_TCN_TIMER = 17,
+ IFLA_BR_TOPOLOGY_CHANGE_TIMER = 18,
+ IFLA_BR_GC_TIMER = 19,
+ IFLA_BR_GROUP_ADDR = 20,
+ IFLA_BR_FDB_FLUSH = 21,
+ IFLA_BR_MCAST_ROUTER = 22,
+ IFLA_BR_MCAST_SNOOPING = 23,
+ IFLA_BR_MCAST_QUERY_USE_IFADDR = 24,
+ IFLA_BR_MCAST_QUERIER = 25,
+ IFLA_BR_MCAST_HASH_ELASTICITY = 26,
+ IFLA_BR_MCAST_HASH_MAX = 27,
+ IFLA_BR_MCAST_LAST_MEMBER_CNT = 28,
+ IFLA_BR_MCAST_STARTUP_QUERY_CNT = 29,
+ IFLA_BR_MCAST_LAST_MEMBER_INTVL = 30,
+ IFLA_BR_MCAST_MEMBERSHIP_INTVL = 31,
+ IFLA_BR_MCAST_QUERIER_INTVL = 32,
+ IFLA_BR_MCAST_QUERY_INTVL = 33,
+ IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 34,
+ IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 35,
+ IFLA_BR_NF_CALL_IPTABLES = 36,
+ IFLA_BR_NF_CALL_IP6TABLES = 37,
+ IFLA_BR_NF_CALL_ARPTABLES = 38,
+ IFLA_BR_VLAN_DEFAULT_PVID = 39,
+ IFLA_BR_PAD = 40,
+ IFLA_BR_VLAN_STATS_ENABLED = 41,
+ IFLA_BR_MCAST_STATS_ENABLED = 42,
+ IFLA_BR_MCAST_IGMP_VERSION = 43,
+ IFLA_BR_MCAST_MLD_VERSION = 44,
+ IFLA_BR_VLAN_STATS_PER_PORT = 45,
+ IFLA_BR_MULTI_BOOLOPT = 46,
+ __IFLA_BR_MAX = 47,
+};
+
+enum {
+ LINK_XSTATS_TYPE_UNSPEC = 0,
+ LINK_XSTATS_TYPE_BRIDGE = 1,
+ LINK_XSTATS_TYPE_BOND = 2,
+ __LINK_XSTATS_TYPE_MAX = 3,
+};
+
+struct bridge_vlan_info {
+ __u16 flags;
+ __u16 vid;
+};
+
+struct bridge_vlan_xstats {
+ __u64 rx_bytes;
+ __u64 rx_packets;
+ __u64 tx_bytes;
+ __u64 tx_packets;
+ __u16 vid;
+ __u16 flags;
+ __u32 pad2;
+};
+
+enum {
+ BRIDGE_XSTATS_UNSPEC = 0,
+ BRIDGE_XSTATS_VLAN = 1,
+ BRIDGE_XSTATS_MCAST = 2,
+ BRIDGE_XSTATS_PAD = 3,
+ BRIDGE_XSTATS_STP = 4,
+ __BRIDGE_XSTATS_MAX = 5,
+};
+
+enum {
+ BR_GROUPFWD_STP = 1,
+ BR_GROUPFWD_MACPAUSE = 2,
+ BR_GROUPFWD_LACP = 4,
+};
+
+struct vtunnel_info {
+ u32 tunid;
+ u16 vid;
+ u16 flags;
+};
+
+enum {
+ IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = 0,
+ IFLA_BRIDGE_VLAN_TUNNEL_ID = 1,
+ IFLA_BRIDGE_VLAN_TUNNEL_VID = 2,
+ IFLA_BRIDGE_VLAN_TUNNEL_FLAGS = 3,
+ __IFLA_BRIDGE_VLAN_TUNNEL_MAX = 4,
+};
+
+struct brport_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct net_bridge_port *, char *);
+ int (*store)(struct net_bridge_port *, long unsigned int);
+ int (*store_raw)(struct net_bridge_port *, char *);
+};
+
+enum {
+ PIM_TYPE_HELLO = 0,
+ PIM_TYPE_REGISTER = 1,
+ PIM_TYPE_REGISTER_STOP = 2,
+ PIM_TYPE_JOIN_PRUNE = 3,
+ PIM_TYPE_BOOTSTRAP = 4,
+ PIM_TYPE_ASSERT = 5,
+ PIM_TYPE_GRAFT = 6,
+ PIM_TYPE_GRAFT_ACK = 7,
+ PIM_TYPE_CANDIDATE_RP_ADV = 8,
+};
+
+struct pimhdr {
+ __u8 type;
+ __u8 reserved;
+ __be16 csum;
+};
+
+enum {
+ MDB_RTR_TYPE_DISABLED = 0,
+ MDB_RTR_TYPE_TEMP_QUERY = 1,
+ MDB_RTR_TYPE_PERM = 2,
+ MDB_RTR_TYPE_TEMP = 3,
+};
+
+struct br_ip_list {
+ struct list_head list;
+ struct br_ip addr;
+};
+
+enum switchdev_obj_id {
+ SWITCHDEV_OBJ_ID_UNDEFINED = 0,
+ SWITCHDEV_OBJ_ID_PORT_VLAN = 1,
+ SWITCHDEV_OBJ_ID_PORT_MDB = 2,
+ SWITCHDEV_OBJ_ID_HOST_MDB = 3,
+};
+
+struct switchdev_obj {
+ struct net_device *orig_dev;
+ enum switchdev_obj_id id;
+ u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *, int, void *);
+};
+
+struct switchdev_obj_port_mdb {
+ struct switchdev_obj obj;
+ unsigned char addr[6];
+ u16 vid;
+};
+
+enum {
+ MDBA_UNSPEC = 0,
+ MDBA_MDB = 1,
+ MDBA_ROUTER = 2,
+ __MDBA_MAX = 3,
+};
+
+enum {
+ MDBA_MDB_UNSPEC = 0,
+ MDBA_MDB_ENTRY = 1,
+ __MDBA_MDB_MAX = 2,
+};
+
+enum {
+ MDBA_MDB_ENTRY_UNSPEC = 0,
+ MDBA_MDB_ENTRY_INFO = 1,
+ __MDBA_MDB_ENTRY_MAX = 2,
+};
+
+enum {
+ MDBA_MDB_EATTR_UNSPEC = 0,
+ MDBA_MDB_EATTR_TIMER = 1,
+ __MDBA_MDB_EATTR_MAX = 2,
+};
+
+enum {
+ MDBA_ROUTER_UNSPEC = 0,
+ MDBA_ROUTER_PORT = 1,
+ __MDBA_ROUTER_MAX = 2,
+};
+
+enum {
+ MDBA_ROUTER_PATTR_UNSPEC = 0,
+ MDBA_ROUTER_PATTR_TIMER = 1,
+ MDBA_ROUTER_PATTR_TYPE = 2,
+ __MDBA_ROUTER_PATTR_MAX = 3,
+};
+
+struct br_port_msg {
+ __u8 family;
+ __u32 ifindex;
+};
+
+struct br_mdb_entry {
+ __u32 ifindex;
+ __u8 state;
+ __u8 flags;
+ __u16 vid;
+ struct {
+ union {
+ __be32 ip4;
+ struct in6_addr ip6;
+ } u;
+ __be16 proto;
+ } addr;
+};
+
+enum {
+ MDBA_SET_ENTRY_UNSPEC = 0,
+ MDBA_SET_ENTRY = 1,
+ __MDBA_SET_ENTRY_MAX = 2,
+};
+
+struct br_mdb_complete_info {
+ struct net_bridge_port *port;
+ struct br_ip ip;
+};
+
+struct switchdev_trans {
+ bool ph_prepare;
+};
+
+struct switchdev_obj_port_vlan {
+ struct switchdev_obj obj;
+ u16 flags;
+ u16 vid_begin;
+ u16 vid_end;
+};
+
+struct switchdev_notifier_port_attr_info {
+ struct switchdev_notifier_info info;
+ const struct switchdev_attr *attr;
+ struct switchdev_trans *trans;
+ bool handled;
+};
+
+struct _strp_msg {
+ struct strp_msg strp;
+ int accum_len;
+};
+
+enum p9_msg_t {
+ P9_TLERROR = 6,
+ P9_RLERROR = 7,
+ P9_TSTATFS = 8,
+ P9_RSTATFS = 9,
+ P9_TLOPEN = 12,
+ P9_RLOPEN = 13,
+ P9_TLCREATE = 14,
+ P9_RLCREATE = 15,
+ P9_TSYMLINK = 16,
+ P9_RSYMLINK = 17,
+ P9_TMKNOD = 18,
+ P9_RMKNOD = 19,
+ P9_TRENAME = 20,
+ P9_RRENAME = 21,
+ P9_TREADLINK = 22,
+ P9_RREADLINK = 23,
+ P9_TGETATTR = 24,
+ P9_RGETATTR = 25,
+ P9_TSETATTR = 26,
+ P9_RSETATTR = 27,
+ P9_TXATTRWALK = 30,
+ P9_RXATTRWALK = 31,
+ P9_TXATTRCREATE = 32,
+ P9_RXATTRCREATE = 33,
+ P9_TREADDIR = 40,
+ P9_RREADDIR = 41,
+ P9_TFSYNC = 50,
+ P9_RFSYNC = 51,
+ P9_TLOCK = 52,
+ P9_RLOCK = 53,
+ P9_TGETLOCK = 54,
+ P9_RGETLOCK = 55,
+ P9_TLINK = 70,
+ P9_RLINK = 71,
+ P9_TMKDIR = 72,
+ P9_RMKDIR = 73,
+ P9_TRENAMEAT = 74,
+ P9_RRENAMEAT = 75,
+ P9_TUNLINKAT = 76,
+ P9_RUNLINKAT = 77,
+ P9_TVERSION = 100,
+ P9_RVERSION = 101,
+ P9_TAUTH = 102,
+ P9_RAUTH = 103,
+ P9_TATTACH = 104,
+ P9_RATTACH = 105,
+ P9_TERROR = 106,
+ P9_RERROR = 107,
+ P9_TFLUSH = 108,
+ P9_RFLUSH = 109,
+ P9_TWALK = 110,
+ P9_RWALK = 111,
+ P9_TOPEN = 112,
+ P9_ROPEN = 113,
+ P9_TCREATE = 114,
+ P9_RCREATE = 115,
+ P9_TREAD = 116,
+ P9_RREAD = 117,
+ P9_TWRITE = 118,
+ P9_RWRITE = 119,
+ P9_TCLUNK = 120,
+ P9_RCLUNK = 121,
+ P9_TREMOVE = 122,
+ P9_RREMOVE = 123,
+ P9_TSTAT = 124,
+ P9_RSTAT = 125,
+ P9_TWSTAT = 126,
+ P9_RWSTAT = 127,
+};
+
+enum p9_proto_versions {
+ p9_proto_legacy = 0,
+ p9_proto_2000u = 1,
+ p9_proto_2000L = 2,
+};
+
+enum p9_req_status_t {
+ REQ_STATUS_ALLOC = 0,
+ REQ_STATUS_UNSENT = 1,
+ REQ_STATUS_SENT = 2,
+ REQ_STATUS_RCVD = 3,
+ REQ_STATUS_FLSHD = 4,
+ REQ_STATUS_ERROR = 5,
+};
+
+struct trace_event_raw_9p_client_req {
+ struct trace_entry ent;
+ void *clnt;
+ __u8 type;
+ __u32 tag;
+ char __data[0];
+};
+
+struct trace_event_raw_9p_client_res {
+ struct trace_entry ent;
+ void *clnt;
+ __u8 type;
+ __u32 tag;
+ __u32 err;
+ char __data[0];
+};
+
+struct trace_event_raw_9p_protocol_dump {
+ struct trace_entry ent;
+ void *clnt;
+ __u8 type;
+ __u16 tag;
+ unsigned char line[32];
+ char __data[0];
+};
+
+struct trace_event_data_offsets_9p_client_req {};
+
+struct trace_event_data_offsets_9p_client_res {};
+
+struct trace_event_data_offsets_9p_protocol_dump {};
+
+typedef void (*btf_trace_9p_client_req)(void *, struct p9_client *, int8_t, int);
+
+typedef void (*btf_trace_9p_client_res)(void *, struct p9_client *, int8_t, int, int);
+
+typedef void (*btf_trace_9p_protocol_dump)(void *, struct p9_client *, struct p9_fcall *);
+
+enum {
+ Opt_msize = 0,
+ Opt_trans = 1,
+ Opt_legacy = 2,
+ Opt_version = 3,
+ Opt_err___6 = 4,
+};
+
+struct errormap {
+ char *name;
+ int val;
+ int namelen;
+ struct hlist_node list;
+};
+
+struct p9_fd_opts {
+ int rfd;
+ int wfd;
+ u16 port;
+ bool privport;
+};
+
+enum {
+ Opt_port = 0,
+ Opt_rfdno = 1,
+ Opt_wfdno = 2,
+ Opt_err___7 = 3,
+ Opt_privport = 4,
+};
+
+enum {
+ Rworksched = 1,
+ Rpending = 2,
+ Wworksched = 4,
+ Wpending = 8,
+};
+
+struct p9_conn;
+
+struct p9_poll_wait {
+ struct p9_conn *conn;
+ wait_queue_entry_t wait;
+ wait_queue_head_t *wait_addr;
+};
+
+struct p9_conn {
+ struct list_head mux_list;
+ struct p9_client *client;
+ int err;
+ struct list_head req_list;
+ struct list_head unsent_req_list;
+ struct p9_req_t *rreq;
+ struct p9_req_t *wreq;
+ char tmp_buf[7];
+ struct p9_fcall rc;
+ int wpos;
+ int wsize;
+ char *wbuf;
+ struct list_head poll_pending_link;
+ struct p9_poll_wait poll_wait[2];
+ poll_table pt;
+ struct work_struct rq;
+ struct work_struct wq;
+ long unsigned int wsched;
+};
+
+struct p9_trans_fd {
+ struct file *rd;
+ struct file *wr;
+ struct p9_conn conn;
+};
+
+struct virtio_9p_config {
+ __u16 tag_len;
+ __u8 tag[0];
+};
+
+struct virtio_chan {
+ bool inuse;
+ spinlock_t lock;
+ struct p9_client *client;
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+ int ring_bufs_avail;
+ wait_queue_head_t *vc_wq;
+ long unsigned int p9_max_pages;
+ struct scatterlist sg[128];
+ char *tag;
+ struct list_head chan_list;
+};
+
+struct sockaddr_vm {
+ __kernel_sa_family_t svm_family;
+ short unsigned int svm_reserved1;
+ unsigned int svm_port;
+ unsigned int svm_cid;
+ unsigned char svm_zero[4];
+};
+
+struct vsock_transport;
+
+struct vsock_sock {
+ struct sock sk;
+ const struct vsock_transport *transport;
+ struct sockaddr_vm local_addr;
+ struct sockaddr_vm remote_addr;
+ struct list_head bound_table;
+ struct list_head connected_table;
+ bool trusted;
+ bool cached_peer_allow_dgram;
+ u32 cached_peer;
+ const struct cred *owner;
+ long int connect_timeout;
+ struct sock *listener;
+ struct list_head pending_links;
+ struct list_head accept_queue;
+ bool rejected;
+ struct delayed_work connect_work;
+ struct delayed_work pending_work;
+ struct delayed_work close_work;
+ bool close_work_scheduled;
+ u32 peer_shutdown;
+ bool sent_request;
+ bool ignore_connecting_rst;
+ u64 buffer_size;
+ u64 buffer_min_size;
+ u64 buffer_max_size;
+ void *trans;
+};
+
+struct vsock_transport_recv_notify_data;
+
+struct vsock_transport_send_notify_data;
+
+struct vsock_transport {
+ struct module *module;
+ int (*init)(struct vsock_sock *, struct vsock_sock *);
+ void (*destruct)(struct vsock_sock *);
+ void (*release)(struct vsock_sock *);
+ int (*cancel_pkt)(struct vsock_sock *);
+ int (*connect)(struct vsock_sock *);
+ int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
+ int (*dgram_dequeue)(struct vsock_sock *, struct msghdr *, size_t, int);
+ int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, struct msghdr *, size_t);
+ bool (*dgram_allow)(u32, u32);
+ ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *, size_t, int);
+ ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *, size_t);
+ s64 (*stream_has_data)(struct vsock_sock *);
+ s64 (*stream_has_space)(struct vsock_sock *);
+ u64 (*stream_rcvhiwat)(struct vsock_sock *);
+ bool (*stream_is_active)(struct vsock_sock *);
+ bool (*stream_allow)(u32, u32);
+ int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
+ int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
+ int (*notify_recv_init)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_pre_block)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *);
+ int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t, ssize_t, bool, struct vsock_transport_recv_notify_data *);
+ int (*notify_send_init)(struct vsock_sock *, struct vsock_transport_send_notify_data *);
+ int (*notify_send_pre_block)(struct vsock_sock *, struct vsock_transport_send_notify_data *);
+ int (*notify_send_pre_enqueue)(struct vsock_sock *, struct vsock_transport_send_notify_data *);
+ int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t, struct vsock_transport_send_notify_data *);
+ void (*notify_buffer_size)(struct vsock_sock *, u64 *);
+ int (*shutdown)(struct vsock_sock *, int);
+ u32 (*get_local_cid)();
+};
+
+struct vsock_transport_recv_notify_data {
+ u64 data1;
+ u64 data2;
+ bool notify_on_block;
+};
+
+struct vsock_transport_send_notify_data {
+ u64 data1;
+ u64 data2;
+};
+
+struct vsock_tap {
+ struct net_device *dev;
+ struct module *module;
+ struct list_head list;
+};
+
+enum virtio_vsock_event_id {
+ VIRTIO_VSOCK_EVENT_TRANSPORT_RESET = 0,
+};
+
+struct virtio_vsock_event {
+ __le32 id;
+};
+
+struct virtio_vsock_hdr {
+ __le64 src_cid;
+ __le64 dst_cid;
+ __le32 src_port;
+ __le32 dst_port;
+ __le32 len;
+ __le16 type;
+ __le16 op;
+ __le32 flags;
+ __le32 buf_alloc;
+ __le32 fwd_cnt;
+} __attribute__((packed));
+
+enum {
+ VSOCK_VQ_RX = 0,
+ VSOCK_VQ_TX = 1,
+ VSOCK_VQ_EVENT = 2,
+ VSOCK_VQ_MAX = 3,
+};
+
+struct virtio_vsock_pkt {
+ struct virtio_vsock_hdr hdr;
+ struct list_head list;
+ struct vsock_sock *vsk;
+ void *buf;
+ u32 buf_len;
+ u32 len;
+ u32 off;
+ bool reply;
+ bool tap_delivered;
+};
+
+struct virtio_transport {
+ struct vsock_transport transport;
+ int (*send_pkt)(struct virtio_vsock_pkt *);
+};
+
+struct virtio_vsock {
+ struct virtio_device *vdev;
+ struct virtqueue *vqs[3];
+ struct work_struct tx_work;
+ struct work_struct rx_work;
+ struct work_struct event_work;
+ struct mutex tx_lock;
+ bool tx_run;
+ long: 56;
+ struct work_struct send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list;
+ atomic_t queued_replies;
+ int: 32;
+ struct mutex rx_lock;
+ bool rx_run;
+ int: 24;
+ int rx_buf_nr;
+ int rx_buf_max_nr;
+ int: 32;
+ struct mutex event_lock;
+ bool event_run;
+ struct virtio_vsock_event event_list[8];
+ int: 24;
+ u32 guest_cid;
+} __attribute__((packed));
+
+enum virtio_vsock_type {
+ VIRTIO_VSOCK_TYPE_STREAM = 1,
+};
+
+enum virtio_vsock_op {
+ VIRTIO_VSOCK_OP_INVALID = 0,
+ VIRTIO_VSOCK_OP_REQUEST = 1,
+ VIRTIO_VSOCK_OP_RESPONSE = 2,
+ VIRTIO_VSOCK_OP_RST = 3,
+ VIRTIO_VSOCK_OP_SHUTDOWN = 4,
+ VIRTIO_VSOCK_OP_RW = 5,
+ VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6,
+ VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7,
+};
+
+enum virtio_vsock_shutdown {
+ VIRTIO_VSOCK_SHUTDOWN_RCV = 1,
+ VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
+};
+
+struct virtio_vsock_sock {
+ struct vsock_sock *vsk;
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ u32 tx_cnt;
+ u32 peer_fwd_cnt;
+ u32 peer_buf_alloc;
+ u32 fwd_cnt;
+ u32 last_fwd_cnt;
+ u32 rx_bytes;
+ u32 buf_alloc;
+ struct list_head rx_queue;
+};
+
+struct virtio_vsock_pkt_info {
+ u32 remote_cid;
+ u32 remote_port;
+ struct vsock_sock *vsk;
+ struct msghdr *msg;
+ u32 pkt_len;
+ u16 type;
+ u16 op;
+ u32 flags;
+ bool reply;
+};
+
+struct af_vsockmon_hdr {
+ __le64 src_cid;
+ __le64 dst_cid;
+ __le32 src_port;
+ __le32 dst_port;
+ __le16 op;
+ __le16 transport;
+ __le16 len;
+ __u8 reserved[2];
+};
+
+enum af_vsockmon_op {
+ AF_VSOCK_OP_UNKNOWN = 0,
+ AF_VSOCK_OP_CONNECT = 1,
+ AF_VSOCK_OP_DISCONNECT = 2,
+ AF_VSOCK_OP_CONTROL = 3,
+ AF_VSOCK_OP_PAYLOAD = 4,
+};
+
+enum af_vsockmon_transport {
+ AF_VSOCK_TRANSPORT_UNKNOWN = 0,
+ AF_VSOCK_TRANSPORT_NO_INFO = 1,
+ AF_VSOCK_TRANSPORT_VIRTIO = 2,
+};
+
+struct trace_event_raw_virtio_transport_alloc_pkt {
+ struct trace_entry ent;
+ __u32 src_cid;
+ __u32 src_port;
+ __u32 dst_cid;
+ __u32 dst_port;
+ __u32 len;
+ __u16 type;
+ __u16 op;
+ __u32 flags;
+ char __data[0];
+};
+
+struct trace_event_raw_virtio_transport_recv_pkt {
+ struct trace_entry ent;
+ __u32 src_cid;
+ __u32 src_port;
+ __u32 dst_cid;
+ __u32 dst_port;
+ __u32 len;
+ __u16 type;
+ __u16 op;
+ __u32 flags;
+ __u32 buf_alloc;
+ __u32 fwd_cnt;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_virtio_transport_alloc_pkt {};
+
+struct trace_event_data_offsets_virtio_transport_recv_pkt {};
+
+typedef void (*btf_trace_virtio_transport_alloc_pkt)(void *, __u32, __u32, __u32, __u32, __u32, __u16, __u16, __u32);
+
+typedef void (*btf_trace_virtio_transport_recv_pkt)(void *, __u32, __u32, __u32, __u32, __u32, __u16, __u16, __u32, __u32, __u32);
+
+struct vsock_loopback {
+ struct workqueue_struct *workqueue;
+ spinlock_t pkt_list_lock;
+ struct list_head pkt_list;
+ struct work_struct pkt_work;
+};
+
+struct switchdev_notifier_port_obj_info {
+ struct switchdev_notifier_info info;
+ const struct switchdev_obj *obj;
+ struct switchdev_trans *trans;
+ bool handled;
+};
+
+typedef void switchdev_deferred_func_t(struct net_device *, const void *);
+
+struct switchdev_deferred_item {
+ struct list_head list;
+ struct net_device *dev;
+ switchdev_deferred_func_t *func;
+ long unsigned int data[0];
+};
+
+struct sockaddr_xdp {
+ __u16 sxdp_family;
+ __u16 sxdp_flags;
+ __u32 sxdp_ifindex;
+ __u32 sxdp_queue_id;
+ __u32 sxdp_shared_umem_fd;
+};
+
+struct xdp_ring_offset {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+ __u64 flags;
+};
+
+struct xdp_mmap_offsets {
+ struct xdp_ring_offset rx;
+ struct xdp_ring_offset tx;
+ struct xdp_ring_offset fr;
+ struct xdp_ring_offset cr;
+};
+
+struct xdp_umem_reg {
+ __u64 addr;
+ __u64 len;
+ __u32 chunk_size;
+ __u32 headroom;
+ __u32 flags;
+};
+
+struct xdp_statistics {
+ __u64 rx_dropped;
+ __u64 rx_invalid_descs;
+ __u64 tx_invalid_descs;
+};
+
+struct xdp_options {
+ __u32 flags;
+};
+
+struct xdp_desc {
+ __u64 addr;
+ __u32 len;
+ __u32 options;
+};
+
+struct xdp_ring;
+
+struct xsk_queue {
+ u32 ring_mask;
+ u32 nentries;
+ u32 cached_prod;
+ u32 cached_cons;
+ struct xdp_ring *ring;
+ u64 invalid_descs;
+};
+
+struct xdp_ring_offset_v1 {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+struct xdp_mmap_offsets_v1 {
+ struct xdp_ring_offset_v1 rx;
+ struct xdp_ring_offset_v1 tx;
+ struct xdp_ring_offset_v1 fr;
+ struct xdp_ring_offset_v1 cr;
+};
+
+struct xsk_map_node {
+ struct list_head node;
+ struct xsk_map *map;
+ struct xdp_sock **map_entry;
+};
+
+struct xdp_ring {
+ u32 producer;
+ long: 32;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ u32 consumer;
+ u32 flags;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+ long: 64;
+};
+
+struct xdp_rxtx_ring {
+ struct xdp_ring ptrs;
+ struct xdp_desc desc[0];
+};
+
+struct xdp_umem_ring {
+ struct xdp_ring ptrs;
+ u64 desc[0];
+};
+
+struct xdp_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u16 pad;
+ __u32 xdiag_ino;
+ __u32 xdiag_show;
+ __u32 xdiag_cookie[2];
+};
+
+struct xdp_diag_msg {
+ __u8 xdiag_family;
+ __u8 xdiag_type;
+ __u16 pad;
+ __u32 xdiag_ino;
+ __u32 xdiag_cookie[2];
+};
+
+enum {
+ XDP_DIAG_NONE = 0,
+ XDP_DIAG_INFO = 1,
+ XDP_DIAG_UID = 2,
+ XDP_DIAG_RX_RING = 3,
+ XDP_DIAG_TX_RING = 4,
+ XDP_DIAG_UMEM = 5,
+ XDP_DIAG_UMEM_FILL_RING = 6,
+ XDP_DIAG_UMEM_COMPLETION_RING = 7,
+ XDP_DIAG_MEMINFO = 8,
+ __XDP_DIAG_MAX = 9,
+};
+
+struct xdp_diag_info {
+ __u32 ifindex;
+ __u32 queue_id;
+};
+
+struct xdp_diag_ring {
+ __u32 entries;
+};
+
+struct xdp_diag_umem {
+ __u64 size;
+ __u32 id;
+ __u32 num_pages;
+ __u32 chunk_size;
+ __u32 headroom;
+ __u32 ifindex;
+ __u32 queue_id;
+ __u32 flags;
+ __u32 refs;
+};
+
+struct pcibios_fwaddrmap {
+ struct list_head list;
+ struct pci_dev *dev;
+ resource_size_t fw_addr[11];
+};
+
+struct pci_check_idx_range {
+ int start;
+ int end;
+};
+
+struct pci_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_sysdata sd;
+};
+
+struct irq_info___2 {
+ u8 bus;
+ u8 devfn;
+ struct {
+ u8 link;
+ u16 bitmap;
+ } __attribute__((packed)) irq[4];
+ u8 slot;
+ u8 rfu;
+};
+
+struct irq_routing_table {
+ u32 signature;
+ u16 version;
+ u16 size;
+ u8 rtr_bus;
+ u8 rtr_devfn;
+ u16 exclusive_irqs;
+ u16 rtr_vendor;
+ u16 rtr_device;
+ u32 miniport_data;
+ u8 rfu[11];
+ u8 checksum;
+ struct irq_info___2 slots[0];
+};
+
+struct irq_router {
+ char *name;
+ u16 vendor;
+ u16 device;
+ int (*get)(struct pci_dev *, struct pci_dev *, int);
+ int (*set)(struct pci_dev *, struct pci_dev *, int, int);
+};
+
+struct irq_router_handler {
+ u16 vendor;
+ int (*probe)(struct irq_router *, struct pci_dev *, u16);
+};
+
+struct pci_setup_rom {
+ struct setup_data data;
+ uint16_t vendor;
+ uint16_t devid;
+ uint64_t pcilen;
+ long unsigned int segment;
+ long unsigned int bus;
+ long unsigned int device;
+ long unsigned int function;
+ uint8_t romdata[0];
+};
+
+enum pci_bf_sort_state {
+ pci_bf_sort_default = 0,
+ pci_force_nobf = 1,
+ pci_force_bf = 2,
+ pci_dmi_bf = 3,
+};
+
+struct pci_root_res {
+ struct list_head list;
+ struct resource res;
+};
+
+struct pci_root_info___2 {
+ struct list_head list;
+ char name[12];
+ struct list_head resources;
+ struct resource busn;
+ int node;
+ int link;
+};
+
+struct amd_hostbridge {
+ u32 bus;
+ u32 slot;
+ u32 device;
+};
+
+struct bpf_timer {
+ u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+struct bpf_dynptr {
+ __u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+#endif
diff --git a/pkg/collector/bpf/lib/bpf_cgroup.h b/pkg/collector/bpf/lib/bpf_cgroup.h
new file mode 100644
index 00000000..9480c690
--- /dev/null
+++ b/pkg/collector/bpf/lib/bpf_cgroup.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+// Nicked a lot of utility functions from the https://github.com/cilium/tetragon project
+
+#include "bpf_helpers.h"
+#include "config.h"
+
+#define NULL ((void *)0)
+
+#ifndef CGROUP_SUPER_MAGIC
+#define CGROUP_SUPER_MAGIC 0x27e0eb /* Cgroupv1 pseudo FS */
+#endif
+
+#ifndef CGROUP2_SUPER_MAGIC
+#define CGROUP2_SUPER_MAGIC 0x63677270 /* Cgroupv2 pseudo FS */
+#endif
+
+/* Msg flags */
+#define EVENT_ERROR_CGROUP_NAME 0x010000
+#define EVENT_ERROR_CGROUP_KN 0x020000
+#define EVENT_ERROR_CGROUP_SUBSYSCGRP 0x040000
+#define EVENT_ERROR_CGROUP_SUBSYS 0x080000
+#define EVENT_ERROR_CGROUPS 0x100000
+#define EVENT_ERROR_CGROUP_ID 0x200000
+
+/* Represent old kernfs node with the kernfs_node_id
+ * union to read the id in 5.4 kernels and older
+ */
+struct kernfs_node___old {
+ union kernfs_node_id id;
+};
+
+/**
+ * __get_cgroup_kn() Returns the kernfs_node of the cgroup
+ * @cgrp: target cgroup
+ *
+ * Returns the kernfs_node of the cgroup on success, NULL on failures.
+ */
+FUNC_INLINE struct kernfs_node *__get_cgroup_kn(const struct cgroup *cgrp)
+{
+ struct kernfs_node *kn = NULL;
+
+ if (cgrp)
+ bpf_probe_read(&kn, sizeof(cgrp->kn), _(&cgrp->kn));
+
+ return kn;
+}
+
+/**
+ * __get_cgroup_kn_id() Returns the kernfs node id
+ * @kn: target kernfs node
+ *
+ * Returns the kernfs node id on success, zero on failures.
+ */
+FUNC_INLINE __u64 __get_cgroup_kn_id(const struct kernfs_node *kn)
+{
+ __u64 id = 0;
+
+ if (!kn)
+ return id;
+
+ /* Kernels prior to 5.5 have the kernfs_node_id, but distros (RHEL)
+ * seem to have kernfs_node_id defined for UAPI reasons even though
+ * it's not used here directly. To resolve this, walk the struct for id.id.
+ */
+ if (bpf_core_field_exists(((struct kernfs_node___old *)0)->id.id)) {
+ struct kernfs_node___old *old_kn;
+
+ old_kn = (void *)kn;
+ if (BPF_CORE_READ_INTO(&id, old_kn, id.id) != 0)
+ return 0;
+ } else {
+ bpf_probe_read(&id, sizeof(id), _(&kn->id));
+ }
+
+ return id;
+}
+
+/**
+ * get_cgroup_id() Returns cgroup id
+ * @cgrp: target cgroup
+ *
+ * Returns the cgroup id of the target cgroup on success, zero on failures.
+ */
+FUNC_INLINE __u64 get_cgroup_id(const struct cgroup *cgrp)
+{
+ struct kernfs_node *kn;
+
+ kn = __get_cgroup_kn(cgrp);
+ return __get_cgroup_kn_id(kn);
+}
+
+/**
+ * get_task_cgroup() Returns the accurate or desired cgroup of the css of
+ * current task that we want to operate on.
+ * @task: must be current task.
+ * @subsys_idx: index of the desired cgroup_subsys_state part of css_set.
+ * Passing a zero as a subsys_idx is fine assuming you want that.
+ * @error_flags: error flags that will be ORed to indicate errors on
+ * failures.
+ *
+ * Returns the cgroup of the css part of css_set of current task and is
+ * indexed at subsys_idx on success. NULL on failures, and the error_flags
+ * will be ORed to indicate the corresponding error.
+ *
+ * To get cgroup and kernfs node information we want to operate on the right
+ * cgroup hierarchy, which is set up by user space. However, due to the
+ * incompatibility between cgroup v1 and v2 and the different ways user space
+ * initializes and installs cgroup controllers, this can be difficult.
+ *
+ * Use this helper and pass the css index that you consider accurate,
+ * which can be discovered at runtime in user space.
+ * Usually it is the 'memory' or 'pids' index, obtained by reading the
+ * /proc/cgroups file, where each line number is the index, starting from
+ * zero and not counting the first comment line.
+ */
+FUNC_INLINE struct cgroup *
+get_task_cgroup(struct task_struct *task, __u32 subsys_idx, __u32 *error_flags)
+{
+ struct cgroup_subsys_state *subsys;
+ struct css_set *cgroups;
+ struct cgroup *cgrp = NULL;
+
+ bpf_probe_read(&cgroups, sizeof(cgroups), _(&task->cgroups));
+ if (unlikely(!cgroups)) {
+ *error_flags |= EVENT_ERROR_CGROUPS;
+ return cgrp;
+ }
+
+ /* We are interested only in the cpuset, memory or pids controllers
+ * which are indexed at 0, 4 and 11 respectively assuming all controllers
+ * are compiled in.
+ * When we use the controller indexes we first discover them dynamically
+ * in user space by reading /proc/cgroups, which works on all setups.
+ * If we fail to discover the indexes then passing a default index of
+ * zero should be fine, assuming that is also what we want.
+ *
+ * Reference: https://elixir.bootlin.com/linux/v5.19/source/include/linux/cgroup_subsys.h
+ *
+ * Notes:
+ * Newer controllers should be appended at the end. Controllers
+ * that are not upstreamed may mess up the calculation here,
+ * especially if they happen to come before the desired subsys_idx,
+ * in which case we fail.
+ */
+ if (unlikely(subsys_idx > pids_cgrp_id)) {
+ *error_flags |= EVENT_ERROR_CGROUP_SUBSYS;
+ return cgrp;
+ }
+
+ /* Read css from the passed subsys index to ensure that we operate
+ * on the desired controller. This allows user space to be flexible
+ * and choose the right per-cgroup subsystem to use in order to
+ * support as many workloads as possible. It also reduces errors
+ * in a significant way.
+ */
+ bpf_probe_read(&subsys, sizeof(subsys), _(&cgroups->subsys[subsys_idx]));
+ if (unlikely(!subsys)) {
+ *error_flags |= EVENT_ERROR_CGROUP_SUBSYS;
+ return cgrp;
+ }
+
+ bpf_probe_read(&cgrp, sizeof(cgrp), _(&subsys->cgroup));
+ if (!cgrp)
+ *error_flags |= EVENT_ERROR_CGROUP_SUBSYSCGRP;
+
+ return cgrp;
+}
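+
+/*
+ * Illustration: the subsys_idx passed above is the zero-based line number of
+ * the controller in /proc/cgroups, skipping the leading comment line. The
+ * exact values depend on the kernel configuration; assuming all controllers
+ * are compiled in, the file typically looks like:
+ *
+ *   #subsys_name   hierarchy   num_cgroups   enabled
+ *   cpuset         ...                                 -> subsys_idx 0
+ *   ...
+ *   memory         ...                                 -> subsys_idx 4
+ *   ...
+ *   pids           ...                                 -> subsys_idx 11
+ *
+ * This is why user space discovers the indexes at runtime and passes them to
+ * this code through the cgrp_subsys_idx field of conf_map (see
+ * ceems_get_current_cgroup_id() below).
+ */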
+
+/**
+ * ceems_get_current_cgroupv1_id() Returns the cgroup id of the current task
+ * running under cgroup v1.
+ *
+ * Returns the cgroup id of the current task on success, zero on failures.
+ */
+FUNC_INLINE __u64 ceems_get_current_cgroupv1_id(int subsys_idx)
+{
+ __u32 error_flags;
+ struct cgroup *cgrp;
+ struct task_struct *task;
+
+ task = (struct task_struct *)bpf_get_current_task();
+
+ // NB: error_flags are ignored for now
+ cgrp = get_task_cgroup(task, subsys_idx, &error_flags);
+ if (!cgrp)
+ return 0;
+
+ return get_cgroup_id(cgrp);
+}
+
+
+/**
+ * ceems_get_current_cgroup_id() Returns the cgroup id of the current task.
+ *
+ * Returns the cgroup id of the current task on success, zero on failures.
+ */
+FUNC_INLINE __u64 ceems_get_current_cgroup_id(void)
+{
+ __u64 cgrpfs_magic = 0;
+ struct conf *cfg;
+ int zero = 0, subsys_idx = 1;
+
+ cfg = bpf_map_lookup_elem(&conf_map, &zero);
+ if (cfg) {
+ /* Select which cgroup version */
+ cgrpfs_magic = cfg->cgrp_fs_magic;
+ /* Select which cgroup subsystem */
+ subsys_idx = cfg->cgrp_subsys_idx;
+ }
+
+ /*
+ * Try the bpf helper on the default hierarchy if available
+ * and if we are running in unified cgroupv2
+ */
+ if (cgrpfs_magic == CGROUP2_SUPER_MAGIC) {
+ return bpf_get_current_cgroup_id();
+ }
+
+ return ceems_get_current_cgroupv1_id(subsys_idx);
+}
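+
+/*
+ * Illustrative usage sketch (not part of this header's API): a minimal kprobe
+ * program that counts events per cgroup using the id returned by
+ * ceems_get_current_cgroup_id(). All names prefixed with 'example_' are
+ * hypothetical and shown only to demonstrate the call pattern.
+ *
+ *   struct {
+ *       __uint(type, BPF_MAP_TYPE_HASH);
+ *       __uint(max_entries, 1024);
+ *       __type(key, __u64);
+ *       __type(value, __u64);
+ *   } example_cgrp_counts SEC(".maps");
+ *
+ *   SEC("kprobe/vfs_write")
+ *   int example_count_writes(struct pt_regs *ctx)
+ *   {
+ *       __u64 cgrp_id = ceems_get_current_cgroup_id();
+ *       __u64 one = 1, *val;
+ *
+ *       if (!cgrp_id)
+ *           return 0;
+ *
+ *       val = bpf_map_lookup_elem(&example_cgrp_counts, &cgrp_id);
+ *       if (val)
+ *           __sync_fetch_and_add(val, 1);
+ *       else
+ *           bpf_map_update_elem(&example_cgrp_counts, &cgrp_id, &one, BPF_NOEXIST);
+ *
+ *       return 0;
+ *   }
+ */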
diff --git a/pkg/collector/bpf/lib/bpf_helpers.h b/pkg/collector/bpf/lib/bpf_helpers.h
new file mode 100644
index 00000000..b461733d
--- /dev/null
+++ b/pkg/collector/bpf/lib/bpf_helpers.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef __BPF_HELPERS__
+#define __BPF_HELPERS__
+
+/*
+ * Note that bpf programs need to include either
+ * vmlinux.h (auto-generated from BTF) or linux/types.h
+ * in advance since bpf_helper_defs.h uses such types
+ * as __u64.
+ */
+#include "bpf_helper_defs.h"
+
+#define __uint(name, val) int (*name)[val]
+#define __type(name, val) typeof(val) *name
+#define __array(name, val) typeof(val) *name[]
+
+/*
+ * Helper macro to place programs, maps, license in
+ * different sections in elf_bpf file. Section names
+ * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
+ * extern variables, etc).
+ * To allow use of SEC() with externs (e.g., for extern .maps declarations),
+ * make sure __attribute__((unused)) doesn't trigger compilation warning.
+ */
+#define SEC(name) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
+ __attribute__((section(name), used)) \
+ _Pragma("GCC diagnostic pop") \
+
+/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
+#undef __always_inline
+#define __always_inline inline __attribute__((always_inline))
+
+#ifndef __noinline
+#define __noinline __attribute__((noinline))
+#endif
+#ifndef __weak
+#define __weak __attribute__((weak))
+#endif
+
+/*
+ * Use __hidden attribute to mark a non-static BPF subprogram effectively
+ * static for BPF verifier's verification algorithm purposes, allowing more
+ * extensive and permissive BPF verification process, taking into account
+ * subprogram's caller context.
+ */
+#define __hidden __attribute__((visibility("hidden")))
+
+/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
+ * any system-level headers (such as stddef.h, linux/version.h, etc), and
+ * commonly-used macros like NULL and KERNEL_VERSION aren't available through
+ * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
+ * them on their own. So as a convenience, provide such definitions here.
+ */
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
+#endif
+
+/*
+ * Helper macros to manipulate data structures
+ */
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
+#endif
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ ({ \
+ void *__mptr = (void *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+#endif
+
+/*
+ * Helper macro to throw a compilation error if __bpf_unreachable() gets
+ * built into the resulting code. This works given BPF back end does not
+ * implement __builtin_trap(). This is useful to assert that certain paths
+ * of the program code are never used and hence eliminated by the compiler.
+ *
+ * For example, consider a switch statement that covers known cases used by
+ * the program. __bpf_unreachable() can then reside in the default case. If
+ * the program gets extended such that a case is not covered in the switch
+ * statement, then it will throw a build error due to the default case not
+ * being compiled out.
+ */
+#ifndef __bpf_unreachable
+# define __bpf_unreachable() __builtin_trap()
+#endif
+
+/*
+ * Helper function to perform a tail call with a constant/immediate map slot.
+ */
+#if __clang_major__ >= 8 && defined(__bpf__)
+static __always_inline void
+bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
+{
+ if (!__builtin_constant_p(slot))
+ __bpf_unreachable();
+
+ /*
+ * Provide a hard guarantee that LLVM won't optimize setting r2 (map
+ * pointer) and r3 (constant map index) from _different paths_ ending
+ * up at the _same_ call insn as otherwise we won't be able to use the
+ * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
+ * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
+ * tracking for prog array pokes") for details on verifier tracking.
+ *
+ * Note on clobber list: we need to stay in-line with BPF calling
+ * convention, so even if we don't end up using r0, r4, r5, we need
+ * to mark them as clobber so that LLVM doesn't end up using them
+ * before / after the call.
+ */
+ asm volatile("r1 = %[ctx]\n\t"
+ "r2 = %[map]\n\t"
+ "r3 = %[slot]\n\t"
+ "call 12"
+ :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
+ : "r0", "r1", "r2", "r3", "r4", "r5");
+}
+#endif
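+
+/*
+ * Illustrative usage sketch (hypothetical 'example_' names): tail-calling into
+ * a fixed slot of a program array. The slot argument must be a compile-time
+ * constant, otherwise the __builtin_constant_p() check above fails the build
+ * via __bpf_unreachable().
+ *
+ *   struct {
+ *       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ *       __uint(max_entries, 2);
+ *       __type(key, __u32);
+ *       __type(value, __u32);
+ *   } example_progs SEC(".maps");
+ *
+ *   SEC("kprobe/vfs_read")
+ *   int example_entry(struct pt_regs *ctx)
+ *   {
+ *       bpf_tail_call_static(ctx, &example_progs, 0);
+ *       // Only reached if the tail call fails (e.g. slot 0 is empty).
+ *       return 0;
+ *   }
+ */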
+
+/*
+ * Helper structure used by eBPF C program
+ * to describe BPF map attributes to libbpf loader
+ */
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+ unsigned int map_flags;
+};
+
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
+enum libbpf_tristate {
+ TRI_NO = 0,
+ TRI_YES = 1,
+ TRI_MODULE = 2,
+};
+
+#define __kconfig __attribute__((section(".kconfig")))
+#define __ksym __attribute__((section(".ksyms")))
+
+#ifndef ___bpf_concat
+#define ___bpf_concat(a, b) a ## b
+#endif
+#ifndef ___bpf_apply
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#endif
+#ifndef ___bpf_nth
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#endif
+#ifndef ___bpf_narg
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#endif
+
+#define ___bpf_fill0(arr, p, x) do {} while (0)
+#define ___bpf_fill1(arr, p, x) arr[p] = x
+#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
+#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
+#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
+#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
+#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
+#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
+#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
+#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
+#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
+#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
+#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
+#define ___bpf_fill(arr, args...) \
+ ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
+
+/*
+ * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
+ * in a structure.
+ */
+#define BPF_SEQ_PRINTF(seq, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/*
+ * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
+ * an array of u64.
+ */
+#define BPF_SNPRINTF(out, out_size, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_snprintf(out, out_size, ___fmt, \
+ ___param, sizeof(___param)); \
+})
+
+#ifdef BPF_NO_GLOBAL_DATA
+#define BPF_PRINTK_FMT_MOD
+#else
+#define BPF_PRINTK_FMT_MOD static const
+#endif
+
+#define __bpf_printk(fmt, ...) \
+({ \
+ BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+/*
+ * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
+ * instead of an array of u64.
+ */
+#define __bpf_vprintk(fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_trace_vprintk(___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
+ * Otherwise use __bpf_vprintk
+ */
+#define ___bpf_pick_printk(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
+ __bpf_printk /*1*/, __bpf_printk /*0*/)
+
+/* Helper macro to print out debug messages */
+#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
+
+#endif
diff --git a/pkg/collector/bpf/lib/bpf_path.h b/pkg/collector/bpf/lib/bpf_path.h
new file mode 100644
index 00000000..c96986c8
--- /dev/null
+++ b/pkg/collector/bpf/lib/bpf_path.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+// Many utility functions here are adapted from the https://github.com/cilium/tetragon project
+
+#ifndef _BPF_VFS_EVENT__
+#define _BPF_VFS_EVENT__
+
+/* __mnt_path_local flags */
+// #define UNRESOLVED_MOUNT_POINTS 0x01 // (deprecated)
+// this error is returned by __mnt_path_local in the following cases:
+// - the path walk did not conclude (too many dentries)
+// - the path was too long to fit in the buffer
+#define UNRESOLVED_PATH_COMPONENTS 0x02
+
+#define PROBE_MNT_ITERATIONS 8
+#define ENAMETOOLONG 36 /* File name too long */
+#define MAX_BUF_LEN 4096
+
+/* buffer in the heap */
+struct buffer_heap_map_value {
+ unsigned char buf[MAX_BUF_LEN + 256];
+};
+
+/* per CPU buffer map for storing resolved mount path */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct buffer_heap_map_value);
+} buffer_heap_map SEC(".maps");
+
+/* struct to keep mount path data */
+struct mnt_path_data {
+ char *bf;
+ struct mount *mnt;
+ struct dentry *prev_de;
+ char *bptr;
+ int blen;
+ bool resolved;
+};
+
+#define offsetof_btf(s, memb) ((size_t)((char *)_(&((s *)0)->memb) - (char *)0))
+
+#define container_of_btf(ptr, type, member) \
+ ({ \
+ void *__mptr = (void *)(ptr); \
+ ((type *)(__mptr - offsetof_btf(type, member))); \
+ })
+
+/**
+ * real_mount() returns the struct mount that contains the given vfsmount.
+ * @mnt: pointer to the vfsmount of the path
+ *
+ * Returns a pointer to the enclosing struct mount.
+ */
+FUNC_INLINE struct mount* real_mount(struct vfsmount *mnt)
+{
+ return container_of_btf(mnt, struct mount, mnt);
+}
+
+/**
+ * IS_ROOT() returns true if the current path component has reached the root.
+ * @dentry: pointer to the dentry of the path
+ *
+ * Returns true if the given dentry is its own parent, i.e. a root dentry.
+ */
+FUNC_INLINE bool IS_ROOT(struct dentry *dentry)
+{
+ struct dentry *d_parent;
+
+ bpf_probe_read(&d_parent, sizeof(d_parent), _(&dentry->d_parent));
+
+ return (dentry == d_parent);
+}
+
+/**
+ * prepend_name() prepends the mount path name for current path.
+ * @buf: buffer where name will be prepended
+ * @bufptr: pointer to the buf
+ * @buflen: current buffer length
+ * @name: name to be prepended to the buffer
+ * @namelen: length of name
+ *
+ * Returns 0 on update and -ENAMETOOLONG on errors.
+ */
+FUNC_INLINE int prepend_name(char *buf, char **bufptr, int *buflen, const unsigned char *name, int namelen)
+{
+ // contains 1 if the buffer is large enough to contain the whole name and a slash prefix
+ bool write_slash = 1;
+
+	// Ensure namelen will not overflow, to keep the verifier happy
+ if (namelen < 0 || namelen > 256)
+ return -ENAMETOOLONG;
+
+ s64 buffer_offset = (s64)(*bufptr) - (s64)buf;
+
+ // Change name and namelen to fit in the buffer.
+ // We prefer to store the part of it that fits rather than discard it.
+ if (namelen >= *buflen) {
+ name += namelen - *buflen;
+ namelen = *buflen;
+ write_slash = 0;
+ }
+
+ *buflen -= (namelen + write_slash);
+
+ if (namelen + write_slash > buffer_offset)
+ return -ENAMETOOLONG;
+
+ buffer_offset -= (namelen + write_slash);
+
+	// This will never happen. buffer_offset is the difference between the
+	// current buffer pointer and the initial one, which is at most
+	// MAX_BUF_LEN (4096) bytes, i.e. the initial size.
+	// Needed to bound the offset for the bpf_probe_read call.
+ if (buffer_offset < 0 || buffer_offset >= MAX_BUF_LEN)
+ return -ENAMETOOLONG;
+
+ if (write_slash)
+ buf[buffer_offset] = '/';
+
+ // This ensures that namelen is < 256, which is aligned with kernel's max dentry name length
+ // that is 255 (https://elixir.bootlin.com/linux/v5.10/source/include/uapi/linux/limits.h#L12).
+ // Needed to bound that for probe_read call.
+ asm volatile("%[namelen] &= 0xff;\n"
+ : [namelen] "+r"(namelen));
+ bpf_probe_read(buf + buffer_offset + write_slash, namelen * sizeof(const unsigned char), name);
+
+ *bufptr = buf + buffer_offset;
+
+ return write_slash ? 0 : -ENAMETOOLONG;
+}
+
+/**
+ * mnt_path_read() updates the path buffer with the current mount path component.
+ * @data: pointer to the mount path data for the current iteration
+ *
+ * Returns 0 to continue the walk and 1 to stop it, either because the path
+ * was fully resolved or because an error occurred.
+ */
+FUNC_INLINE long mnt_path_read(struct mnt_path_data *data)
+{
+ struct dentry *curr_de;
+ struct dentry *prev_de = data->prev_de;
+ struct mount *mnt = data->mnt;
+ struct mount *mnt_parent;
+ const unsigned char *name;
+ int len;
+ int error;
+
+ bpf_probe_read(&curr_de, sizeof(curr_de), _(&mnt->mnt_mountpoint));
+
+ /* Global root? */
+ if (curr_de == prev_de || IS_ROOT(curr_de)) {
+
+ // resolved all path components successfully
+ data->resolved = true;
+
+ return 1;
+ }
+ bpf_probe_read(&name, sizeof(name), _(&curr_de->d_name.name));
+ bpf_probe_read(&len, sizeof(len), _(&curr_de->d_name.len));
+ bpf_probe_read(&mnt_parent, sizeof(mnt_parent), _(&mnt->mnt_parent));
+
+ error = prepend_name(data->bf, &data->bptr, &data->blen, name, len);
+	// This happens when the dentry name does not fit in the buffer.
+	// We stop the loop with resolved == false and set the proper
+	// error value before the function returns.
+ if (error)
+ return 1;
+
+ data->prev_de = curr_de;
+ data->mnt = mnt_parent;
+
+ return 0;
+}
+
+/**
+ * Convenience wrapper around mnt_path_read() for use with the bpf_loop() helper.
+ */
+#if defined(__KERNEL_POST_v62)
+static long mnt_path_read_v61(__u32 index, void *data)
+{
+ return mnt_path_read(data);
+}
+#endif
+
+/**
+ * prepend_mnt_path() resolves the mount path of the file into the provided buffer.
+ * @file: pointer to the file that we want to resolve
+ * @bf: buffer where the path will be stored (this should always be the value of the 'buffer_heap_map' map)
+ * @buffer: in/out pointer into 'bf'; on return it points at the start of the resolved path
+ * @buflen: available buffer size to store the path (currently MAX_BUF_LEN in all cases)
+ *
+ * Returns 0 on success and UNRESOLVED_PATH_COMPONENTS if the path could not be fully resolved.
+ */
+FUNC_INLINE int prepend_mnt_path(struct file *file, char *bf, char **buffer, int *buflen)
+{
+ struct mnt_path_data data = {
+ .bf = bf,
+ .bptr = *buffer,
+ .blen = *buflen,
+ .prev_de = NULL,
+ };
+ int error = 0;
+ struct vfsmount *vfsmnt;
+
+ bpf_probe_read(&vfsmnt, sizeof(vfsmnt), _(&file->f_path.mnt));
+ data.mnt = real_mount(vfsmnt);
+
+#if defined(__KERNEL_POST_v62)
+ bpf_loop(PROBE_MNT_ITERATIONS, mnt_path_read_v61, (void *)&data, 0);
+#else
+#pragma unroll
+ for (int i = 0; i < PROBE_MNT_ITERATIONS; ++i) {
+ if (mnt_path_read(&data))
+ break;
+ }
+#endif /* __KERNEL_POST_v62 */
+
+ if (data.bptr == *buffer) {
+ *buflen = 0;
+
+ return 0;
+ }
+
+ if (!data.resolved)
+ error = UNRESOLVED_PATH_COMPONENTS;
+
+ *buffer = data.bptr;
+ *buflen = data.blen;
+
+ return error;
+}
+
+/**
+ * __mnt_path_local() returns the mount path of the file.
+ * @file: pointer to the file that we want to resolve
+ * @buf: buffer where the path will be stored (this should always be the value of the 'buffer_heap_map' map)
+ * @buflen: available buffer size to store the path (currently MAX_BUF_LEN in all cases)
+ *
+ * Input buffer layout:
+ * <-- buflen -->
+ * -----------------------------
+ * | |
+ * -----------------------------
+ * ^
+ * |
+ * buf
+ *
+ *
+ * Output variables:
+ * - 'buf' is where the path is stored (>= compared to the input argument)
+ * - 'buflen' the size of the resolved path (0 <= buflen <= MAX_BUF_LEN). Will not be negative. If buflen == 0 nothing is written to the buffer.
+ * - 'error' 0 in case of success or UNRESOLVED_PATH_COMPONENTS in the case where the path is larger than the provided buffer.
+ *
+ * Output buffer layout:
+ * <-- buflen -->
+ * -----------------------------
+ * | /etc/passwd|
+ * -----------------------------
+ * ^
+ * |
+ * buf
+ *
+ * ps. The size of the path will be (initial value of buflen) - (return value of buflen) if (buflen != 0)
+ */
+FUNC_INLINE char* __mnt_path_local(struct file *file, char *buf, int *buflen, int *error)
+{
+ char *res = buf + *buflen;
+
+ *error = prepend_mnt_path(file, buf, &res, buflen);
+
+ return res;
+}
+
+/**
+ * Entry point for mount path resolution.
+ *
+ * This function allocates a buffer from 'buffer_heap_map' map and calls
+ * __mnt_path_local. After __mnt_path_local returns, it also does the appropriate
+ * calculations on the buffer size (check __mnt_path_local comment).
+ *
+ * Returns the buffer where the path is stored. 'buflen' is the size of the
+ * resolved path (0 <= buflen <= MAX_BUF_LEN) and will not be negative. If
+ * buflen == 0 nothing is written to the buffer (the returned pointer is still valid).
+ * 'error' is 0 in case of success or UNRESOLVED_PATH_COMPONENTS in the case
+ * where the path is larger than the provided buffer.
+ */
+FUNC_INLINE char* mnt_path_local(struct file *file, int *buflen, int *error)
+{
+ int zero = 0;
+ char *buffer = 0;
+
+ buffer = bpf_map_lookup_elem(&buffer_heap_map, &zero);
+ if (!buffer)
+ return 0;
+
+ *buflen = MAX_BUF_LEN;
+ buffer = __mnt_path_local(file, buffer, buflen, error);
+ if (*buflen > 0)
+ *buflen = MAX_BUF_LEN - *buflen;
+
+ return buffer;
+}
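+
+/*
+ * Illustrative usage sketch (hypothetical 'example_' name): resolving the mount
+ * path of the file passed to a VFS kprobe. PT_REGS_PARM1() is assumed to come
+ * from the accompanying libbpf/bpf_tracing.h include in the consuming program.
+ *
+ *   SEC("kprobe/vfs_write")
+ *   int example_resolve_mount(struct pt_regs *ctx)
+ *   {
+ *       struct file *file = (struct file *)PT_REGS_PARM1(ctx);
+ *       int buflen = 0, error = 0;
+ *       char *path;
+ *
+ *       path = mnt_path_local(file, &buflen, &error);
+ *       if (!path || buflen <= 0)
+ *           return 0;
+ *
+ *       // 'path' points into the per-CPU buffer_heap_map value and holds
+ *       // 'buflen' bytes of the resolved mount path.
+ *       return 0;
+ *   }
+ */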
+
+#endif
diff --git a/pkg/collector/bpf/lib/config.h b/pkg/collector/bpf/lib/config.h
new file mode 100644
index 00000000..81a19381
--- /dev/null
+++ b/pkg/collector/bpf/lib/config.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef __CONF_
+#define __CONF_
+
+/* Runtime configuration */
+struct conf {
+	__u64 cgrp_subsys_idx; /* Index of the tracked cgroup subsystem in the kernel's compile-time cgroup_subsys list */
+	__u64 cgrp_fs_magic; /* CGROUP_SUPER_MAGIC (cgroup v1) or CGROUP2_SUPER_MAGIC (cgroup v2) */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct conf);
+} conf_map SEC(".maps");
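+
+/*
+ * Illustrative sketch of how this map is expected to be populated: user space
+ * writes a single entry at key 0 before attaching the programs (the BPF side
+ * falls back to defaults if the entry is missing). The concrete values below
+ * are examples only; the subsystem index is whatever user space discovers from
+ * /proc/cgroups (e.g. 4 for 'memory').
+ *
+ *   __u32 key = 0;
+ *   struct conf cfg = {
+ *       .cgrp_subsys_idx = 4,                   // e.g. 'memory' from /proc/cgroups
+ *       .cgrp_fs_magic   = CGROUP2_SUPER_MAGIC, // or the cgroup v1 super magic
+ *   };
+ *   // ...then update the map from the loader with a bpf_map_update_elem()-style call.
+ */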
+
+#endif // __CONF_
diff --git a/pkg/collector/bpf/libbpf/bpf_core_read.h b/pkg/collector/bpf/libbpf/bpf_core_read.h
new file mode 100644
index 00000000..f013f1c8
--- /dev/null
+++ b/pkg/collector/bpf/libbpf/bpf_core_read.h
@@ -0,0 +1,484 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef __BPF_CORE_READ_H__
+#define __BPF_CORE_READ_H__
+
+/*
+ * enum bpf_field_info_kind is passed as a second argument into
+ * __builtin_preserve_field_info() built-in to get a specific aspect of
+ * a field, captured as a first argument. __builtin_preserve_field_info(field,
+ * info_kind) returns __u32 integer and produces BTF field relocation, which
+ * is understood and processed by libbpf during BPF object loading. See
+ * selftests/bpf for examples.
+ */
+enum bpf_field_info_kind {
+ BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_FIELD_BYTE_SIZE = 1,
+ BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_FIELD_SIGNED = 3,
+ BPF_FIELD_LSHIFT_U64 = 4,
+ BPF_FIELD_RSHIFT_U64 = 5,
+};
+
+/* second argument to __builtin_btf_type_id() built-in */
+enum bpf_type_id_kind {
+ BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */
+ BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */
+};
+
+/* second argument to __builtin_preserve_type_info() built-in */
+enum bpf_type_info_kind {
+ BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
+ BPF_TYPE_SIZE = 1, /* type size in target kernel */
+ BPF_TYPE_MATCHES = 2, /* type match in target kernel */
+};
+
+/* second argument to __builtin_preserve_enum_value() built-in */
+enum bpf_enum_value_kind {
+ BPF_ENUMVAL_EXISTS = 0, /* enum value existence in kernel */
+ BPF_ENUMVAL_VALUE = 1, /* enum value value relocation */
+};
+
+#define __CORE_RELO(src, field, info) \
+ __builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
+ bpf_probe_read_kernel( \
+ (void *)dst, \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#else
+/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
+ * for big-endian we need to adjust destination pointer accordingly, based on
+ * field byte size
+ */
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
+ bpf_probe_read_kernel( \
+ (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#endif
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * All this is done in relocatable manner, so bitfield changes such as
+ * signedness, bit size, offset changes, this will be handled automatically.
+ * This version of macro is using bpf_probe_read_kernel() to read underlying
+ * integer storage. Macro functions as an expression and its return type is
+ * bpf_probe_read_kernel()'s return value: 0, on success, <0 on error.
+ */
+#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
+ unsigned long long val = 0; \
+ \
+ __CORE_BITFIELD_PROBE_READ(&val, s, field); \
+ val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+ if (__CORE_RELO(s, field, SIGNED)) \
+ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
+ else \
+ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
+ val; \
+})
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * This version of macro is using direct memory reads and should be used from
+ * BPF program types that support such functionality (e.g., typed raw
+ * tracepoints).
+ */
+#define BPF_CORE_READ_BITFIELD(s, field) ({ \
+ const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+ unsigned long long val; \
+ \
+ /* This is a so-called barrier_var() operation that makes specified \
+ * variable "a black box" for optimizing compiler. \
+ * It forces compiler to perform BYTE_OFFSET relocation on p and use \
+ * its calculated value in the switch below, instead of applying \
+ * the same relocation 4 times for each individual memory load. \
+ */ \
+ asm volatile("" : "=r"(p) : "0"(p)); \
+ \
+ switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
+ case 1: val = *(const unsigned char *)p; break; \
+ case 2: val = *(const unsigned short *)p; break; \
+ case 4: val = *(const unsigned int *)p; break; \
+ case 8: val = *(const unsigned long long *)p; break; \
+ } \
+ val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+ if (__CORE_RELO(s, field, SIGNED)) \
+ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
+ else \
+ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
+ val; \
+})
+
+#define ___bpf_field_ref1(field) (field)
+#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
+#define ___bpf_field_ref(args...) \
+ ___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
+
+/*
+ * Convenience macro to check that a field actually exists in the target kernel.
+ * Returns:
+ * 1, if matching field is present in target kernel;
+ * 0, if no matching field found.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_exists(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_exists(struct my_type, my_field).
+ */
+#define bpf_core_field_exists(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS)
+
+/*
+ * Convenience macro to get the byte size of a field. Works for integers,
+ * struct/unions, pointers, arrays, and enums.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_size(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_size(struct my_type, my_field).
+ */
+#define bpf_core_field_size(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE)
+
+/*
+ * Convenience macro to get field's byte offset.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_offset(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_offset(struct my_type, my_field).
+ */
+#define bpf_core_field_offset(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET)
+
+/*
+ * Convenience macro to get BTF type ID of a specified type, using a local BTF
+ * information. Return 32-bit unsigned integer with type ID from program's own
+ * BTF. Always succeeds.
+ */
+#define bpf_core_type_id_local(type) \
+ __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+
+/*
+ * Convenience macro to get BTF type ID of a target kernel's type that matches
+ * specified local type.
+ * Returns:
+ * - valid 32-bit unsigned type ID in kernel BTF;
+ * - 0, if no matching type was found in a target kernel BTF.
+ */
+#define bpf_core_type_id_kernel(type) \
+ __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+
+/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) exists in a target kernel.
+ * Returns:
+ * 1, if such type is present in target kernel's BTF;
+ * 0, if no matching type is found.
+ */
+#define bpf_core_type_exists(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+
+/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) "matches" that in a target kernel.
+ * Returns:
+ * 1, if the type matches in the target kernel's BTF;
+ * 0, if the type does not match any in the target kernel
+ */
+#define bpf_core_type_matches(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+
+/*
+ * Convenience macro to get the byte size of a provided named type
+ * (struct/union/enum/typedef) in a target kernel.
+ * Returns:
+ * >= 0 size (in bytes), if type is present in target kernel's BTF;
+ * 0, if no matching type is found.
+ */
+#define bpf_core_type_size(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+
+/*
+ * Convenience macro to check that provided enumerator value is defined in
+ * a target kernel.
+ * Returns:
+ * 1, if specified enum type and its enumerator value are present in target
+ * kernel's BTF;
+ * 0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+
+/*
+ * Convenience macro to get the integer value of an enumerator value in
+ * a target kernel.
+ * Returns:
+ * 64-bit value, if specified enum type and its enumerator value are
+ * present in target kernel's BTF;
+ * 0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value(enum_type, enum_value) \
+ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
+
+/*
+ * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
+ * offset relocation for source address using __builtin_preserve_access_index()
+ * built-in, provided by Clang.
+ *
+ * __builtin_preserve_access_index() takes as an argument an expression of
+ * taking an address of a field within struct/union. It makes compiler emit
+ * a relocation, which records BTF type ID describing root struct/union and an
+ * accessor string which describes exact embedded field that was used to take
+ * an address. See detailed description of this relocation format and
+ * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
+ *
+ * This relocation allows libbpf to adjust BPF instruction to use correct
+ * actual field offset, based on target kernel BTF type that matches original
+ * (local) BTF, used to record relocation.
+ */
+#define bpf_core_read(dst, sz, src) \
+ bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src))
+
+/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
+#define bpf_core_read_user(dst, sz, src) \
+ bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src))
+/*
+ * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
+ * additionally emitting BPF CO-RE field relocation for specified source
+ * argument.
+ */
+#define bpf_core_read_str(dst, sz, src) \
+ bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
+
+/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
+#define bpf_core_read_user_str(dst, sz, src) \
+ bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
+
+#define ___concat(a, b) a ## b
+#define ___apply(fn, n) ___concat(fn, n)
+#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
+
+/*
+ * return number of provided arguments; used for switch-based variadic macro
+ * definitions (see ___last, ___arrow, etc below)
+ */
+#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+/*
+ * return 0 if no arguments are passed, N - otherwise; used for
+ * recursively-defined macros to specify termination (0) case, and generic
+ * (N) case (e.g., ___read_ptrs, ___core_read)
+ */
+#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___last1(x) x
+#define ___last2(a, x) x
+#define ___last3(a, b, x) x
+#define ___last4(a, b, c, x) x
+#define ___last5(a, b, c, d, x) x
+#define ___last6(a, b, c, d, e, x) x
+#define ___last7(a, b, c, d, e, f, x) x
+#define ___last8(a, b, c, d, e, f, g, x) x
+#define ___last9(a, b, c, d, e, f, g, h, x) x
+#define ___last10(a, b, c, d, e, f, g, h, i, x) x
+#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)
+
+#define ___nolast2(a, _) a
+#define ___nolast3(a, b, _) a, b
+#define ___nolast4(a, b, c, _) a, b, c
+#define ___nolast5(a, b, c, d, _) a, b, c, d
+#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
+#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
+#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
+#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
+#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
+#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)
+
+#define ___arrow1(a) a
+#define ___arrow2(a, b) a->b
+#define ___arrow3(a, b, c) a->b->c
+#define ___arrow4(a, b, c, d) a->b->c->d
+#define ___arrow5(a, b, c, d, e) a->b->c->d->e
+#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
+#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
+#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
+#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
+#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
+#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
+
+#define ___type(...) typeof(___arrow(__VA_ARGS__))
+
+#define ___read(read_fn, dst, src_type, src, accessor) \
+ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
+
+/* "recursively" read a sequence of inner pointers using local __t var */
+#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a);
+#define ___rd_last(fn, ...) \
+ ___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
+#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__)
+#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___read_ptrs(fn, src, ...) \
+ ___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__)
+
+#define ___core_read0(fn, fn_ptr, dst, src, a) \
+ ___read(fn, dst, ___type(src), src, a);
+#define ___core_readN(fn, fn_ptr, dst, src, ...) \
+ ___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \
+ ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
+ ___last(__VA_ARGS__));
+#define ___core_read(fn, fn_ptr, dst, src, a, ...) \
+ ___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \
+ src, a, ##__VA_ARGS__)
+
+/*
+ * BPF_CORE_READ_INTO() is a more performance-conscious variant of
+ * BPF_CORE_READ(), in which final field is read into user-provided storage.
+ * See BPF_CORE_READ() below for more details on general usage.
+ */
+#define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read, bpf_core_read, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * Variant of BPF_CORE_READ_INTO() for reading from user-space memory.
+ *
+ * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
+ */
+#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read_user, bpf_core_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
+#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO().
+ *
+ * As no CO-RE relocations are emitted, source types can be arbitrary and are
+ * not restricted to kernel types only.
+ */
+#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_user, bpf_probe_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as
+ * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
+ * corresponding error code) bpf_core_read_str() for final string read.
+ */
+#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read_str, bpf_core_read, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory.
+ *
+ * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
+ */
+#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read_user_str, bpf_core_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
+#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO().
+ *
+ * As no CO-RE relocations are emitted, source types can be arbitrary and are
+ * not restricted to kernel types only.
+ */
+#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially
+ * when there are few pointer chasing steps.
+ * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
+ * int x = s->a.b.c->d.e->f->g;
+ * can be succinctly achieved using BPF_CORE_READ as:
+ * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
+ *
+ * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
+ * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
+ * equivalent to:
+ * 1. const void *__t = s->a.b.c;
+ * 2. __t = __t->d.e;
+ * 3. __t = __t->f;
+ * 4. return __t->g;
+ *
+ * Equivalence is logical, because there is a heavy type casting/preservation
+ * involved, as well as all the reads are happening through
+ * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
+ * emit CO-RE relocations.
+ *
+ * N.B. Only up to 9 "field accessors" are supported, which should be more
+ * than enough for any practical purpose.
+ */
+#define BPF_CORE_READ(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
+
+/*
+ * Variant of BPF_CORE_READ() for reading from user-space memory.
+ *
+ * NOTE: all the source types involved are still *kernel types* and need to
+ * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will
+ * fail. Custom user types are not relocatable with CO-RE.
+ * The typical situation in which BPF_CORE_READ_USER() might be used is to
+ * read kernel UAPI types from the user-space memory passed in as a syscall
+ * input argument.
+ */
+#define BPF_CORE_READ_USER(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ() */
+#define BPF_PROBE_READ(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
+
+/*
+ * Non-CO-RE variant of BPF_CORE_READ_USER().
+ *
+ * As no CO-RE relocations are emitted, source types can be arbitrary and are
+ * not restricted to kernel types only.
+ */
+#define BPF_PROBE_READ_USER(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
+
+#endif
diff --git a/pkg/collector/bpf/libbpf/bpf_helper_defs.h b/pkg/collector/bpf/libbpf/bpf_helper_defs.h
new file mode 100644
index 00000000..9c9ca45b
--- /dev/null
+++ b/pkg/collector/bpf/libbpf/bpf_helper_defs.h
@@ -0,0 +1,4167 @@
+/* This is auto-generated file. See bpf_doc.py for details. */
+
+/* Forward declarations of BPF structs */
+struct bpf_fib_lookup;
+struct bpf_sk_lookup;
+struct bpf_perf_event_data;
+struct bpf_perf_event_value;
+struct bpf_pidns_info;
+struct bpf_redir_neigh;
+struct bpf_sock;
+struct bpf_sock_addr;
+struct bpf_sock_ops;
+struct bpf_sock_tuple;
+struct bpf_spin_lock;
+struct bpf_sysctl;
+struct bpf_tcp_sock;
+struct bpf_tunnel_key;
+struct bpf_xfrm_state;
+struct linux_binprm;
+struct pt_regs;
+struct sk_reuseport_md;
+struct sockaddr;
+struct tcphdr;
+struct seq_file;
+struct tcp6_sock;
+struct tcp_sock;
+struct tcp_timewait_sock;
+struct tcp_request_sock;
+struct udp6_sock;
+struct unix_sock;
+struct task_struct;
+struct __sk_buff;
+struct sk_msg_md;
+struct xdp_md;
+struct path;
+struct btf_ptr;
+struct inode;
+struct socket;
+struct file;
+struct bpf_timer;
+
+/*
+ * bpf_map_lookup_elem
+ *
+ * Perform a lookup in *map* for an entry associated to *key*.
+ *
+ * Returns
+ * Map value associated to *key*, or **NULL** if no entry was
+ * found.
+ */
+static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;
+
+/*
+ * bpf_map_update_elem
+ *
+ * Add or update the value of the entry associated to *key* in
+ * *map* with *value*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * Flag value **BPF_NOEXIST** cannot be used for maps of types
+ * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
+ * elements always exist), the helper would return an error.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2;
+
+/*
+ * bpf_map_delete_elem
+ *
+ * Delete entry with *key* from *map*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_delete_elem)(void *map, const void *key) = (void *) 3;
+
+/*
+ * bpf_probe_read
+ *
+ * For tracing programs, safely attempt to read *size* bytes from
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use **bpf_probe_read_user**\ () or
+ * **bpf_probe_read_kernel**\ () instead.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_read)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 4;
+
+/*
+ * bpf_ktime_get_ns
+ *
+ * Return the time elapsed since system boot, in nanoseconds.
+ * Does not include time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
+ *
+ * Returns
+ * Current *ktime*.
+ */
+static __u64 (*bpf_ktime_get_ns)(void) = (void *) 5;
+
+/*
+ * bpf_trace_printk
+ *
+ * This helper is a "printk()-like" facility for debugging. It
+ * prints a message defined by format *fmt* (of size *fmt_size*)
+ * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
+ * available. It can take up to three additional **u64**
+ * arguments (as an eBPF helpers, the total number of arguments is
+ * limited to five).
+ *
+ * Each time the helper is called, it appends a line to the trace.
+ * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
+ * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
+ * The format of the trace is customizable, and the exact output
+ * one will get depends on the options set in
+ * *\/sys/kernel/debug/tracing/trace_options* (see also the
+ * *README* file under the same directory). However, it usually
+ * defaults to something like:
+ *
+ * ::
+ *
+ *              telnet-470  [001] .N.. 419421.045894: 0x00000001: <formatted msg>
+ *
+ * In the above:
+ *
+ * * ``telnet`` is the name of the current task.
+ * * ``470`` is the PID of the current task.
+ * * ``001`` is the CPU number on which the task is
+ * running.
+ * * In ``.N..``, each character refers to a set of
+ * options (whether irqs are enabled, scheduling
+ * options, whether hard/softirqs are running, level of
+ * preempt_disabled respectively). **N** means that
+ * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
+ * are set.
+ * * ``419421.045894`` is a timestamp.
+ * * ``0x00000001`` is a fake value used by BPF for the
+ * instruction pointer register.
+ *              * ``<formatted msg>`` is the message formatted with
+ * *fmt*.
+ *
+ * The conversion specifiers supported by *fmt* are similar, but
+ * more limited than for printk(). They are **%d**, **%i**,
+ * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
+ * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
+ * of field, padding with zeroes, etc.) is available, and the
+ * helper will return **-EINVAL** (but print nothing) if it
+ * encounters an unknown specifier.
+ *
+ * Also, note that **bpf_trace_printk**\ () is slow, and should
+ * only be used for debugging purposes. For this reason, a notice
+ * block (spanning several lines) is printed to kernel logs and
+ * states that the helper should not be used "for production use"
+ * the first time this helper is used (or more precisely, when
+ * **trace_printk**\ () buffers are allocated). For passing values
+ * to user space, perf events should be preferred.
+ *
+ * Returns
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ */
+static long (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6;
+
+/*
+ * bpf_get_prandom_u32
+ *
+ * Get a pseudo-random number.
+ *
+ * From a security point of view, this helper uses its own
+ * pseudo-random internal state, and cannot be used to infer the
+ * seed of other random functions in the kernel. However, it is
+ * essential to note that the generator used by the helper is not
+ * cryptographically secure.
+ *
+ * Returns
+ * A random 32-bit unsigned value.
+ */
+static __u32 (*bpf_get_prandom_u32)(void) = (void *) 7;
+
+/*
+ * bpf_get_smp_processor_id
+ *
+ * Get the SMP (symmetric multiprocessing) processor id. Note that
+ * all programs run with migration disabled, which means that the
+ * SMP processor id is stable during all the execution of the
+ * program.
+ *
+ * Returns
+ * The SMP id of the processor running the program.
+ */
+static __u32 (*bpf_get_smp_processor_id)(void) = (void *) 8;
+
+/*
+ * bpf_skb_store_bytes
+ *
+ * Store *len* bytes from address *from* into the packet
+ * associated to *skb*, at *offset*. *flags* are a combination of
+ * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
+ * checksum for the packet after storing the bytes) and
+ * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
+ * **->swhash** and *skb*\ **->l4hash** to 0).
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len, __u64 flags) = (void *) 9;
+
+/*
+ * bpf_l3_csum_replace
+ *
+ * Recompute the layer 3 (e.g. IP) checksum for the packet
+ * associated to *skb*. Computation is incremental, so the helper
+ * must know the former value of the header field that was
+ * modified (*from*), the new value of this field (*to*), and the
+ * number of bytes (2 or 4) for this field, stored in *size*.
+ * Alternatively, it is possible to store the difference between
+ * the previous and the new values of the header field in *to*, by
+ * setting *from* and *size* to 0. For both methods, *offset*
+ * indicates the location of the IP checksum within the packet.
+ *
+ * This helper works in combination with **bpf_csum_diff**\ (),
+ * which does not update the checksum in-place, but offers more
+ * flexibility and can handle sizes larger than 2 or 4 for the
+ * checksum to update.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_l3_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 size) = (void *) 10;
+
+/*
+ * bpf_l4_csum_replace
+ *
+ * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
+ * packet associated to *skb*. Computation is incremental, so the
+ * helper must know the former value of the header field that was
+ * modified (*from*), the new value of this field (*to*), and the
+ * number of bytes (2 or 4) for this field, stored on the lowest
+ * four bits of *flags*. Alternatively, it is possible to store
+ * the difference between the previous and the new values of the
+ * header field in *to*, by setting *from* and the four lowest
+ * bits of *flags* to 0. For both methods, *offset* indicates the
+ * location of the IP checksum within the packet. In addition to
+ * the size of the field, *flags* can be added (bitwise OR) actual
+ * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
+ * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
+ * for updates resulting in a null checksum the value is set to
+ * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
+ * the checksum is to be computed against a pseudo-header.
+ *
+ * This helper works in combination with **bpf_csum_diff**\ (),
+ * which does not update the checksum in-place, but offers more
+ * flexibility and can handle sizes larger than 2 or 4 for the
+ * checksum to update.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_l4_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 flags) = (void *) 11;
+
+/*
+ * bpf_tail_call
+ *
+ * This special helper is used to trigger a "tail call", or in
+ * other words, to jump into another eBPF program. The same stack
+ * frame is used (but values on stack and in registers for the
+ * caller are not accessible to the callee). This mechanism allows
+ * for program chaining, either for raising the maximum number of
+ * available eBPF instructions, or to execute given programs in
+ * conditional blocks. For security reasons, there is an upper
+ * limit to the number of successive tail calls that can be
+ * performed.
+ *
+ * Upon call of this helper, the program attempts to jump into a
+ * program referenced at index *index* in *prog_array_map*, a
+ * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
+ * *ctx*, a pointer to the context.
+ *
+ * If the call succeeds, the kernel immediately runs the first
+ * instruction of the new program. This is not a function call,
+ * and it never returns to the previous program. If the call
+ * fails, then the helper has no effect, and the caller continues
+ * to run its subsequent instructions. A call can fail if the
+ * destination program for the jump does not exist (i.e. *index*
+ * is superior to the number of entries in *prog_array_map*), or
+ * if the maximum number of tail calls has been reached for this
+ * chain of programs. This limit is defined in the kernel by the
+ * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
+ * which is currently set to 33.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_tail_call)(void *ctx, void *prog_array_map, __u32 index) = (void *) 12;
+
+/*
+ * bpf_clone_redirect
+ *
+ * Clone and redirect the packet associated to *skb* to another
+ * net device of index *ifindex*. Both ingress and egress
+ * interfaces can be used for redirection. The **BPF_F_INGRESS**
+ * value in *flags* is used to make the distinction (ingress path
+ * is selected if the flag is present, egress path otherwise).
+ * This is the only flag supported for now.
+ *
+ * In comparison with **bpf_redirect**\ () helper,
+ * **bpf_clone_redirect**\ () has the associated cost of
+ * duplicating the packet buffer, but this can be executed out of
+ * the eBPF program. Conversely, **bpf_redirect**\ () is more
+ * efficient, but it is handled through an action code where the
+ * redirection happens only after the eBPF program has returned.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_clone_redirect)(struct __sk_buff *skb, __u32 ifindex, __u64 flags) = (void *) 13;
+
+/*
+ * bpf_get_current_pid_tgid
+ *
+ *
+ * Returns
+ * A 64-bit integer containing the current tgid and pid, and
+ * created as such:
+ * *current_task*\ **->tgid << 32 \|**
+ * *current_task*\ **->pid**.
+ */
+static __u64 (*bpf_get_current_pid_tgid)(void) = (void *) 14;
+
+/*
+ * bpf_get_current_uid_gid
+ *
+ *
+ * Returns
+ * A 64-bit integer containing the current GID and UID, and
+ * created as such: *current_gid* **<< 32 \|** *current_uid*.
+ */
+static __u64 (*bpf_get_current_uid_gid)(void) = (void *) 15;
+
+/*
+ * bpf_get_current_comm
+ *
+ * Copy the **comm** attribute of the current task into *buf* of
+ * *size_of_buf*. The **comm** attribute contains the name of
+ * the executable (excluding the path) for the current task. The
+ * *size_of_buf* must be strictly positive. On success, the
+ * helper makes sure that the *buf* is NUL-terminated. On failure,
+ * it is filled with zeroes.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_get_current_comm)(void *buf, __u32 size_of_buf) = (void *) 16;
+
+/*
+ * bpf_get_cgroup_classid
+ *
+ * Retrieve the classid for the current task, i.e. for the net_cls
+ * cgroup to which *skb* belongs.
+ *
+ * This helper can be used on TC egress path, but not on ingress.
+ *
+ * The net_cls cgroup provides an interface to tag network packets
+ * based on a user-provided identifier for all traffic coming from
+ * the tasks belonging to the related cgroup. See also the related
+ * kernel documentation, available from the Linux sources in file
+ * *Documentation/admin-guide/cgroup-v1/net_cls.rst*.
+ *
+ * The Linux kernel has two versions for cgroups: there are
+ * cgroups v1 and cgroups v2. Both are available to users, who can
+ * use a mixture of them, but note that the net_cls cgroup is for
+ * cgroup v1 only. This makes it incompatible with BPF programs
+ * run on cgroups, which is a cgroup-v2-only feature (a socket can
+ * only hold data for one version of cgroups at a time).
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
+ * "**y**" or to "**m**".
+ *
+ * Returns
+ * The classid, or 0 for the default unconfigured classid.
+ */
+static __u32 (*bpf_get_cgroup_classid)(struct __sk_buff *skb) = (void *) 17;
+
+/*
+ * bpf_skb_vlan_push
+ *
+ * Push a *vlan_tci* (VLAN tag control information) of protocol
+ * *vlan_proto* to the packet associated to *skb*, then update
+ * the checksum. Note that if *vlan_proto* is different from
+ * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
+ * be **ETH_P_8021Q**.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_vlan_push)(struct __sk_buff *skb, __be16 vlan_proto, __u16 vlan_tci) = (void *) 18;
+
+/*
+ * bpf_skb_vlan_pop
+ *
+ * Pop a VLAN header from the packet associated to *skb*.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_vlan_pop)(struct __sk_buff *skb) = (void *) 19;
+
+/*
+ * bpf_skb_get_tunnel_key
+ *
+ * Get tunnel metadata. This helper takes a pointer *key* to an
+ * empty **struct bpf_tunnel_key** of **size**, that will be
+ * filled with tunnel metadata for the packet associated to *skb*.
+ * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
+ * indicates that the tunnel is based on IPv6 protocol instead of
+ * IPv4.
+ *
+ * The **struct bpf_tunnel_key** is an object that generalizes the
+ * principal parameters used by various tunneling protocols into a
+ * single struct. This way, it can be used to easily make a
+ * decision based on the contents of the encapsulation header,
+ * "summarized" in this struct. In particular, it holds the IP
+ * address of the remote end (IPv4 or IPv6, depending on the case)
+ * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
+ * this struct exposes the *key*\ **->tunnel_id**, which is
+ * generally mapped to a VNI (Virtual Network Identifier), making
+ * it programmable together with the **bpf_skb_set_tunnel_key**\
+ * () helper.
+ *
+ * Let's imagine that the following code is part of a program
+ * attached to the TC ingress interface, on one end of a GRE
+ * tunnel, and is supposed to filter out all messages coming from
+ * remote ends with IPv4 address other than 10.0.0.1:
+ *
+ * ::
+ *
+ * int ret;
+ * struct bpf_tunnel_key key = {};
+ *
+ * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ * if (ret < 0)
+ * return TC_ACT_SHOT; // drop packet
+ *
+ * if (key.remote_ipv4 != 0x0a000001)
+ * return TC_ACT_SHOT; // drop packet
+ *
+ * return TC_ACT_OK; // accept packet
+ *
+ * This interface can also be used with all encapsulation devices
+ * that can operate in "collect metadata" mode: instead of having
+ * one network device per specific configuration, the "collect
+ * metadata" mode only requires a single device where the
+ * configuration can be extracted from this helper.
+ *
+ * This can be used together with various tunnels such as VXLan,
+ * Geneve, GRE or IP in IP (IPIP).
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_get_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 20;
+
+/*
+ * bpf_skb_set_tunnel_key
+ *
+ * Populate tunnel metadata for the packet associated to *skb*. The
+ * tunnel metadata is set to the contents of *key*, of *size*. The
+ * *flags* can be set to a combination of the following values:
+ *
+ * **BPF_F_TUNINFO_IPV6**
+ * Indicate that the tunnel is based on IPv6 protocol
+ * instead of IPv4.
+ * **BPF_F_ZERO_CSUM_TX**
+ * For IPv4 packets, add a flag to tunnel metadata
+ * indicating that checksum computation should be skipped
+ * and checksum set to zeroes.
+ * **BPF_F_DONT_FRAGMENT**
+ * Add a flag to tunnel metadata indicating that the
+ * packet should not be fragmented.
+ * **BPF_F_SEQ_NUMBER**
+ * Add a flag to tunnel metadata indicating that a
+ * sequence number should be added to tunnel header before
+ * sending the packet. This flag was added for GRE
+ * encapsulation, but might be used with other protocols
+ * as well in the future.
+ *
+ * Here is a typical usage on the transmit path:
+ *
+ * ::
+ *
+ * struct bpf_tunnel_key key;
+ * populate key ...
+ * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+ * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
+ *
+ * See also the description of the **bpf_skb_get_tunnel_key**\ ()
+ * helper for additional information.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_set_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 21;
+
+/*
+ * bpf_perf_event_read
+ *
+ * Read the value of a perf event counter. This helper relies on a
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
+ * the perf event counter is selected when *map* is updated with
+ * perf event file descriptors. The *map* is an array whose size
+ * is the number of available CPUs, and each cell contains a value
+ * relative to one CPU. The value to retrieve is indicated by
+ * *flags*, that contains the index of the CPU to look up, masked
+ * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
+ * **BPF_F_CURRENT_CPU** to indicate that the value for the
+ * current CPU should be retrieved.
+ *
+ * Note that before Linux 4.13, only hardware perf events could be
+ * retrieved.
+ *
+ * Also, be aware that the newer helper
+ * **bpf_perf_event_read_value**\ () is recommended over
+ * **bpf_perf_event_read**\ () in general. The latter has some ABI
+ * quirks where error and counter value are used as a return code
+ * (which is wrong to do since ranges may overlap). This issue is
+ * fixed with **bpf_perf_event_read_value**\ (), which at the same
+ * time provides more features over the **bpf_perf_event_read**\
+ * () interface. Please refer to the description of
+ * **bpf_perf_event_read_value**\ () for details.
+ *
+ * Returns
+ * The value of the perf event counter read from the map, or a
+ * negative error code in case of failure.
+ */
+static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) = (void *) 22;
+
+/*
+ * bpf_redirect
+ *
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_clone_redirect**\
+ * (), except that the packet is not cloned, which provides
+ * increased performance.
+ *
+ * Except for XDP, both ingress and egress interfaces can be used
+ * for redirection. The **BPF_F_INGRESS** value in *flags* is used
+ * to make the distinction (ingress path is selected if the flag
+ * is present, egress path otherwise). Currently, XDP only
+ * supports redirection to the egress interface, and accepts no
+ * flag at all.
+ *
+ * The same effect can also be attained with the more generic
+ * **bpf_redirect_map**\ (), which uses a BPF map to store the
+ * redirect target instead of providing it directly to the helper.
+ *
+ * Returns
+ * For XDP, the helper returns **XDP_REDIRECT** on success or
+ * **XDP_ABORTED** on error. For other program types, the values
+ * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
+ * error.
+ */
+static long (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void *) 23;
+
+/*
+ * bpf_get_route_realm
+ *
+ * Retrieve the realm of the route, that is to say the
+ * **tclassid** field of the destination for the *skb*. The
+ * identifier retrieved is a user-provided tag, similar to the
+ * one used with the net_cls cgroup (see description for
+ * **bpf_get_cgroup_classid**\ () helper), but here this tag is
+ * held by a route (a destination entry), not by a task.
+ *
+ * Retrieving this identifier works with the clsact TC egress hook
+ * (see also **tc-bpf(8)**), or alternatively on conventional
+ * classful egress qdiscs, but not on TC ingress path. In case of
+ * clsact TC egress hook, this has the advantage that, internally,
+ * the destination entry has not been dropped yet in the transmit
+ * path. Therefore, the destination entry does not need to be
+ * artificially held via **netif_keep_dst**\ () for a classful
+ * qdisc until the *skb* is freed.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_IP_ROUTE_CLASSID** configuration option.
+ *
+ * Returns
+ * The realm of the route for the packet associated to *skb*, or 0
+ * if none was found.
+ */
+static __u32 (*bpf_get_route_realm)(struct __sk_buff *skb) = (void *) 24;
+
+/*
+ * bpf_perf_event_output
+ *
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * The context of the program, *ctx*, also needs to be passed to
+ * the helper.
+ *
+ * In user space, a program that wants to read the values needs to
+ * call **perf_event_open**\ () on the perf event (either for
+ * one or for all CPUs) and to store the file descriptor into the
+ * *map*. This must be done before the eBPF program can send data
+ * into it. An example is available in file
+ * *samples/bpf/trace_output_user.c* in the Linux kernel source
+ * tree (the eBPF program counterpart is in
+ * *samples/bpf/trace_output_kern.c*).
+ *
+ * **bpf_perf_event_output**\ () achieves better performance
+ * than **bpf_trace_printk**\ () for sharing data with user
+ * space, and is much better suited to streaming data from eBPF
+ * programs.
+ *
+ * Note that this helper is not restricted to tracing use cases
+ * and can be used with programs attached to TC or XDP as well,
+ * where it allows for passing data to user space listeners. Data
+ * can be:
+ *
+ * * Only custom structs,
+ * * Only the packet payload, or
+ * * A combination of both.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 25;
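+
+/*
+ * Usage sketch for bpf_perf_event_output: push a small custom struct to
+ * user space from a tracing program context *ctx*. The
+ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map *events* and the *sample* struct
+ * are hypothetical names chosen for illustration:
+ *
+ * ::
+ *
+ *	struct sample {
+ *		__u32 pid;
+ *		__u64 bytes;
+ *	};
+ *
+ *	struct sample s = {};
+ *
+ *	s.pid = bpf_get_current_pid_tgid() >> 32;
+ *	s.bytes = 4096;
+ *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
+ *			      &s, sizeof(s));
+ */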
+
+/*
+ * bpf_skb_load_bytes
+ *
+ * This helper was provided as an easy way to load data from a
+ * packet. It can be used to load *len* bytes from *offset* from
+ * the packet associated to *skb*, into the buffer pointed by
+ * *to*.
+ *
+ * Since Linux 4.7, usage of this helper has mostly been replaced
+ * by "direct packet access", enabling packet data to be
+ * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
+ * pointing respectively to the first byte of packet data and to
+ * the byte after the last byte of packet data. However, it
+ * remains useful if one wishes to read large quantities of data
+ * at once from a packet into the eBPF stack.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_load_bytes)(const void *skb, __u32 offset, void *to, __u32 len) = (void *) 26;
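+
+/*
+ * Usage sketch for bpf_skb_load_bytes: copy the first 14 bytes (the
+ * Ethernet header) of the packet into a stack buffer, where *skb* is the
+ * program context of a socket filter or TC program:
+ *
+ * ::
+ *
+ *	__u8 eth_hdr[14];
+ *
+ *	if (bpf_skb_load_bytes(skb, 0, eth_hdr, sizeof(eth_hdr)) < 0)
+ *		return 0;	// header not available
+ */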
+
+/*
+ * bpf_get_stackid
+ *
+ * Walk a user or a kernel stack and return its id. To achieve
+ * this, the helper needs *ctx*, which is a pointer to the context
+ * on which the tracing program is executed, and a pointer to a
+ * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * a combination of the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_FAST_STACK_CMP**
+ * Compare stacks by hash only.
+ * **BPF_F_REUSE_STACKID**
+ * If two different stacks hash into the same *stackid*,
+ * discard the old one.
+ *
+ * The stack id retrieved is a 32 bit long integer handle which
+ * can be further combined with other data (including other stack
+ * ids) and used as a key into maps. This can be useful for
+ * generating a variety of graphs (such as flame graphs or off-cpu
+ * graphs).
+ *
+ * For walking a stack, this helper is an improvement over
+ * **bpf_probe_read**\ (), which can be used with unrolled loops
+ * but is not efficient and consumes a lot of eBPF instructions.
+ * Instead, **bpf_get_stackid**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ *
+ * Returns
+ * The positive or null stack id on success, or a negative error
+ * in case of failure.
+ */
+static long (*bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void *) 27;
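+
+/*
+ * Usage sketch for bpf_get_stackid: collect a user-space stack id from a
+ * tracing context *ctx* and count its occurrences. The
+ * **BPF_MAP_TYPE_STACK_TRACE** map *stack_traces* and the hash map
+ * *stack_counts* (keyed by the 32-bit stack id) are hypothetical:
+ *
+ * ::
+ *
+ *	long id;
+ *	__u32 key;
+ *	__u64 one = 1, *count;
+ *
+ *	id = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
+ *	if (id < 0)
+ *		return 0;	// stack could not be walked
+ *	key = id;
+ *	count = bpf_map_lookup_elem(&stack_counts, &key);
+ *	if (count)
+ *		__sync_fetch_and_add(count, 1);
+ *	else
+ *		bpf_map_update_elem(&stack_counts, &key, &one, BPF_NOEXIST);
+ */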
+
+/*
+ * bpf_csum_diff
+ *
+ * Compute a checksum difference, from the raw buffer pointed by
+ * *from*, of length *from_size* (that must be a multiple of 4),
+ * towards the raw buffer pointed by *to*, of size *to_size*
+ * (same remark). An optional *seed* can be added to the value
+ * (this can be cascaded, the seed may come from a previous call
+ * to the helper).
+ *
+ * This is flexible enough to be used in several ways:
+ *
+ * * With *from_size* == 0, *to_size* > 0 and *seed* set to
+ * checksum, it can be used when pushing new data.
+ * * With *from_size* > 0, *to_size* == 0 and *seed* set to
+ * checksum, it can be used when removing data from a packet.
+ * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
+ * can be used to compute a diff. Note that *from_size* and
+ * *to_size* do not need to be equal.
+ *
+ * This helper can be used in combination with
+ * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
+ * which one can feed in the difference computed with
+ * **bpf_csum_diff**\ ().
+ *
+ * Returns
+ * The checksum result, or a negative error code in case of
+ * failure.
+ */
+static __s64 (*bpf_csum_diff)(__be32 *from, __u32 from_size, __be32 *to, __u32 to_size, __wsum seed) = (void *) 28;
+
+/*
+ * bpf_skb_get_tunnel_opt
+ *
+ * Retrieve tunnel options metadata for the packet associated to
+ * *skb*, and store the raw tunnel option data to the buffer *opt*
+ * of *size*.
+ *
+ * This helper can be used with encapsulation devices that can
+ * operate in "collect metadata" mode (please refer to the related
+ * note in the description of **bpf_skb_get_tunnel_key**\ () for
+ * more details). A particular example where this can be used is
+ * in combination with the Geneve encapsulation protocol, where it
+ * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ () helper)
+ * and retrieving arbitrary TLVs (Type-Length-Value headers) from
+ * the eBPF program. This allows for full customization of these
+ * headers.
+ *
+ * Returns
+ * The size of the option data retrieved.
+ */
+static long (*bpf_skb_get_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 29;
+
+/*
+ * bpf_skb_set_tunnel_opt
+ *
+ * Set tunnel options metadata for the packet associated to *skb*
+ * to the option data contained in the raw buffer *opt* of *size*.
+ *
+ * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
+ * helper for additional information.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_set_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 30;
+
+/*
+ * bpf_skb_change_proto
+ *
+ * Change the protocol of the *skb* to *proto*. Currently
+ * supported are transition from IPv4 to IPv6, and from IPv6 to
+ * IPv4. The helper takes care of the groundwork for the
+ * transition, including resizing the socket buffer. The eBPF
+ * program is expected to fill the new headers, if any, via
+ * **skb_store_bytes**\ () and to recompute the checksums with
+ * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
+ * (). The main case for this helper is to perform NAT64
+ * operations out of an eBPF program.
+ *
+ * Internally, the GSO type is marked as dodgy so that headers are
+ * checked and segments are recalculated by the GSO/GRO engine.
+ * The size for GSO target is adapted as well.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_change_proto)(struct __sk_buff *skb, __be16 proto, __u64 flags) = (void *) 31;
+
+/*
+ * bpf_skb_change_type
+ *
+ * Change the packet type for the packet associated to *skb*. This
+ * comes down to setting *skb*\ **->pkt_type** to *type*, except
+ * the eBPF program does not have a write access to *skb*\
+ * **->pkt_type** beside this helper. Using a helper here allows
+ * for graceful handling of errors.
+ *
+ * The major use case is to change incoming *skb*s to
+ * **PACKET_HOST** in a programmatic way instead of having to
+ * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
+ * example.
+ *
+ * Note that *type* only allows certain values. At this time, they
+ * are:
+ *
+ * **PACKET_HOST**
+ * Packet is for us.
+ * **PACKET_BROADCAST**
+ * Send packet to all.
+ * **PACKET_MULTICAST**
+ * Send packet to group.
+ * **PACKET_OTHERHOST**
+ * Send packet to someone else.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_change_type)(struct __sk_buff *skb, __u32 type) = (void *) 32;
+
+/*
+ * bpf_skb_under_cgroup
+ *
+ * Check whether *skb* is a descendant of the cgroup2 held by
+ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
+ *
+ * Returns
+ * The return value depends on the result of the test, and can be:
+ *
+ * * 0, if the *skb* failed the cgroup2 descendant test.
+ * * 1, if the *skb* succeeded the cgroup2 descendant test.
+ * * A negative error code, if an error occurred.
+ */
+static long (*bpf_skb_under_cgroup)(struct __sk_buff *skb, void *map, __u32 index) = (void *) 33;
+
+/*
+ * bpf_get_hash_recalc
+ *
+ * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
+ * not set, in particular if the hash was cleared due to mangling,
+ * recompute this hash. Later accesses to the hash can be done
+ * directly with *skb*\ **->hash**.
+ *
+ * Calling **bpf_set_hash_invalid**\ (), changing a packet
+ * protocol with **bpf_skb_change_proto**\ (), or calling
+ * **bpf_skb_store_bytes**\ () with the
+ * **BPF_F_INVALIDATE_HASH** flag are actions susceptible to clear
+ * the hash and to trigger a new computation for the next call to
+ * **bpf_get_hash_recalc**\ ().
+ *
+ * Returns
+ * The 32-bit hash.
+ */
+static __u32 (*bpf_get_hash_recalc)(struct __sk_buff *skb) = (void *) 34;
+
+/*
+ * bpf_get_current_task
+ *
+ *
+ * Returns
+ * A pointer to the current task struct.
+ */
+static __u64 (*bpf_get_current_task)(void) = (void *) 35;
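+
+/*
+ * Usage sketch for bpf_get_current_task: cast the returned value to a
+ * **struct task_struct** pointer and walk it with CO-RE reads. This
+ * assumes the program includes *vmlinux.h* and *bpf_core_read.h* so that
+ * the task_struct layout and the **BPF_CORE_READ** macro are available:
+ *
+ * ::
+ *
+ *	struct task_struct *task;
+ *	__u32 ppid;
+ *
+ *	task = (struct task_struct *)bpf_get_current_task();
+ *	ppid = BPF_CORE_READ(task, real_parent, tgid);
+ */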
+
+/*
+ * bpf_probe_write_user
+ *
+ * Attempt in a safe way to write *len* bytes from the buffer
+ * *src* to *dst* in memory. It only works for threads that are in
+ * user context, and *dst* must be a valid user space address.
+ *
+ * This helper should not be used to implement any kind of
+ * security mechanism because of TOC-TOU attacks, but rather to
+ * debug, divert, and manipulate execution of semi-cooperative
+ * processes.
+ *
+ * Keep in mind that this feature is meant for experiments, and it
+ * has a risk of crashing the system and running programs.
+ * Therefore, when an eBPF program using this helper is attached,
+ * a warning including PID and process name is printed to kernel
+ * logs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_write_user)(void *dst, const void *src, __u32 len) = (void *) 36;
+
+/*
+ * bpf_current_task_under_cgroup
+ *
+ * Check whether the probe is being run in the context of a given
+ * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
+ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
+ *
+ * Returns
+ * The return value depends on the result of the test, and can be:
+ *
+ * * 0, if current task does not belong to the cgroup2.
+ * * 1, if current task belongs to the cgroup2.
+ * * A negative error code, if an error occurred.
+ */
+static long (*bpf_current_task_under_cgroup)(void *map, __u32 index) = (void *) 37;
+
+/*
+ * bpf_skb_change_tail
+ *
+ * Resize (trim or grow) the packet associated to *skb* to the
+ * new *len*. The *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * The basic idea is that the helper performs the needed work to
+ * change the size of the packet, then the eBPF program rewrites
+ * the rest via helpers like **bpf_skb_store_bytes**\ (),
+ * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
+ * and others. This helper is a slow path utility intended for
+ * replies with control messages. And because it is targeted for
+ * slow path, the helper itself can afford to be slow: it
+ * implicitly linearizes, unclones and drops offloads from the
+ * *skb*.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 38;
+
+/*
+ * bpf_skb_pull_data
+ *
+ * Pull in non-linear data in case the *skb* is non-linear and not
+ * all of *len* are part of the linear section. Make *len* bytes
+ * from *skb* readable and writable. If a zero value is passed for
+ * *len*, then the whole length of the *skb* is pulled.
+ *
+ * This helper is only needed for reading and writing with direct
+ * packet access.
+ *
+ * For direct packet access, testing that offsets to access
+ * are within packet boundaries (test on *skb*\ **->data_end**) is
+ * susceptible to fail if offsets are invalid, or if the requested
+ * data is in non-linear parts of the *skb*. On failure the
+ * program can just bail out, or in the case of a non-linear
+ * buffer, use a helper to make the data available. The
+ * **bpf_skb_load_bytes**\ () helper is a first solution to access
+ * the data. Another one consists in using **bpf_skb_pull_data**\ ()
+ * to pull in the non-linear parts once, then retesting and
+ * eventually accessing the data.
+ *
+ * At the same time, this also makes sure the *skb* is uncloned,
+ * which is a necessary condition for direct write. As this needs
+ * to be an invariant for the write part only, the verifier
+ * detects writes and adds a prologue that is calling
+ * **bpf_skb_pull_data()** to effectively unclone the *skb* from
+ * the very beginning in case it is indeed cloned.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) = (void *) 39;
+
+/*
+ * bpf_csum_update
+ *
+ * Add the checksum *csum* into *skb*\ **->csum** in case the
+ * driver has supplied a checksum for the entire packet into that
+ * field. Return an error otherwise. This helper is intended to be
+ * used in combination with **bpf_csum_diff**\ (), in particular
+ * when the checksum needs to be updated after data has been
+ * written into the packet through direct packet access.
+ *
+ * Returns
+ * The checksum on success, or a negative error code in case of
+ * failure.
+ */
+static __s64 (*bpf_csum_update)(struct __sk_buff *skb, __wsum csum) = (void *) 40;
+
+/*
+ * bpf_set_hash_invalid
+ *
+ * Invalidate the current *skb*\ **->hash**. It can be used after
+ * mangling on headers through direct packet access, in order to
+ * indicate that the hash is outdated and to trigger a
+ * recalculation the next time the kernel tries to access this
+ * hash or when the **bpf_get_hash_recalc**\ () helper is called.
+ *
+ */
+static void (*bpf_set_hash_invalid)(struct __sk_buff *skb) = (void *) 41;
+
+/*
+ * bpf_get_numa_node_id
+ *
+ * Return the id of the current NUMA node. The primary use case
+ * for this helper is the selection of sockets for the local NUMA
+ * node, when the program is attached to sockets using the
+ * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
+ * but the helper is also available to other eBPF program types,
+ * similarly to **bpf_get_smp_processor_id**\ ().
+ *
+ * Returns
+ * The id of current NUMA node.
+ */
+static long (*bpf_get_numa_node_id)(void) = (void *) 42;
+
+/*
+ * bpf_skb_change_head
+ *
+ * Grows headroom of packet associated to *skb* and adjusts the
+ * offset of the MAC header accordingly, adding *len* bytes of
+ * space. It automatically extends and reallocates memory as
+ * required.
+ *
+ * This helper can be used on a layer 3 *skb* to push a MAC header
+ * for redirection into a layer 2 device.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_change_head)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 43;
+
+/*
+ * bpf_xdp_adjust_head
+ *
+ * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
+ * it is possible to use a negative value for *delta*. This helper
+ * can be used to prepare the packet for pushing or popping
+ * headers.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_xdp_adjust_head)(struct xdp_md *xdp_md, int delta) = (void *) 44;
+
+/*
+ * bpf_probe_read_str
+ *
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
+ * more details.
+ *
+ * Generally, use **bpf_probe_read_user_str**\ () or
+ * **bpf_probe_read_kernel_str**\ () instead.
+ *
+ * Returns
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ */
+static long (*bpf_probe_read_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 45;
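+
+/*
+ * Usage sketch for bpf_probe_read_str: copy a NUL terminated kernel
+ * string into a fixed-size stack buffer. The *name_ptr* source pointer is
+ * a hypothetical unsafe kernel address obtained elsewhere in the program:
+ *
+ * ::
+ *
+ *	char name[64];
+ *	long len;
+ *
+ *	len = bpf_probe_read_str(name, sizeof(name), name_ptr);
+ *	if (len <= 0)
+ *		return 0;	// nothing usable was copied
+ */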
+
+/*
+ * bpf_get_socket_cookie
+ *
+ * If the **struct sk_buff** pointed by *skb* has a known socket,
+ * retrieve the cookie (generated by the kernel) of this socket.
+ * If no cookie has been set yet, generate a new cookie. Once
+ * generated, the socket cookie remains stable for the life of the
+ * socket. This helper can be useful for monitoring per socket
+ * networking traffic statistics as it provides a global socket
+ * identifier that can be assumed unique.
+ *
+ * Returns
+ * An 8-byte long unique number on success, or 0 if the socket
+ * field is missing inside *skb*.
+ */
+static __u64 (*bpf_get_socket_cookie)(void *ctx) = (void *) 46;
+
+/*
+ * bpf_get_socket_uid
+ *
+ *
+ * Returns
+ * The owner UID of the socket associated to *skb*. If the socket
+ * is **NULL**, or if it is not a full socket (i.e. if it is a
+ * time-wait or a request socket instead), **overflowuid** value
+ * is returned (note that **overflowuid** might also be the actual
+ * UID value for the socket).
+ */
+static __u32 (*bpf_get_socket_uid)(struct __sk_buff *skb) = (void *) 47;
+
+/*
+ * bpf_set_hash
+ *
+ * Set the full hash for *skb* (set the field *skb*\ **->hash**)
+ * to value *hash*.
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *) 48;
+
+/*
+ * bpf_setsockopt
+ *
+ * Emulate a call to **setsockopt()** on the socket associated to
+ * *bpf_socket*, which must be a full socket. The *level* at
+ * which the option resides and the name *optname* of the option
+ * must be specified, see **setsockopt(2)** for more information.
+ * The option value of length *optlen* is pointed by *optval*.
+ *
+ * *bpf_socket* should be one of the following:
+ *
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
+ * This helper actually implements a subset of **setsockopt()**.
+ * It supports the following *level*\ s:
+ *
+ * * **SOL_SOCKET**, which supports the following *optname*\ s:
+ * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
+ * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
+ * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
+ * * **IPPROTO_TCP**, which supports the following *optname*\ s:
+ * **TCP_CONGESTION**, **TCP_BPF_IW**,
+ * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
+ * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
+ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
+ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 49;
+
+/*
+ * bpf_skb_adjust_room
+ *
+ * Grow or shrink the room for data in the packet associated to
+ * *skb* by *len_diff*, and according to the selected *mode*.
+ *
+ * By default, the helper will reset any offloaded checksum
+ * indicator of the skb to CHECKSUM_NONE. This can be avoided
+ * by the following flag:
+ *
+ * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
+ * checksum data of the skb to CHECKSUM_NONE.
+ *
+ * There are two supported modes at this time:
+ *
+ * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
+ * (room space is added or removed below the layer 2 header).
+ *
+ * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
+ * (room space is added or removed below the layer 3 header).
+ *
+ * The following flags are supported at this time:
+ *
+ * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
+ * Adjusting mss in this way is not allowed for datagrams.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
+ * Any new space is reserved to hold a tunnel header.
+ * Configure skb offsets and other fields accordingly.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
+ * Use with ENCAP_L3 flags to further specify the tunnel type.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
+ * Use with ENCAP_L3/L4 flags to further specify the tunnel
+ * type; *len* is the length of the inner MAC header.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
+ * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
+ * L2 type as Ethernet.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff, __u32 mode, __u64 flags) = (void *) 50;
+
+/*
+ * bpf_redirect_map
+ *
+ * Redirect the packet to the endpoint referenced by *map* at
+ * index *key*. Depending on its type, this *map* can contain
+ * references to net devices (for forwarding packets through other
+ * ports), or to CPUs (for redirecting XDP frames to another CPU;
+ * but this is only implemented for native XDP (with driver
+ * support) as of this writing).
+ *
+ * The lower two bits of *flags* are used as the return code if
+ * the map lookup fails. This is so that the return value can be
+ * one of the XDP program return codes up to **XDP_TX**, as chosen
+ * by the caller. The higher bits of *flags* can be set to
+ * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
+ *
+ * With BPF_F_BROADCAST the packet will be broadcast to all the
+ * interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress
+ * interface will be excluded from the broadcast.
+ *
+ * See also **bpf_redirect**\ (), which only supports redirecting
+ * to an ifindex, but doesn't require a map to do so.
+ *
+ * Returns
+ * **XDP_REDIRECT** on success, or the value of the two lower bits
+ * of the *flags* argument on error.
+ */
+static long (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) = (void *) 51;
+
+/*
+ * bpf_sk_redirect_map
+ *
+ * Redirect the packet to the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ *
+ * Returns
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ */
+static long (*bpf_sk_redirect_map)(struct __sk_buff *skb, void *map, __u32 key, __u64 flags) = (void *) 52;
+
+/*
+ * bpf_sock_map_update
+ *
+ * Add an entry to, or update a *map* referencing sockets. The
+ * *skops* is used as a new value for the entry associated to
+ * *key*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * If the *map* has eBPF programs (parser and verdict), those will
+ * be inherited by the socket being added. If the socket is
+ * already attached to eBPF programs, this results in an error.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sock_map_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 53;
+
+/*
+ * bpf_xdp_adjust_meta
+ *
+ * Adjust the address pointed by *xdp_md*\ **->data_meta** by
+ * *delta* (which can be positive or negative). Note that this
+ * operation modifies the address stored in *xdp_md*\ **->data**,
+ * so the latter must be loaded only after the helper has been
+ * called.
+ *
+ * The use of *xdp_md*\ **->data_meta** is optional and programs
+ * are not required to use it. The rationale is that when the
+ * packet is processed with XDP (e.g. as DoS filter), it is
+ * possible to push further meta data along with it before passing
+ * to the stack, and to give the guarantee that an ingress eBPF
+ * program attached as a TC classifier on the same device can pick
+ * this up for further post-processing. Since TC works with socket
+ * buffers, it remains possible to set from XDP the **mark** or
+ * **priority** pointers, or other pointers for the socket buffer.
+ * Having this scratch space generic and programmable allows for
+ * more flexibility as the user is free to store whatever meta
+ * data they need.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_xdp_adjust_meta)(struct xdp_md *xdp_md, int delta) = (void *) 54;
+
+/*
+ * bpf_perf_event_read_value
+ *
+ * Read the value of a perf event counter, and store it into *buf*
+ * of size *buf_size*. This helper relies on a *map* of type
+ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
+ * counter is selected when *map* is updated with perf event file
+ * descriptors. The *map* is an array whose size is the number of
+ * available CPUs, and each cell contains a value relative to one
+ * CPU. The value to retrieve is indicated by *flags*, that
+ * contains the index of the CPU to look up, masked with
+ * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
+ * **BPF_F_CURRENT_CPU** to indicate that the value for the
+ * current CPU should be retrieved.
+ *
+ * This helper behaves in a way close to
+ * **bpf_perf_event_read**\ () helper, save that instead of
+ * just returning the value observed, it fills the *buf*
+ * structure. This allows for additional data to be retrieved: in
+ * particular, the enabled and running times (in *buf*\
+ * **->enabled** and *buf*\ **->running**, respectively) are
+ * copied. In general, **bpf_perf_event_read_value**\ () is
+ * recommended over **bpf_perf_event_read**\ (), which has some
+ * ABI issues and provides fewer functionalities.
+ *
+ * These values are interesting, because hardware PMU (Performance
+ * Monitoring Unit) counters are limited resources. When there are
+ * more PMU-based perf events opened than available counters, the
+ * kernel will multiplex these events so that each event gets a
+ * certain percentage (but not all) of the PMU time. When
+ * multiplexing happens, the number of samples or the counter value
+ * will not reflect the true count compared to when no multiplexing
+ * occurs. This makes comparison between different runs difficult.
+ * Typically, the counter value should be normalized before
+ * comparing to other experiments. The usual normalization is done
+ * as follows.
+ *
+ * ::
+ *
+ * normalized_counter = counter * t_enabled / t_running
+ *
+ * Where t_enabled is the time enabled for the event and t_running
+ * is the time running for the event since the last normalization. The
+ * enabled and running times are accumulated since the perf event
+ * open. To achieve scaling factor between two invocations of an
+ * eBPF program, users can use CPU id as the key (which is
+ * typical for perf array usage model) to remember the previous
+ * value and do the calculation inside the eBPF program.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 55;
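+
+/*
+ * Usage sketch for bpf_perf_event_read_value: read the current CPU's
+ * counter and apply the normalization described above. The
+ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map *events* is a hypothetical name:
+ *
+ * ::
+ *
+ *	struct bpf_perf_event_value val = {};
+ *	__u64 normalized = 0;
+ *
+ *	if (bpf_perf_event_read_value(&events, BPF_F_CURRENT_CPU,
+ *				      &val, sizeof(val)) == 0 &&
+ *	    val.running > 0)
+ *		normalized = val.counter * val.enabled / val.running;
+ */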
+
+/*
+ * bpf_perf_prog_read_value
+ *
+ * For an eBPF program attached to a perf event, retrieve the
+ * value of the event counter associated to *ctx* and store it in
+ * the structure pointed by *buf* and of size *buf_size*. Enabled
+ * and running times are also stored in the structure (see
+ * description of helper **bpf_perf_event_read_value**\ () for
+ * more details).
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 56;
+
+/*
+ * bpf_getsockopt
+ *
+ * Emulate a call to **getsockopt()** on the socket associated to
+ * *bpf_socket*, which must be a full socket. The *level* at
+ * which the option resides and the name *optname* of the option
+ * must be specified, see **getsockopt(2)** for more information.
+ * The retrieved value is stored in the structure pointed by
+ * *optval* and of length *optlen*.
+ *
+ * *bpf_socket* should be one of the following:
+ *
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
+ * This helper actually implements a subset of **getsockopt()**.
+ * It supports the following *level*\ s:
+ *
+ * * **IPPROTO_TCP**, which supports *optname*
+ * **TCP_CONGESTION**.
+ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
+ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_getsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 57;
+
+/*
+ * bpf_override_return
+ *
+ * Used for error injection, this helper uses kprobes to override
+ * the return value of the probed function, and to set it to *rc*.
+ * The first argument is the context *regs* on which the kprobe
+ * works.
+ *
+ * This helper works by setting the PC (program counter)
+ * to an override function which is run in place of the original
+ * probed function. This means the probed function is not run at
+ * all. The replacement function just returns with the required
+ * value.
+ *
+ * This helper has security implications, and thus is subject to
+ * restrictions. It is only available if the kernel was compiled
+ * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
+ * option, and in this case it only works on functions tagged with
+ * **ALLOW_ERROR_INJECTION** in the kernel code.
+ *
+ * Also, the helper is only available for architectures that have
+ * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
+ * x86 is the only architecture that supports this feature.
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_override_return)(struct pt_regs *regs, __u64 rc) = (void *) 58;
+
+/*
+ * bpf_sock_ops_cb_flags_set
+ *
+ * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
+ * for the full TCP socket associated to *bpf_sock_ops* to
+ * *argval*.
+ *
+ * The primary use of this field is to determine if there should
+ * be calls to eBPF programs of type
+ * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
+ * code. A program of the same type can change its value, per
+ * connection and as necessary, when the connection is
+ * established. This field is directly accessible for reading, but
+ * this helper must be used for updates in order to return an
+ * error if an eBPF program tries to set a callback that is not
+ * supported in the current kernel.
+ *
+ * *argval* is a flag array which can combine these flags:
+ *
+ * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
+ * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
+ * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
+ * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
+ *
+ * Therefore, this function can be used to clear a callback flag by
+ * setting the appropriate bit to zero. e.g. to disable the RTO
+ * callback:
+ *
+ * **bpf_sock_ops_cb_flags_set(bpf_sock,**
+ * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
+ *
+ * Here are some examples of where one could call such eBPF
+ * program:
+ *
+ * * When RTO fires.
+ * * When a packet is retransmitted.
+ * * When the connection terminates.
+ * * When a packet is sent.
+ * * When a packet is received.
+ *
+ * Returns
+ * Code **-EINVAL** if the socket is not a full TCP socket;
+ * otherwise, a positive number containing the bits that could not
+ * be set is returned (which comes down to 0 if all bits were set
+ * as required).
+ */
+static long (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *bpf_sock, int argval) = (void *) 59;
+
+/*
+ * bpf_msg_redirect_map
+ *
+ * This helper is used in programs implementing policies at the
+ * socket level. If the message *msg* is allowed to pass (i.e. if
+ * the verdict eBPF program returns **SK_PASS**), redirect it to
+ * the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ *
+ * Returns
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ */
+static long (*bpf_msg_redirect_map)(struct sk_msg_md *msg, void *map, __u32 key, __u64 flags) = (void *) 60;
+
+/*
+ * bpf_msg_apply_bytes
+ *
+ * For socket policies, apply the verdict of the eBPF program to
+ * the next *bytes* (number of bytes) of message *msg*.
+ *
+ * For example, this helper can be used in the following cases:
+ *
+ * * A single **sendmsg**\ () or **sendfile**\ () system call
+ * contains multiple logical messages that the eBPF program is
+ * supposed to read and for which it should apply a verdict.
+ * * An eBPF program only cares to read the first *bytes* of a
+ * *msg*. If the message has a large payload, then setting up
+ * and calling the eBPF program repeatedly for all bytes, even
+ * though the verdict is already known, would create unnecessary
+ * overhead.
+ *
+ * When called from within an eBPF program, the helper sets a
+ * counter internal to the BPF infrastructure, that is used to
+ * apply the last verdict to the next *bytes*. If *bytes* is
+ * smaller than the current data being processed from a
+ * **sendmsg**\ () or **sendfile**\ () system call, the first
+ * *bytes* will be sent and the eBPF program will be re-run with
+ * the pointer for start of data pointing to byte number *bytes*
+ * **+ 1**. If *bytes* is larger than the current data being
+ * processed, then the eBPF verdict will be applied to multiple
+ * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
+ * consumed.
+ *
+ * Note that if a socket closes with the internal counter holding
+ * a non-zero value, this is not a problem because data is not
+ * being buffered for *bytes* and is sent as it is received.
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_msg_apply_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 61;
+
+/*
+ * bpf_msg_cork_bytes
+ *
+ * For socket policies, prevent the execution of the verdict eBPF
+ * program for message *msg* until *bytes* (byte number) have been
+ * accumulated.
+ *
+ * This can be used when one needs a specific number of bytes
+ * before a verdict can be assigned, even if the data spans
+ * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
+ * case would be a user calling **sendmsg**\ () repeatedly with
+ * 1-byte long message segments. Obviously, this is bad for
+ * performance, but it is still valid. If the eBPF program needs
+ * *bytes* bytes to validate a header, this helper can be used to
+ * prevent the eBPF program from being called again until *bytes* have
+ * been accumulated.
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_msg_cork_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 62;
+
+/*
+ * bpf_msg_pull_data
+ *
+ * For socket policies, pull in non-linear data from user space
+ * for *msg* and set pointers *msg*\ **->data** and *msg*\
+ * **->data_end** to *start* and *end* bytes offsets into *msg*,
+ * respectively.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it can only parse data that the (**data**, **data_end**)
+ * pointers have already consumed. For **sendmsg**\ () hooks this
+ * is likely the first scatterlist element. But for calls relying
+ * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
+ * be the range (**0**, **0**) because the data is shared with
+ * user space and by default the objective is to avoid allowing
+ * user space to modify data while (or after) eBPF verdict is
+ * being decided. This helper can be used to pull in data and to
+ * set the start and end pointer to given values. Data will be
+ * copied if necessary (i.e. if data was not linear and if start
+ * and end pointers do not point to the same chunk).
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_msg_pull_data)(struct sk_msg_md *msg, __u32 start, __u32 end, __u64 flags) = (void *) 63;
+
+/*
+ * bpf_bind
+ *
+ * Bind the socket associated to *ctx* to the address pointed by
+ * *addr*, of length *addr_len*. This allows for making outgoing
+ * connection from the desired IP address, which can be useful for
+ * example when all processes inside a cgroup should use one
+ * single IP address on a host that has multiple IPs configured.
+ *
+ * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
+ * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
+ * **AF_INET6**). It's advised to pass zero port (**sin_port**
+ * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
+ * behavior and lets the kernel efficiently pick up an unused
+ * port as long as 4-tuple is unique. Passing non-zero port might
+ * lead to degraded performance.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_bind)(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) = (void *) 64;
+
+/*
+ * bpf_xdp_adjust_tail
+ *
+ * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
+ * possible to both shrink and grow the packet tail.
+ * Shrinking is done via *delta* being a negative integer.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_xdp_adjust_tail)(struct xdp_md *xdp_md, int delta) = (void *) 65;
+
+/*
+ * bpf_skb_get_xfrm_state
+ *
+ * Retrieve the XFRM state (IP transform framework, see also
+ * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
+ *
+ * The retrieved value is stored in the **struct bpf_xfrm_state**
+ * pointed by *xfrm_state* and of length *size*.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_XFRM** configuration option.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct bpf_xfrm_state *xfrm_state, __u32 size, __u64 flags) = (void *) 66;
+
+/*
+ * bpf_get_stack
+ *
+ * Return a user or a kernel stack in bpf program provided buffer.
+ * To achieve this, the helper needs *ctx*, which is a pointer
+ * to the context on which the tracing program is executed.
+ * To store the stacktrace, the bpf program provides *buf* with
+ * a nonnegative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_stack**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ * to a sufficiently large buffer size. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ *
+ * Returns
+ * A non-negative value equal to or less than *size* on success,
+ * or a negative error in case of failure.
+ */
+static long (*bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void *) 67;
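+
+/*
+ * Usage sketch for bpf_get_stack: capture up to 32 user-space frames into
+ * a buffer provided by the program; on success *len* holds the number of
+ * bytes actually written:
+ *
+ * ::
+ *
+ *	__u64 ips[32];
+ *	long len;
+ *
+ *	len = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
+ *	if (len < 0)
+ *		return 0;	// stack could not be collected
+ */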
+
+/*
+ * bpf_skb_load_bytes_relative
+ *
+ * This helper is similar to **bpf_skb_load_bytes**\ () in that
+ * it provides an easy way to load *len* bytes from *offset*
+ * from the packet associated to *skb*, into the buffer pointed
+ * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
+ * a fifth argument *start_header* exists in order to select a
+ * base offset to start from. *start_header* can be one of:
+ *
+ * **BPF_HDR_START_MAC**
+ * Base offset to load data from is *skb*'s mac header.
+ * **BPF_HDR_START_NET**
+ * Base offset to load data from is *skb*'s network header.
+ *
+ * In general, "direct packet access" is the preferred method to
+ * access packet data, however, this helper is in particular useful
+ * in socket filters where *skb*\ **->data** does not always point
+ * to the start of the mac header and where "direct packet access"
+ * is not available.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void *to, __u32 len, __u32 start_header) = (void *) 68;
+
+/*
+ * bpf_fib_lookup
+ *
+ * Do FIB lookup in kernel tables using parameters in *params*.
+ * If lookup is successful and result shows packet is to be
+ * forwarded, the neighbor tables are searched for the nexthop.
+ * If successful (i.e., FIB lookup shows forwarding and nexthop
+ * is resolved), the nexthop address is returned in ipv4_dst
+ * or ipv6_dst based on family, smac is set to mac address of
+ * egress device, dmac is set to nexthop mac address, rt_metric
+ * is set to metric from route (IPv4/IPv6 only), and ifindex
+ * is set to the device index of the nexthop from the FIB lookup.
+ *
+ * *plen* argument is the size of the passed in struct.
+ * *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_FIB_LOOKUP_DIRECT**
+ * Do a direct table lookup vs full lookup using FIB
+ * rules.
+ * **BPF_FIB_LOOKUP_OUTPUT**
+ * Perform lookup from an egress perspective (default is
+ * ingress).
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ *
+ * Returns
+ * * < 0 if any input argument is invalid
+ * * 0 on success (packet is forwarded, nexthop neighbor exists)
+ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ * packet is not forwarded or needs assist from full stack
+ *
+ * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
+ * was exceeded and output params->mtu_result contains the MTU.
+ */
+static long (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, int plen, __u32 flags) = (void *) 69;
+
+/*
+ * bpf_sock_hash_update
+ *
+ * Add an entry to, or update a sockhash *map* referencing sockets.
+ * The *skops* is used as a new value for the entry associated to
+ * *key*. *flags* is one of:
+ *
+ * **BPF_NOEXIST**
+ * The entry for *key* must not exist in the map.
+ * **BPF_EXIST**
+ * The entry for *key* must already exist in the map.
+ * **BPF_ANY**
+ * No condition on the existence of the entry for *key*.
+ *
+ * If the *map* has eBPF programs (parser and verdict), those will
+ * be inherited by the socket being added. If the socket is
+ * already attached to eBPF programs, this results in an error.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sock_hash_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 70;
+
+/*
+ * bpf_msg_redirect_hash
+ *
+ * This helper is used in programs implementing policies at the
+ * socket level. If the message *msg* is allowed to pass (i.e. if
+ * the verdict eBPF program returns **SK_PASS**), redirect it to
+ * the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress path otherwise). This is the only flag supported for now.
+ *
+ * Returns
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ */
+static long (*bpf_msg_redirect_hash)(struct sk_msg_md *msg, void *map, void *key, __u64 flags) = (void *) 71;
+
+/*
+ * bpf_sk_redirect_hash
+ *
+ * This helper is used in programs implementing policies at the
+ * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
+ * to the socket referenced by *map* (of type
+ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
+ * egress interfaces can be used for redirection. The
+ * **BPF_F_INGRESS** value in *flags* is used to make the
+ * distinction (ingress path is selected if the flag is present,
+ * egress otherwise). This is the only flag supported for now.
+ *
+ * Returns
+ * **SK_PASS** on success, or **SK_DROP** on error.
+ */
+static long (*bpf_sk_redirect_hash)(struct __sk_buff *skb, void *map, void *key, __u64 flags) = (void *) 72;
+
+/*
+ * bpf_lwt_push_encap
+ *
+ * Encapsulate the packet associated to *skb* within a Layer 3
+ * protocol header. This header is provided in the buffer at
+ * address *hdr*, with *len* its size in bytes. *type* indicates
+ * the protocol of the header and can be one of:
+ *
+ * **BPF_LWT_ENCAP_SEG6**
+ * IPv6 encapsulation with Segment Routing Header
+ * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
+ * the IPv6 header is computed by the kernel.
+ * **BPF_LWT_ENCAP_SEG6_INLINE**
+ * Only works if *skb* contains an IPv6 packet. Insert a
+ * Segment Routing Header (**struct ipv6_sr_hdr**) inside
+ * the IPv6 header.
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
+ * must be IPv4 or IPv6, followed by zero or more
+ * additional headers, up to **LWT_BPF_MAX_HEADROOM**
+ * total bytes in all prepended headers. Please note that
+ * if **skb_is_gso**\ (*skb*) is true, no more than two
+ * headers can be prepended, and the inner header, if
+ * present, should be either GRE or UDP/GUE.
+ *
+ * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
+ * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
+ * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
+ * **BPF_PROG_TYPE_LWT_XMIT**.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_lwt_push_encap)(struct __sk_buff *skb, __u32 type, void *hdr, __u32 len) = (void *) 73;
+
+/*
+ * bpf_lwt_seg6_store_bytes
+ *
+ * Store *len* bytes from address *from* into the packet
+ * associated to *skb*, at *offset*. Only the flags, tag and TLVs
+ * inside the outermost IPv6 Segment Routing Header can be
+ * modified through this helper.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_lwt_seg6_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len) = (void *) 74;
+
+/*
+ * bpf_lwt_seg6_adjust_srh
+ *
+ * Adjust the size allocated to TLVs in the outermost IPv6
+ * Segment Routing Header contained in the packet associated to
+ * *skb*, at position *offset* by *delta* bytes. Only offsets
+ * after the segments are accepted. *delta* can be as well
+ * positive (growing) as negative (shrinking).
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_lwt_seg6_adjust_srh)(struct __sk_buff *skb, __u32 offset, __s32 delta) = (void *) 75;
+
+/*
+ * bpf_lwt_seg6_action
+ *
+ * Apply an IPv6 Segment Routing action of type *action* to the
+ * packet associated to *skb*. Each action takes a parameter
+ * contained at address *param*, and of length *param_len* bytes.
+ * *action* can be one of:
+ *
+ * **SEG6_LOCAL_ACTION_END_X**
+ * End.X action: Endpoint with Layer-3 cross-connect.
+ * Type of *param*: **struct in6_addr**.
+ * **SEG6_LOCAL_ACTION_END_T**
+ * End.T action: Endpoint with specific IPv6 table lookup.
+ * Type of *param*: **int**.
+ * **SEG6_LOCAL_ACTION_END_B6**
+ * End.B6 action: Endpoint bound to an SRv6 policy.
+ * Type of *param*: **struct ipv6_sr_hdr**.
+ * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
+ * End.B6.Encap action: Endpoint bound to an SRv6
+ * encapsulation policy.
+ * Type of *param*: **struct ipv6_sr_hdr**.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_lwt_seg6_action)(struct __sk_buff *skb, __u32 action, void *param, __u32 param_len) = (void *) 76;
+
+/*
+ * bpf_rc_repeat
+ *
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
+ *
+ * Some IR protocols like NEC have a special IR message for
+ * repeating last button, for when a button is held down.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_rc_repeat)(void *ctx) = (void *) 77;
+
+/*
+ * bpf_rc_keydown
+ *
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
+ *
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void *) 78;
+
+/*
+ * bpf_skb_cgroup_id
+ *
+ * Return the cgroup v2 id of the socket associated with the *skb*.
+ * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
+ * helper for cgroup v1 by providing a tag resp. identifier that
+ * can be matched on or used for map lookups e.g. to implement
+ * policy. The cgroup v2 id of a given path in the hierarchy is
+ * exposed in user space through the f_handle API in order to get
+ * to the same 64-bit id.
+ *
+ * This helper can be used on TC egress path, but not on ingress,
+ * and is available only if the kernel was compiled with the
+ * **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_skb_cgroup_id)(struct __sk_buff *skb) = (void *) 79;
+
+/*
+ * bpf_get_current_cgroup_id
+ *
+ *
+ * Returns
+ * A 64-bit integer containing the current cgroup id based
+ * on the cgroup within which the current task is running.
+ */
+static __u64 (*bpf_get_current_cgroup_id)(void) = (void *) 80;
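+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * counting events per cgroup by keying a hash map with the current
+ * cgroup v2 id. The map, section and function names below are
+ * hypothetical.
+ *
+ *        struct {
+ *                __uint(type, BPF_MAP_TYPE_HASH);
+ *                __uint(max_entries, 1024);
+ *                __type(key, __u64);
+ *                __type(value, __u64);
+ *        } cgrp_events SEC(".maps");
+ *
+ *        SEC("kprobe/vfs_write")
+ *        int count_write(struct pt_regs *ctx)
+ *        {
+ *                __u64 cgrp = bpf_get_current_cgroup_id();
+ *                __u64 one = 1, *val;
+ *
+ *                val = bpf_map_lookup_elem(&cgrp_events, &cgrp);
+ *                if (val)
+ *                        __sync_fetch_and_add(val, 1);
+ *                else
+ *                        bpf_map_update_elem(&cgrp_events, &cgrp, &one, BPF_NOEXIST);
+ *                return 0;
+ *        }
+ */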
+
+/*
+ * bpf_get_local_storage
+ *
+ * Get the pointer to the local storage area.
+ * The type and the size of the local storage is defined
+ * by the *map* argument.
+ * The *flags* meaning is specific for each map type,
+ * and has to be 0 for cgroup local storage.
+ *
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
+ * running simultaneously.
+ *
+ * Users should take care of synchronization themselves, for
+ * example by using the **BPF_ATOMIC** instructions to alter
+ * the shared data.
+ *
+ * Returns
+ * A pointer to the local storage area.
+ */
+static void *(*bpf_get_local_storage)(void *map, __u64 flags) = (void *) 81;
+
+/*
+ * bpf_sk_select_reuseport
+ *
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
+ * It checks that the selected socket matches the incoming
+ * request in the socket buffer.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sk_select_reuseport)(struct sk_reuseport_md *reuse, void *map, void *key, __u64 flags) = (void *) 82;
+
+/*
+ * bpf_skb_ancestor_cgroup_id
+ *
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_skb_ancestor_cgroup_id)(struct __sk_buff *skb, int ancestor_level) = (void *) 83;
+
+/*
+ * bpf_sk_lookup_tcp
+ *
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ *
+ * Returns
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ */
+static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 84;
+
+/*
+ * bpf_sk_lookup_udp
+ *
+ * Look for UDP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ *
+ * Returns
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ */
+static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 85;
+
+/*
+ * bpf_sk_release
+ *
+ * Release the reference held by *sock*. *sock* must be a
+ * non-**NULL** pointer that was returned from
+ * **bpf_sk_lookup_xxx**\ ().
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_sk_release)(void *sock) = (void *) 86;
+
+/*
+ * bpf_map_push_elem
+ *
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is
+ * removed to make room for this.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void *) 87;
+
+/*
+ * bpf_map_pop_elem
+ *
+ * Pop an element from *map*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_pop_elem)(void *map, void *value) = (void *) 88;
+
+/*
+ * bpf_map_peek_elem
+ *
+ * Get an element from *map* without removing it.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_map_peek_elem)(void *map, void *value) = (void *) 89;
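+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * using a **BPF_MAP_TYPE_QUEUE** with the push/pop helpers above. The
+ * map name below is hypothetical.
+ *
+ *        struct {
+ *                __uint(type, BPF_MAP_TYPE_QUEUE);
+ *                __uint(max_entries, 256);
+ *                __type(value, __u64);
+ *        } pending SEC(".maps");
+ *
+ *        // In the program body:
+ *        // Producer: overwrite the oldest entry when the queue is full.
+ *        __u64 ts = bpf_ktime_get_ns();
+ *        bpf_map_push_elem(&pending, &ts, BPF_EXIST);
+ *
+ *        // Consumer: take one entry out, or peek without removing it.
+ *        __u64 out;
+ *        if (bpf_map_pop_elem(&pending, &out) == 0) {
+ *                // consume out
+ *        }
+ */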
+
+/*
+ * bpf_msg_push_data
+ *
+ * For socket policies, insert *len* bytes into *msg* at offset
+ * *start*.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it may want to insert metadata or options into the *msg*.
+ * This can later be read and used by any of the lower layer BPF
+ * hooks.
+ *
+ * This helper may fail under memory pressure (if a malloc
+ * fails); in these cases BPF programs will get an appropriate
+ * error and will need to handle it.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_msg_push_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 90;
+
+/*
+ * bpf_msg_pop_data
+ *
+ * Will remove *len* bytes from a *msg* starting at byte *start*.
+ * This may result in **ENOMEM** errors under certain situations if
+ * an allocation and copy are required due to a full ring buffer.
+ * However, the helper will try to avoid doing the allocation
+ * if possible. Other errors can occur if input parameters are
+ * invalid, either due to the *start* byte not being a valid part of the *msg*
+ * payload and/or the *pop* value being too large.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_msg_pop_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 91;
+
+/*
+ * bpf_rc_pointer_rel
+ *
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded pointer movement.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void *) 92;
+
+/*
+ * bpf_spin_lock
+ *
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a value of a map. Taking the lock allows one to
+ * safely update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ *   or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nested lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, do not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ *   networking packet (it can only be inside of a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_spin_lock)(struct bpf_spin_lock *lock) = (void *) 93;
+
+/*
+ * bpf_spin_unlock
+ *
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ *
+ * Returns
+ * 0
+ */
+static long (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void *) 94;
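+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * a map value embedding a **struct bpf_spin_lock** to guard updates to
+ * the surrounding fields; BTF for the map is mandatory (see the
+ * restrictions above). The names below are hypothetical.
+ *
+ *        struct counters {
+ *                struct bpf_spin_lock lock;
+ *                __u64 bytes;
+ *                __u64 calls;
+ *        };
+ *
+ *        struct {
+ *                __uint(type, BPF_MAP_TYPE_ARRAY);
+ *                __uint(max_entries, 1);
+ *                __type(key, __u32);
+ *                __type(value, struct counters);
+ *        } stats SEC(".maps");
+ *
+ *        // In the program body:
+ *        __u32 key = 0;
+ *        struct counters *c = bpf_map_lookup_elem(&stats, &key);
+ *        if (c) {
+ *                bpf_spin_lock(&c->lock);
+ *                c->bytes += 512;
+ *                c->calls++;
+ *                bpf_spin_unlock(&c->lock);
+ *        }
+ */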
+
+/*
+ * bpf_sk_fullsock
+ *
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in this **bpf_sock** can be accessed.
+ *
+ * Returns
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ */
+static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) 95;
+
+/*
+ * bpf_tcp_sock
+ *
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ *
+ * Returns
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
+ * case of failure.
+ */
+static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void *) 96;
+
+/*
+ * bpf_skb_ecn_set_ce
+ *
+ * Set ECN (Explicit Congestion Notification) field of IP header
+ * to **CE** (Congestion Encountered) if current value is **ECT**
+ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * and IPv4.
+ *
+ * Returns
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ */
+static long (*bpf_skb_ecn_set_ce)(struct __sk_buff *skb) = (void *) 97;
+
+/*
+ * bpf_get_listener_sock
+ *
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ *
+ * Returns
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ */
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = (void *) 98;
+
+/*
+ * bpf_skc_lookup_tcp
+ *
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * This function is identical to **bpf_sk_lookup_tcp**\ (), except
+ * that it also returns timewait or request sockets. Use
+ * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
+ * full structure.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ *
+ * Returns
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ */
+static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 99;
+
+/*
+ * bpf_tcp_check_syncookie
+ *
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK for
+ * the listening socket in *sk*.
+ *
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ip6hdr**).
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains **sizeof**\ (**struct tcphdr**).
+ *
+ * Returns
+ * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
+ * error otherwise.
+ */
+static long (*bpf_tcp_check_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 100;
+
+/*
+ * bpf_sysctl_get_name
+ *
+ * Get name of sysctl in /proc/sys/ and copy it into provided by
+ * program buffer *buf* of size *buf_len*.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
+ * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
+ * only (e.g. "tcp_mem").
+ *
+ * Returns
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * truncated name in this case).
+ */
+static long (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len, __u64 flags) = (void *) 101;
+
+/*
+ * bpf_sysctl_get_current_value
+ *
+ * Get current value of sysctl as it is presented in /proc/sys
+ * (incl. newline, etc), and copy it as a string into provided
+ * by program buffer *buf* of size *buf_len*.
+ *
+ * The whole value is copied, no matter what file position user
+ * space issued e.g. sys_read at.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Returns
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if current value was unavailable, e.g. because
+ * sysctl is uninitialized and read returns -EIO for it.
+ */
+static long (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 102;
+
+/*
+ * bpf_sysctl_get_new_value
+ *
+ * Get new value being written by user space to sysctl (before
+ * the actual write happens) and copy it as a string into
+ * provided by program buffer *buf* of size *buf_len*.
+ *
+ * User space may write new value at file position > 0.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Returns
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if sysctl is being read.
+ */
+static long (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 103;
+
+/*
+ * bpf_sysctl_set_new_value
+ *
+ * Override new value being written by user space to sysctl with
+ * value provided by program in buffer *buf* of size *buf_len*.
+ *
+ * *buf* should contain a string in the same form as provided by user
+ * space on sysctl write.
+ *
+ * User space may write new value at file position > 0. To override
+ * the whole sysctl value, file position should be set to zero.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-E2BIG** if the *buf_len* is too big.
+ *
+ * **-EINVAL** if sysctl is being read.
+ */
+static long (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, unsigned long buf_len) = (void *) 104;
+
+/*
+ * bpf_strtol
+ *
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to a long integer according to the given base
+ * and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)) followed by a single
+ * optional '**-**' sign.
+ *
+ * Five least significant bits of *flags* encode base, other bits
+ * are currently unused.
+ *
+ * Base must be either 8, 10, 16 or 0 to detect it automatically
+ * similar to user space **strtol**\ (3).
+ *
+ * Returns
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or unsupported base
+ * was provided.
+ *
+ * **-ERANGE** if resulting value was out of range.
+ */
+static long (*bpf_strtol)(const char *buf, unsigned long buf_len, __u64 flags, long *res) = (void *) 105;
+
+/*
+ * bpf_strtoul
+ *
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to an unsigned long integer according to the
+ * given base and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)).
+ *
+ * Five least significant bits of *flags* encode base, other bits
+ * are currently unused.
+ *
+ * Base must be either 8, 10, 16 or 0 to detect it automatically
+ * similar to user space **strtoul**\ (3).
+ *
+ * Returns
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or unsupported base
+ * was provided.
+ *
+ * **-ERANGE** if resulting value was out of range.
+ */
+static long (*bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags, unsigned long *res) = (void *) 106;
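+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * a cgroup sysctl program parsing the value being written, combining
+ * **bpf_sysctl_get_new_value**\ () above with **bpf_strtoul**\ (). The
+ * section name, function name and the 4096 limit are hypothetical.
+ *
+ *        SEC("cgroup/sysctl")
+ *        int check_sysctl(struct bpf_sysctl *ctx)
+ *        {
+ *                char buf[16] = { 0 };
+ *                unsigned long val = 0;
+ *
+ *                if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) < 0)
+ *                        return 1; // not a write, allow
+ *                if (bpf_strtoul(buf, sizeof(buf), 0, &val) < 0)
+ *                        return 0; // reject unparsable values
+ *                return val <= 4096 ? 1 : 0;
+ *        }
+ */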
+
+/*
+ * bpf_sk_storage_get
+ *
+ * Get a bpf-local-storage from a *sk*.
+ *
+ * Logically, it could be thought of getting the value from
+ * a *map* with *sk* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
+ * helper enforces the key must be a full socket and the map must
+ * be a **BPF_MAP_TYPE_SK_STORAGE** also.
+ *
+ * Underneath, the value is stored locally at *sk* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf-local-storages residing at *sk*.
+ *
+ * *sk* is a kernel **struct sock** pointer for LSM program.
+ * *sk* is a **struct bpf_sock** pointer for other program types.
+ *
+ * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf-local-storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf-local-storage. If *value* is
+ * **NULL**, the new bpf-local-storage will be zero initialized.
+ *
+ * Returns
+ * A bpf-local-storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf-local-storage.
+ */
+static void *(*bpf_sk_storage_get)(void *map, void *sk, void *value, __u64 flags) = (void *) 107;
+
+/*
+ * bpf_sk_storage_delete
+ *
+ * Delete a bpf-local-storage from a *sk*.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf-local-storage cannot be found.
+ * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
+ */
+static long (*bpf_sk_storage_delete)(void *map, void *sk) = (void *) 108;
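+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * per-socket local storage from a sockops program, using the get/delete
+ * helpers above. The names below are hypothetical.
+ *
+ *        struct sock_stats {
+ *                __u64 created_ns;
+ *        };
+ *
+ *        struct {
+ *                __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ *                __uint(map_flags, BPF_F_NO_PREALLOC);
+ *                __type(key, int);
+ *                __type(value, struct sock_stats);
+ *        } sk_stats SEC(".maps");
+ *
+ *        SEC("sockops")
+ *        int track_sock(struct bpf_sock_ops *skops)
+ *        {
+ *                struct bpf_sock *sk = skops->sk;
+ *                struct sock_stats *s;
+ *
+ *                if (!sk)
+ *                        return 1;
+ *                s = bpf_sk_storage_get(&sk_stats, sk, NULL,
+ *                                       BPF_SK_STORAGE_GET_F_CREATE);
+ *                if (s && !s->created_ns)
+ *                        s->created_ns = bpf_ktime_get_ns();
+ *                return 1;
+ *        }
+ */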
+
+/*
+ * bpf_send_signal
+ *
+ * Send signal *sig* to the process of the current task.
+ * The signal may be delivered to any of this process's threads.
+ *
+ * Returns
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
+ */
+static long (*bpf_send_signal)(__u32 sig) = (void *) 109;
+
+/*
+ * bpf_tcp_gen_syncookie
+ *
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
+ *
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ip6hdr**).
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header.
+ *
+ * Returns
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** SYN cookie cannot be issued due to error
+ *
+ * **-ENOENT** SYN cookie should not be issued (no SYN flood)
+ *
+ * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
+ *
+ * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ */
+static __s64 (*bpf_tcp_gen_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 110;
+
+/*
+ * bpf_skb_output
+ *
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct sk_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 111;
+
+/*
+ * bpf_probe_read_user
+ *
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_read_user)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 112;
+
+/*
+ * bpf_probe_read_kernel
+ *
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_read_kernel)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 113;
+
+/*
+ * bpf_probe_read_user_str
+ *
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, returns the number of bytes that were written,
+ * including the terminal NUL. This makes this helper useful in
+ * tracing programs for reading strings, and more importantly to
+ * get its length at runtime. See the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using **bpf_probe_read_user**\ () helper here
+ * instead to read the string would require to estimate the length
+ * at compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another useful use case is when parsing individual process
+ * arguments or individual environment variables navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ *
+ * Returns
+ * On success, the strictly positive length of the output string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ */
+static long (*bpf_probe_read_user_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 114;
+
+/*
+ * bpf_probe_read_kernel_str
+ *
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
+ *
+ * Returns
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
+ */
+static long (*bpf_probe_read_kernel_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 115;
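+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * copying a kernel string through an unsafe pointer, analogous to the
+ * **bpf_probe_read_user_str**\ () snippet above. Reading the comm of the
+ * current task this way is shown purely for illustration; CO-RE builds
+ * would typically use BPF_CORE_READ_STR_INTO() from bpf_core_read.h
+ * instead.
+ *
+ *        // In a kprobe or tracepoint program body:
+ *        struct task_struct *task;
+ *        char comm[16];
+ *
+ *        task = (struct task_struct *)bpf_get_current_task();
+ *        bpf_probe_read_kernel_str(comm, sizeof(comm), task->comm);
+ */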
+
+/*
+ * bpf_tcp_send_ack
+ *
+ * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
+ * *rcv_nxt* is the ack_seq to be sent out.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = (void *) 116;
+
+/*
+ * bpf_send_signal_thread
+ *
+ * Send signal *sig* to the thread corresponding to the current task.
+ *
+ * Returns
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
+ */
+static long (*bpf_send_signal_thread)(__u32 sig) = (void *) 117;
+
+/*
+ * bpf_jiffies64
+ *
+ * Obtain the 64bit jiffies
+ *
+ * Returns
+ * The 64 bit jiffies
+ */
+static __u64 (*bpf_jiffies64)(void) = (void *) 118;
+
+/*
+ * bpf_read_branch_records
+ *
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (**struct perf_branch_entry**) associated to *ctx*
+ * and store it in the buffer pointed by *buf* up to size
+ * *size* bytes.
+ *
+ * Returns
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
+ *
+ * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ * instead return the number of bytes required to store all the
+ * branch entries. If this flag is set, *buf* may be NULL.
+ *
+ * **-EINVAL** if arguments invalid or **size** not a multiple
+ * of **sizeof**\ (**struct perf_branch_entry**\ ).
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ */
+static long (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf, __u32 size, __u64 flags) = (void *) 119;
+
+/*
+ * bpf_get_ns_current_pid_tgid
+ *
+ * Returns 0 on success; values for *pid* and *tgid* as seen from the current
+ * *namespace* will be returned in *nsdata*.
+ *
+ * Returns
+ * 0 on success, or one of the following in case of failure:
+ *
+ * **-EINVAL** if dev and inum supplied don't match dev_t and inode number
+ * with nsfs of current task, or if dev conversion to dev_t lost high bits.
+ *
+ * **-ENOENT** if pidns does not exist for the current task.
+ */
+static long (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino, struct bpf_pidns_info *nsdata, __u32 size) = (void *) 120;
+
+/*
+ * bpf_xdp_output
+ *
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct xdp_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_xdp_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 121;
+
+/*
+ * bpf_get_netns_cookie
+ *
+ * Retrieve the cookie (generated by the kernel) of the network
+ * namespace the input *ctx* is associated with. The network
+ * namespace cookie remains stable for its lifetime and provides
+ * a global identifier that can be assumed unique. If *ctx* is
+ * NULL, then the helper returns the cookie for the initial
+ * network namespace. The cookie itself is very similar to that
+ * of **bpf_get_socket_cookie**\ () helper, but for network
+ * namespaces instead of sockets.
+ *
+ * Returns
+ * An 8-byte long opaque number.
+ */
+static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *) 122;
+
+/*
+ * bpf_get_current_ancestor_cgroup_id
+ *
+ * Return id of cgroup v2 that is ancestor of the cgroup associated
+ * with the current task at the *ancestor_level*. The root cgroup
+ * is at *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with the current task, then return value will be the
+ * same as that of **bpf_get_current_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with the current task.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_get_current_cgroup_id**\ ().
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) = (void *) 123;
+
+/*
+ * bpf_sk_assign
+ *
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ * **BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
+ * Assign the *sk* to the *skb*. When combined with appropriate
+ * routing configuration to receive the packet towards the socket,
+ * this will cause *skb* to be delivered to the specified socket.
+ * Subsequent redirection of *skb* via **bpf_redirect**\ (),
+ * **bpf_clone_redirect**\ () or other methods outside of BPF may
+ * interfere with successful delivery to the socket.
+ *
+ * This operation is only valid from TC ingress path.
+ *
+ * The *flags* argument must be zero.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EINVAL** if specified *flags* are not supported.
+ *
+ * **-ENOENT** if the socket is unavailable for assignment.
+ *
+ * **-ENETUNREACH** if the socket is unreachable (wrong netns).
+ *
+ * **-EOPNOTSUPP** if the operation is not supported, for example
+ * a call from outside of TC ingress.
+ *
+ * **-ESOCKTNOSUPPORT** if the socket type is not supported
+ * (reuseport).
+ */
+static long (*bpf_sk_assign)(void *ctx, void *sk, __u64 flags) = (void *) 124;
+
+/*
+ * bpf_ktime_get_boot_ns
+ *
+ * Return the time elapsed since system boot, in nanoseconds.
+ * Does include the time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
+ *
+ * Returns
+ * Current *ktime*.
+ */
+static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *) 125;
+
+/*
+ * bpf_seq_printf
+ *
+ * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
+ * out the format string.
+ * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
+ * the format string itself. The *data* and *data_len* are format string
+ * arguments. The *data* are a **u64** array and corresponding format string
+ * values are stored in the array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data* array.
+ * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
+ *
+ * Formats **%s**, **%p{i,I}{4,6}** require reading kernel memory.
+ * Reading kernel memory may fail due to either invalid address or
+ * valid address but requiring a major memory fault. If reading kernel memory
+ * fails, the string for **%s** will be an empty string, and the ip
+ * address for **%p{i,I}{4,6}** will be 0. Not returning error to
+ * bpf program is consistent with what **bpf_trace_printk**\ () does for now.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EBUSY** if per-CPU memory copy buffer is busy, can try again
+ * by returning 1 from bpf program.
+ *
+ * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
+ *
+ * **-E2BIG** if *fmt* contains too many format specifiers.
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ */
+static long (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 126;
+
+/*
+ * bpf_seq_write
+ *
+ * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
+ * The *m* represents the seq_file. The *data* and *len* represent the
+ * data to write in bytes.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ */
+static long (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) = (void *) 127;
+
+/*
+ * bpf_sk_cgroup_id
+ *
+ * Return the cgroup v2 id of the socket *sk*.
+ *
+ * *sk* must be a non-**NULL** pointer to a socket, e.g. one
+ * returned from **bpf_sk_lookup_xxx**\ (),
+ * **bpf_sk_fullsock**\ (), etc. The format of returned id is
+ * same as in **bpf_skb_cgroup_id**\ ().
+ *
+ * This helper is available only if the kernel was compiled with
+ * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_sk_cgroup_id)(void *sk) = (void *) 128;
+
+/*
+ * bpf_sk_ancestor_cgroup_id
+ *
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *sk* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *sk*, then return value will be same as that
+ * of **bpf_sk_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *sk*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_sk_cgroup_id**\ ().
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) = (void *) 129;
+
+/*
+ * bpf_ringbuf_output
+ *
+ * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * An adaptive notification is a notification sent whenever the user-space
+ * process has caught up and consumed all available payloads. In case the user-space
+ * process is still processing a previous payload, then no notification is needed
+ * as it will process the newly added payload automatically.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) = (void *) 130;
+
+/*
+ * bpf_ringbuf_reserve
+ *
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
+ * *flags* must be 0.
+ *
+ * Returns
+ * Valid pointer with *size* bytes of memory available; NULL,
+ * otherwise.
+ */
+static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) = (void *) 131;
+
+/*
+ * bpf_ringbuf_submit
+ *
+ * Submit reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
+ *
+ * Returns
+ * Nothing. Always succeeds.
+ */
+static void (*bpf_ringbuf_submit)(void *data, __u64 flags) = (void *) 132;
+
+/*
+ * bpf_ringbuf_discard
+ *
+ * Discard reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
+ *
+ * Returns
+ * Nothing. Always succeeds.
+ */
+static void (*bpf_ringbuf_discard)(void *data, __u64 flags) = (void *) 133;
+
+/*
+ * bpf_ringbuf_query
+ *
+ * Query various characteristics of provided ring buffer. What
+ * exactly is queried is determined by *flags*:
+ *
+ * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ * * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ * Data returned is just a momentary snapshot of actual values
+ * and could be inaccurate, so this facility should be used to
+ * power heuristics and for reporting, not to make 100% correct
+ * calculation.
+ *
+ * Returns
+ * Requested value, or 0, if *flags* are not recognized.
+ */
+static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) = (void *) 134;
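+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * the reserve/submit pattern with the ring buffer helpers above. The
+ * names below are hypothetical.
+ *
+ *        struct event {
+ *                __u64 cgroup_id;
+ *                __u64 bytes;
+ *        };
+ *
+ *        struct {
+ *                __uint(type, BPF_MAP_TYPE_RINGBUF);
+ *                __uint(max_entries, 256 * 1024);
+ *        } events SEC(".maps");
+ *
+ *        // In the program body:
+ *        struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
+ *        if (!e)
+ *                return 0; // ring buffer full, drop this sample
+ *        e->cgroup_id = bpf_get_current_cgroup_id();
+ *        e->bytes = 512;
+ *        bpf_ringbuf_submit(e, 0);
+ */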
+
+/*
+ * bpf_csum_level
+ *
+ * Change the skbs checksum level by one layer up or down, or
+ * reset it entirely to none in order to have the stack perform
+ * checksum validation. The level is applicable to the following
+ * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
+ * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ * through **bpf_skb_adjust_room**\ () helper with passing in
+ * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
+ * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
+ * the UDP header is removed. Similarly, an encap of the latter
+ * into the former could be accompanied by a helper call to
+ * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ * skb is still intended to be processed in higher layers of the
+ * stack instead of just egressing at tc.
+ *
+ * The following level settings are supported at this time:
+ *
+ * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ * sets CHECKSUM_NONE to force checksum validation by the stack.
+ * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ * skb->csum_level.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure. In the
+ * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ * is returned or the error code -EACCES in case the skb is not
+ * subject to CHECKSUM_UNNECESSARY.
+ */
+static long (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) = (void *) 135;
+
+/*
+ * bpf_skc_to_tcp6_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) = (void *) 136;
+
+/*
+ * bpf_skc_to_tcp_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) = (void *) 137;
+
+/*
+ * bpf_skc_to_tcp_timewait_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) = (void *) 138;
+
+/*
+ * bpf_skc_to_tcp_request_sock
+ *
+ * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) = (void *) 139;
+
+/*
+ * bpf_skc_to_udp6_sock
+ *
+ * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) = (void *) 140;
+
+/*
+ * bpf_get_task_stack
+ *
+ * Return a user or a kernel stack in bpf program provided buffer.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_task_stack**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ * to a sufficiently large buffer size. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ *          # sysctl kernel.perf_event_max_stack=<new value>
+ *
+ * Returns
+ * A non-negative value equal to or less than *size* on success,
+ * or a negative error in case of failure.
+ */
+static long (*bpf_get_task_stack)(struct task_struct *task, void *buf, __u32 size, __u64 flags) = (void *) 141;
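+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * collecting a user-space stack of the current task from a perf_event
+ * program. The section and function names are hypothetical.
+ *
+ *        SEC("perf_event")
+ *        int sample_stack(struct bpf_perf_event_data *ctx)
+ *        {
+ *                __u64 ips[32];
+ *                struct task_struct *task;
+ *                long len;
+ *
+ *                task = (struct task_struct *)bpf_get_current_task();
+ *                len = bpf_get_task_stack(task, ips, sizeof(ips),
+ *                                         BPF_F_USER_STACK);
+ *                // len holds the number of bytes written, or a negative error
+ *                return 0;
+ *        }
+ */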
+
+/*
+ * bpf_load_hdr_opt
+ *
+ * Load header option. Support reading a particular TCP header
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ * If *flags* is 0, it will search the option from the
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ * has details on what skb_data contains under different
+ * *skops*\ **->op**.
+ *
+ * The first byte of the *searchby_res* specifies the
+ * kind that it wants to search.
+ *
+ * If the searching kind is an experimental kind
+ * (i.e. 253 or 254 according to RFC6994), it also
+ * needs to specify the "magic" which is either
+ * 2 bytes or 4 bytes. It then also needs to
+ * specify the size of the magic by using
+ * the 2nd byte which is "kind-length" of a TCP
+ * header option and the "kind-length" also
+ * includes the first 2 bytes "kind" and "kind-length"
+ * itself as a normal TCP header option also does.
+ *
+ * For example, to search experimental kind 254 with
+ * 2 byte magic 0xeB9F, the searchby_res should be
+ * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ * To search for the standard window scale option (3),
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ * Note, kind-length must be 0 for regular option.
+ *
+ * Searching for No-Op (0) and End-of-Option-List (1) are
+ * not supported.
+ *
+ * *len* must be at least 2 bytes which is the minimal size
+ * of a header option.
+ *
+ * Supported flags:
+ *
+ * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ * saved_syn packet or the just-received syn packet.
+ *
+ *
+ * Returns
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOMSG** if the option is not found.
+ *
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space. Only *len* number of
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ */
+static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res, __u32 len, __u64 flags) = (void *) 142;
+
+/*
+ * bpf_loop
+ *
+ * For **nr_loops**, call **callback_fn** function
+ * with **callback_ctx** as the context parameter.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ * Currently, the **flags** must be 0 and **nr_loops** is
+ * limited to 1 << 23 (~8 million) loops.
+ *
+ * long (\*callback_fn)(u32 index, void \*ctx);
+ *
+ * where **index** is the current index in the loop. The index
+ * is zero-indexed.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * loop. If return value is 1, the helper will skip the rest of
+ * the loops and return. Other return values are not used now,
+ * and will be rejected by the verifier.
+ *
+ *
+ * Returns
+ * The number of loops performed, **-EINVAL** for invalid **flags**,
+ * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
+ */
+static long (* const bpf_loop)(__u32 nr_loops, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 181;
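+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * driving a static callback with **bpf_loop**\ (). The names below are
+ * hypothetical.
+ *
+ *        struct loop_ctx {
+ *                __u64 sum;
+ *        };
+ *
+ *        static long add_index(__u32 index, void *data)
+ *        {
+ *                struct loop_ctx *lc = data;
+ *
+ *                lc->sum += index;
+ *                return 0; // 0 continues, 1 would stop the loop early
+ *        }
+ *
+ *        // In the program body:
+ *        struct loop_ctx lc = { .sum = 0 };
+ *        bpf_loop(16, add_index, &lc, 0);
+ */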
+
+/*
+ * bpf_store_hdr_opt
+ *
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ * long. The kind-length does not have to be 4 byte
+ * aligned. The kernel will take care of the padding
+ * and setting the 4 bytes aligned value to th->doff.
+ *
+ * This helper will check for duplicated option
+ * by searching the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ *
+ * Returns
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ */
+static long (*bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from, __u32 len, __u64 flags) = (void *) 143;
+
+/*
+ * bpf_reserve_hdr_opt
+ *
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ *
+ * Returns
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ */
+static long (*bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, __u32 len, __u64 flags) = (void *) 144;
+
+/*
+ * bpf_inode_storage_get
+ *
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces the key must be an inode and the map must also
+ * be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ *
+ * Returns
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ */
+static void *(*bpf_inode_storage_get)(void *map, void *inode, void *value, __u64 flags) = (void *) 145;
+
+/*
+ * bpf_inode_storage_delete
+ *
+ * Delete a bpf_local_storage from an *inode*.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ */
+static int (*bpf_inode_storage_delete)(void *map, void *inode) = (void *) 146;
+
+/*
+ * bpf_d_path
+ *
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
+ * is zero terminated.
+ *
+ *
+ * Returns
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ */
+static long (*bpf_d_path)(struct path *path, char *buf, __u32 sz) = (void *) 147;
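+
+/*
+ * Example (illustrative sketch, not part of the generated helper docs):
+ * resolving the full path of a file from an LSM program. bpf_d_path()
+ * is only accepted by the verifier on an allow-listed set of hooks
+ * (certain LSM and fentry/fexit attach points); the hook and names
+ * below are assumptions.
+ *
+ *        SEC("lsm/file_open")
+ *        int BPF_PROG(trace_open, struct file *file)
+ *        {
+ *                char path[256];
+ *
+ *                bpf_d_path(&file->f_path, path, sizeof(path));
+ *                return 0;
+ *        }
+ */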
+
+/*
+ * bpf_copy_from_user
+ *
+ * Read *size* bytes from user space address *user_ptr* and store
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_copy_from_user)(void *dst, __u32 size, const void *user_ptr) = (void *) 148;
+
+/*
+ * bpf_snprintf_btf
+ *
+ * Use BTF to store a string representation of *ptr*->ptr in *str*,
+ * using *ptr*->type_id. This value should specify the type
+ * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
+ * can be used to look up vmlinux BTF type ids. Traversing the
+ * data structure using BTF, the type information and values are
+ * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
+ * the pointer data is carried out to avoid kernel crashes during
+ * operation. Smaller types can use string space on the stack;
+ * larger programs can use map data to store the string
+ * representation.
+ *
+ * The string can be subsequently shared with userspace via
+ * bpf_perf_event_output() or ring buffer interfaces.
+ * bpf_trace_printk() is to be avoided as it places too small
+ * a limit on string size to be useful.
+ *
+ * *flags* is a combination of
+ *
+ * **BTF_F_COMPACT**
+ * no formatting around type information
+ * **BTF_F_NONAME**
+ * no struct/union member names/types
+ * **BTF_F_PTR_RAW**
+ * show raw (unobfuscated) pointer values;
+ * equivalent to printk specifier %px.
+ * **BTF_F_ZERO**
+ * show zero-valued struct/union members; they
+ * are not displayed by default
+ *
+ *
+ * Returns
+ * The number of bytes that were written (or would have been
+ * written if output had to be truncated due to string size),
+ * or a negative error in cases of failure.
+ */
+static long (*bpf_snprintf_btf)(char *str, __u32 str_size, struct btf_ptr *ptr, __u32 btf_ptr_size, __u64 flags) = (void *) 149;
+
+/*
+ * bpf_seq_printf_btf
+ *
+ * Use BTF to write to seq_write a string representation of
+ * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
+ * *flags* are identical to those used for bpf_snprintf_btf.
+ *
+ * Returns
+ * 0 on success or a negative error in case of failure.
+ */
+static long (*bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr, __u32 ptr_size, __u64 flags) = (void *) 150;
+
+/*
+ * bpf_skb_cgroup_classid
+ *
+ * See **bpf_get_cgroup_classid**\ () for the main description.
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that
+ * the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ * associated socket instead of the current process.
+ *
+ * Returns
+ * The id is returned or 0 in case the id could not be retrieved.
+ */
+static __u64 (*bpf_skb_cgroup_classid)(struct __sk_buff *skb) = (void *) 151;
+
+/*
+ * bpf_redirect_neigh
+ *
+ * Redirect the packet to another net device of index *ifindex*
+ * and fill in L2 addresses from neighboring subsystem. This helper
+ * is somewhat similar to **bpf_redirect**\ (), except that it
+ * populates L2 addresses as well, meaning, internally, the helper
+ * relies on the neighbor lookup for the L2 address of the nexthop.
+ *
+ * The helper will perform a FIB lookup based on the skb's
+ * networking header to get the address of the next hop, unless
+ * this is supplied by the caller in the *params* argument. The
+ * *plen* argument indicates the len of *params* and should be set
+ * to 0 if *params* is NULL.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ *
+ * Returns
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ */
+static long (*bpf_redirect_neigh)(__u32 ifindex, struct bpf_redir_neigh *params, int plen, __u64 flags) = (void *) 152;
+
+/*
+ * bpf_per_cpu_ptr
+ *
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For ksym, there is a
+ * global var (either static or global) defined of the same name
+ * in the kernel. The ksym is percpu if the global var is percpu.
+ * The returned pointer points to the global percpu var on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ *
+ * Returns
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
+ */
+static void *(*bpf_per_cpu_ptr)(const void *percpu_ptr, __u32 cpu) = (void *) 153;
+
+/*
+ * bpf_this_cpu_ptr
+ *
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
+ * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
+ * never return NULL.
+ *
+ * Returns
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ */
+static void *(*bpf_this_cpu_ptr)(const void *percpu_ptr) = (void *) 154;
+
+/*
+ * bpf_redirect_peer
+ *
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*' peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the ingress
+ * hook and for veth device types. The peer device must reside in a
+ * different network namespace.
+ *
+ * Returns
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ */
+static long (*bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155;
+
+/*
+ * bpf_task_storage_get
+ *
+ * Get a bpf_local_storage from the *task*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *task* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
+ * helper enforces that the key must be a task_struct and the map must also
+ * be a **BPF_MAP_TYPE_TASK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *task* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *task*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ *
+ * Returns
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ */
+static void *(*bpf_task_storage_get)(void *map, struct task_struct *task, void *value, __u64 flags) = (void *) 156;
+
+/*
+ * bpf_task_storage_delete
+ *
+ * Delete a bpf_local_storage from a *task*.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ */
+static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) = (void *) 157;
+
+/*
+ * bpf_get_current_task_btf
+ *
+ * Return a BTF pointer to the "current" task.
+ * This pointer can also be used in helpers that accept an
+ * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
+ *
+ * Returns
+ * Pointer to the current task.
+ */
+static struct task_struct *(*bpf_get_current_task_btf)(void) = (void *) 158;
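+
+/*
+ * Illustrative sketch (not part of libbpf): pairing bpf_task_storage_get()
+ * and bpf_get_current_task_btf() with a BPF_MAP_TYPE_TASK_STORAGE map.
+ * Map, struct and field names are hypothetical.
+ *
+ *     struct task_counters { __u64 events; };
+ *
+ *     struct {
+ *         __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ *         __uint(map_flags, BPF_F_NO_PREALLOC);
+ *         __type(key, int);
+ *         __type(value, struct task_counters);
+ *     } task_ctrs SEC(".maps");
+ *
+ *     // inside a tracing program:
+ *     struct task_struct *task = bpf_get_current_task_btf();
+ *     struct task_counters *c;
+ *
+ *     c = bpf_task_storage_get(&task_ctrs, task, 0,
+ *                              BPF_LOCAL_STORAGE_GET_F_CREATE);
+ *     if (c)
+ *         c->events++;  // storage is zero-initialized on first creation
+ */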
+
+/*
+ * bpf_bprm_opts_set
+ *
+ * Set or clear certain options on *bprm*:
+ *
+ * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
+ * which sets the **AT_SECURE** auxv for glibc. The bit
+ * is cleared if the flag is not specified.
+ *
+ * Returns
+ * **-EINVAL** if invalid *flags* are passed, zero otherwise.
+ */
+static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) = (void *) 159;
+
+/*
+ * bpf_ktime_get_coarse_ns
+ *
+ * Return a coarse-grained version of the time elapsed since
+ * system boot, in nanoseconds. Does not include time the system
+ * was suspended.
+ *
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
+ *
+ * Returns
+ * Current *ktime*.
+ */
+static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *) 160;
+
+/*
+ * bpf_ima_inode_hash
+ *
+ * Returns the stored IMA hash of the *inode* (if it's available).
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ *
+ * Returns
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
+ * invalid arguments are passed.
+ */
+static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161;
+
+/*
+ * bpf_sock_from_file
+ *
+ * If the given file represents a socket, returns the associated
+ * socket.
+ *
+ * Returns
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
+ */
+static struct socket *(*bpf_sock_from_file)(struct file *file) = (void *) 162;
+
+/*
+ * bpf_check_mtu
+ *
+ * Check packet size against exceeding MTU of net device (based
+ * on *ifindex*). This helper will likely be used in combination
+ * with helpers that adjust/change the packet size.
+ *
+ * The argument *len_diff* can be used for querying with a planned
+ * size change. This allows checking the MTU prior to changing the
+ * packet ctx. Providing a *len_diff* adjustment that is larger than
+ * the actual packet size (resulting in a negative packet size) will
+ * in principle not exceed the MTU, which is why it is not considered
+ * a failure. Other BPF helpers are needed to perform the planned
+ * size change, which is why the responsibility for catching a
+ * negative packet size belongs in those helpers.
+ *
+ * Specifying *ifindex* zero means the MTU check is performed
+ * against the current net device. This is practical if this isn't
+ * used prior to redirect.
+ *
+ * On input *mtu_len* must be a valid pointer, else the verifier will
+ * reject the BPF program. If the value *mtu_len* is initialized to
+ * zero then the ctx packet size is used. When a value *mtu_len* is
+ * provided as input, it specifies the L3 length that the MTU check
+ * is done against. Remember XDP and TC length operate at L2, but
+ * this value is L3 as it correlates to MTU and IP-header tot_len
+ * values which are L3 (similar behavior to bpf_fib_lookup).
+ *
+ * The Linux kernel route table can configure MTUs on a more
+ * specific per route level, which is not provided by this helper.
+ * For route level MTU checks use the **bpf_fib_lookup**\ ()
+ * helper.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ *
+ * The *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_MTU_CHK_SEGS**
+ * This flag only works for *ctx* **struct sk_buff**.
+ * If the packet context contains extra packet segment buffers
+ * (often known as a GSO skb), then the MTU is harder to
+ * check at this point, because in the transmit path it is
+ * possible for the skb packet to get re-segmented
+ * (depending on net device features). This could still be
+ * an MTU violation, so this flag enables performing the MTU
+ * check against segments, with a different violation
+ * return code to tell it apart. The check cannot use len_diff.
+ *
+ * On return the *mtu_len* pointer contains the MTU value of the net
+ * device. Remember the net device configured MTU is the L3 size,
+ * which is returned here, while XDP and TC length operate at L2.
+ * The helper takes this into account for you, but keep it in mind
+ * when using the MTU value in your BPF code.
+ *
+ *
+ * Returns
+ * * 0 on success, and populates the MTU value in the *mtu_len* pointer.
+ *
+ * * < 0 if any input argument is invalid (*mtu_len* not updated)
+ *
+ * MTU violations return positive values, but also populate the MTU
+ * value in the *mtu_len* pointer, as this can be needed for
+ * implementing PMTU handling:
+ *
+ * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
+ * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
+ */
+static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len, __s32 len_diff, __u64 flags) = (void *) 163;
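+
+/*
+ * Illustrative sketch (not part of libbpf): a minimal MTU probe against the
+ * current device from an XDP or tc program, with no planned size change.
+ *
+ *     __u32 mtu_len = 0;  // 0 means "use the ctx packet size"
+ *     long ret = bpf_check_mtu(ctx, 0, &mtu_len, 0, 0);
+ *     if (ret > 0)
+ *         ;  // MTU would be violated; mtu_len now holds the device L3 MTU
+ */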
+
+/*
+ * bpf_for_each_map_elem
+ *
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ * Currently, the **flags** must be 0.
+ *
+ * The following are a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where the
+ * bpf_prog is running.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * element. If the return value is 1, the helper will skip the rest of
+ * the elements and return. Other return values are not used now.
+ *
+ *
+ * Returns
+ * The number of traversed map elements for success, **-EINVAL** for
+ * invalid **flags**.
+ */
+static long (*bpf_for_each_map_elem)(void *map, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 164;
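+
+/*
+ * Illustrative sketch (not part of libbpf): the expected callback shape for
+ * bpf_for_each_map_elem() over a hypothetical hash map with __u64 values.
+ *
+ *     static long sum_cb(struct bpf_map *map, const void *key, void *value,
+ *                        void *ctx)
+ *     {
+ *         *(__u64 *)ctx += *(__u64 *)value;
+ *         return 0;  // 0 = continue, 1 = stop iterating
+ *     }
+ *
+ *     // in the program body:
+ *     __u64 sum = 0;
+ *     bpf_for_each_map_elem(&my_map, sum_cb, &sum, 0);
+ */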
+
+/*
+ * bpf_snprintf
+ *
+ * Outputs a string into the **str** buffer of size **str_size**
+ * based on a format string stored in a read-only map pointed by
+ * **fmt**.
+ *
+ * Each format specifier in **fmt** corresponds to one u64 element
+ * in the **data** array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data*
+ * array. The *data_len* is the size of *data* in bytes - must be
+ * a multiple of 8.
+ *
+ * Formats **%s** and **%p{i,I}{4,6}** require reading kernel
+ * memory. Reading kernel memory may fail due to either invalid
+ * address or valid address but requiring a major memory fault. If
+ * reading kernel memory fails, the string for **%s** will be an
+ * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
+ * Not returning error to bpf program is consistent with what
+ * **bpf_trace_printk**\ () does for now.
+ *
+ *
+ * Returns
+ * The strictly positive length of the formatted string, including
+ * the trailing zero character. If the return value is greater than
+ * **str_size**, **str** contains a truncated string, guaranteed to
+ * be zero-terminated except when **str_size** is 0.
+ *
+ * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
+ */
+static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt, __u64 *data, __u32 data_len) = (void *) 165;
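+
+/*
+ * Illustrative sketch (not part of libbpf): formatting into a buffer with
+ * bpf_snprintf(). The format string must live in read-only program data;
+ * cgid and bytes are hypothetical __u64 variables.
+ *
+ *     static const char fmt[] = "cgroup %llu wrote %llu bytes";
+ *     __u64 args[] = { cgid, bytes };  // data_len must be a multiple of 8
+ *     char out[64];
+ *
+ *     bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
+ */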
+
+/*
+ * bpf_sys_bpf
+ *
+ * Execute bpf syscall with given arguments.
+ *
+ * Returns
+ * A syscall result.
+ */
+static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) = (void *) 166;
+
+/*
+ * bpf_btf_find_by_name_kind
+ *
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ *
+ * Returns
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ */
+static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) = (void *) 167;
+
+/*
+ * bpf_sys_close
+ *
+ * Execute close syscall for given FD.
+ *
+ * Returns
+ * A syscall result.
+ */
+static long (*bpf_sys_close)(__u32 fd) = (void *) 168;
+
+/*
+ * bpf_timer_init
+ *
+ * Initialize the timer.
+ * First 4 bits of *flags* specify clockid.
+ * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
+ * All other bits of *flags* are reserved.
+ * The verifier will reject the program if *timer* is not from
+ * the same *map*.
+ *
+ * Returns
+ * 0 on success.
+ * **-EBUSY** if *timer* is already initialized.
+ * **-EINVAL** if invalid *flags* are passed.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ */
+static long (*bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) = (void *) 169;
+
+/*
+ * bpf_timer_set_callback
+ *
+ * Configure the timer to call *callback_fn* static function.
+ *
+ * Returns
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ */
+static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) = (void *) 170;
+
+/*
+ * bpf_timer_start
+ *
+ * Set timer expiration N nanoseconds from the current time. The
+ * configured callback will be invoked in soft irq context on some cpu
+ * and will not repeat unless another bpf_timer_start() is made.
+ * In such case the next invocation can migrate to a different cpu.
+ * Since struct bpf_timer is a field inside map element the map
+ * owns the timer. The bpf_timer_set_callback() will increment refcnt
+ * of BPF program to make sure that callback_fn code stays valid.
+ * When user space reference to a map reaches zero all timers
+ * in a map are cancelled and corresponding program's refcnts are
+ * decremented. This is done to make sure that Ctrl-C of a user
+ * process doesn't leave any timers running. If map is pinned in
+ * bpffs the callback_fn can re-arm itself indefinitely.
+ * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
+ * cancel and free the timer in the given map element.
+ * The map can contain timers that invoke callback_fn-s from different
+ * programs. The same callback_fn can serve different timers from
+ * different maps if key/value layout matches across maps.
+ * Every bpf_timer_set_callback() can have different callback_fn.
+ *
+ *
+ * Returns
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
+ * or invalid *flags* are passed.
+ */
+static long (*bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) = (void *) 171;
+
+/*
+ * bpf_timer_cancel
+ *
+ * Cancel the timer and wait for callback_fn to finish if it was running.
+ *
+ * Returns
+ * 0 if the timer was not active.
+ * 1 if the timer was active.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
+ * own timer which would have led to a deadlock otherwise.
+ */
+static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *) 172;
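+
+/*
+ * Illustrative sketch (not part of libbpf): the usual bpf_timer life cycle.
+ * The timer must be embedded in a map value; map and struct names are
+ * hypothetical.
+ *
+ *     struct elem { struct bpf_timer t; };
+ *
+ *     static int timer_cb(void *map, int *key, struct elem *val)
+ *     {
+ *         return 0;
+ *     }
+ *
+ *     // in the program body, with val pointing at an element of my_array:
+ *     bpf_timer_init(&val->t, &my_array, CLOCK_MONOTONIC);
+ *     bpf_timer_set_callback(&val->t, timer_cb);
+ *     bpf_timer_start(&val->t, 1000000000, 0);  // fire in 1 second
+ */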
+
+/*
+ * bpf_get_func_ip
+ *
+ * Get address of the traced function (for tracing and kprobe programs).
+ *
+ * Returns
+ * Address of the traced function.
+ */
+static __u64 (*bpf_get_func_ip)(void *ctx) = (void *) 173;
+
+/*
+ * bpf_get_attach_cookie
+ *
+ * Get bpf_cookie value provided (optionally) during the program
+ * attachment. It might be different for each individual
+ * attachment, even if BPF program itself is the same.
+ * Expects BPF program context *ctx* as a first argument.
+ *
+ * Supported for the following program types:
+ * - kprobe/uprobe;
+ * - tracepoint;
+ * - perf_event.
+ *
+ * Returns
+ * Value specified by user at BPF link creation/attachment time
+ * or 0, if it was not specified.
+ */
+static __u64 (*bpf_get_attach_cookie)(void *ctx) = (void *) 174;
+
+/*
+ * bpf_task_pt_regs
+ *
+ * Get the struct pt_regs associated with **task**.
+ *
+ * Returns
+ * A pointer to struct pt_regs.
+ */
+static long (*bpf_task_pt_regs)(struct task_struct *task) = (void *) 175;
+
+/*
+ * bpf_get_branch_snapshot
+ *
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ * called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ *
+ * Returns
+ * On success, number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ */
+static long (*bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) = (void *) 176;
+
+/*
+ * bpf_trace_vprintk
+ *
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ *
+ * Returns
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ */
+static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 177;
+
+/*
+ * bpf_skc_to_unix_sock
+ *
+ * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct unix_sock *(*bpf_skc_to_unix_sock)(void *sk) = (void *) 178;
+
+/*
+ * bpf_kallsyms_lookup_name
+ *
+ * Get the address of a kernel symbol, returned in *res*. *res* is
+ * set to 0 if the symbol is not found.
+ *
+ * Returns
+ * On success, zero. On error, a negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ * **-ENOENT** if symbol is not found.
+ *
+ * **-EPERM** if caller does not have permission to obtain kernel address.
+ */
+static long (*bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags, __u64 *res) = (void *) 179;
+
+/*
+ * bpf_find_vma
+ *
+ * Find vma of *task* that contains *addr*, call *callback_fn*
+ * function with *task*, *vma*, and *callback_ctx*.
+ * The *callback_fn* should be a static function and
+ * the *callback_ctx* should be a pointer to the stack.
+ * The *flags* is used to control certain aspects of the helper.
+ * Currently, the *flags* must be 0.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ *
+ * Returns
+ * 0 on success.
+ * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ * **-EBUSY** if failed to try lock mmap_lock.
+ * **-EINVAL** for invalid **flags**.
+ */
+static long (*bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 180;
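+
+/*
+ * Illustrative sketch (not part of libbpf): a hypothetical bpf_find_vma()
+ * callback recording the start address of the vma containing *addr* for the
+ * current task.
+ *
+ *     static long vma_cb(struct task_struct *task, struct vm_area_struct *vma,
+ *                        void *ctx)
+ *     {
+ *         *(unsigned long *)ctx = vma->vm_start;
+ *         return 0;
+ *     }
+ *
+ *     unsigned long start = 0;
+ *     bpf_find_vma(bpf_get_current_task_btf(), addr, vma_cb, &start, 0);
+ */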
+
+
diff --git a/pkg/collector/bpf/libbpf/bpf_tracing.h b/pkg/collector/bpf/libbpf/bpf_tracing.h
new file mode 100644
index 00000000..ca324d75
--- /dev/null
+++ b/pkg/collector/bpf/libbpf/bpf_tracing.h
@@ -0,0 +1,921 @@
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#ifndef __BPF_TRACING_H__
+#define __BPF_TRACING_H__
+
+/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
+#if defined(__TARGET_ARCH_x86)
+ #define bpf_target_x86
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_s390)
+ #define bpf_target_s390
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arm)
+ #define bpf_target_arm
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arm64)
+ #define bpf_target_arm64
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_mips)
+ #define bpf_target_mips
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_powerpc)
+ #define bpf_target_powerpc
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_sparc)
+ #define bpf_target_sparc
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_riscv)
+ #define bpf_target_riscv
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arc)
+ #define bpf_target_arc
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_loongarch)
+ #define bpf_target_loongarch
+ #define bpf_target_defined
+#else
+
+/* Fall back to what the compiler says */
+#if defined(__x86_64__)
+ #define bpf_target_x86
+ #define bpf_target_defined
+#elif defined(__s390__)
+ #define bpf_target_s390
+ #define bpf_target_defined
+#elif defined(__arm__)
+ #define bpf_target_arm
+ #define bpf_target_defined
+#elif defined(__aarch64__)
+ #define bpf_target_arm64
+ #define bpf_target_defined
+#elif defined(__mips__)
+ #define bpf_target_mips
+ #define bpf_target_defined
+#elif defined(__powerpc__)
+ #define bpf_target_powerpc
+ #define bpf_target_defined
+#elif defined(__sparc__)
+ #define bpf_target_sparc
+ #define bpf_target_defined
+#elif defined(__riscv) && __riscv_xlen == 64
+ #define bpf_target_riscv
+ #define bpf_target_defined
+#elif defined(__arc__)
+ #define bpf_target_arc
+ #define bpf_target_defined
+#elif defined(__loongarch__)
+ #define bpf_target_loongarch
+ #define bpf_target_defined
+#endif /* no compiler target */
+
+#endif
+
+#ifndef __BPF_TARGET_MISSING
+#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
+#endif
+
+#if defined(bpf_target_x86)
+
+/*
+ * https://en.wikipedia.org/wiki/X86_calling_conventions#System_V_AMD64_ABI
+ */
+
+#if defined(__KERNEL__) || defined(__VMLINUX_H__)
+
+#define __PT_PARM1_REG di
+#define __PT_PARM2_REG si
+#define __PT_PARM3_REG dx
+#define __PT_PARM4_REG cx
+#define __PT_PARM5_REG r8
+#define __PT_PARM6_REG r9
+/*
+ * Syscall uses r10 for PARM4. See arch/x86/entry/entry_64.S:entry_SYSCALL_64
+ * comments in Linux sources. And refer to syscall(2) manpage.
+ */
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG r10
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG sp
+#define __PT_FP_REG bp
+#define __PT_RC_REG ax
+#define __PT_SP_REG sp
+#define __PT_IP_REG ip
+
+#else
+
+#ifdef __i386__
+
+/* i386 kernel is built with -mregparm=3 */
+#define __PT_PARM1_REG eax
+#define __PT_PARM2_REG edx
+#define __PT_PARM3_REG ecx
+/* i386 syscall ABI is very different, refer to syscall(2) manpage */
+#define __PT_PARM1_SYSCALL_REG ebx
+#define __PT_PARM2_SYSCALL_REG ecx
+#define __PT_PARM3_SYSCALL_REG edx
+#define __PT_PARM4_SYSCALL_REG esi
+#define __PT_PARM5_SYSCALL_REG edi
+#define __PT_PARM6_SYSCALL_REG ebp
+
+#define __PT_RET_REG esp
+#define __PT_FP_REG ebp
+#define __PT_RC_REG eax
+#define __PT_SP_REG esp
+#define __PT_IP_REG eip
+
+#else /* __i386__ */
+
+#define __PT_PARM1_REG rdi
+#define __PT_PARM2_REG rsi
+#define __PT_PARM3_REG rdx
+#define __PT_PARM4_REG rcx
+#define __PT_PARM5_REG r8
+#define __PT_PARM6_REG r9
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG r10
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG rsp
+#define __PT_FP_REG rbp
+#define __PT_RC_REG rax
+#define __PT_SP_REG rsp
+#define __PT_IP_REG rip
+
+#endif /* __i386__ */
+
+#endif /* __KERNEL__ || __VMLINUX_H__ */
+
+#elif defined(bpf_target_s390)
+
+/*
+ * https://github.com/IBM/s390x-abi/releases/download/v1.6/lzsabi_s390x.pdf
+ */
+
+struct pt_regs___s390 {
+ unsigned long orig_gpr2;
+};
+
+/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
+#define __PT_PARM1_REG gprs[2]
+#define __PT_PARM2_REG gprs[3]
+#define __PT_PARM3_REG gprs[4]
+#define __PT_PARM4_REG gprs[5]
+#define __PT_PARM5_REG gprs[6]
+
+#define __PT_PARM1_SYSCALL_REG orig_gpr2
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG gprs[7]
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
+
+#define __PT_RET_REG gprs[14]
+#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG gprs[2]
+#define __PT_SP_REG gprs[15]
+#define __PT_IP_REG psw.addr
+
+#elif defined(bpf_target_arm)
+
+/*
+ * https://github.com/ARM-software/abi-aa/blob/main/aapcs32/aapcs32.rst#machine-registers
+ */
+
+#define __PT_PARM1_REG uregs[0]
+#define __PT_PARM2_REG uregs[1]
+#define __PT_PARM3_REG uregs[2]
+#define __PT_PARM4_REG uregs[3]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG uregs[4]
+#define __PT_PARM6_SYSCALL_REG uregs[5]
+#define __PT_PARM7_SYSCALL_REG uregs[6]
+
+#define __PT_RET_REG uregs[14]
+#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG uregs[0]
+#define __PT_SP_REG uregs[13]
+#define __PT_IP_REG uregs[12]
+
+#elif defined(bpf_target_arm64)
+
+/*
+ * https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#machine-registers
+ */
+
+struct pt_regs___arm64 {
+ unsigned long orig_x0;
+};
+
+/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
+#define __PT_PARM1_REG regs[0]
+#define __PT_PARM2_REG regs[1]
+#define __PT_PARM3_REG regs[2]
+#define __PT_PARM4_REG regs[3]
+#define __PT_PARM5_REG regs[4]
+#define __PT_PARM6_REG regs[5]
+#define __PT_PARM7_REG regs[6]
+#define __PT_PARM8_REG regs[7]
+
+#define __PT_PARM1_SYSCALL_REG orig_x0
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
+
+#define __PT_RET_REG regs[30]
+#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG regs[0]
+#define __PT_SP_REG sp
+#define __PT_IP_REG pc
+
+#elif defined(bpf_target_mips)
+
+/*
+ * N64 ABI is assumed right now.
+ * https://en.wikipedia.org/wiki/MIPS_architecture#Calling_conventions
+ */
+
+#define __PT_PARM1_REG regs[4]
+#define __PT_PARM2_REG regs[5]
+#define __PT_PARM3_REG regs[6]
+#define __PT_PARM4_REG regs[7]
+#define __PT_PARM5_REG regs[8]
+#define __PT_PARM6_REG regs[9]
+#define __PT_PARM7_REG regs[10]
+#define __PT_PARM8_REG regs[11]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG /* only N32/N64 */
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG /* only N32/N64 */
+
+#define __PT_RET_REG regs[31]
+#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG regs[2]
+#define __PT_SP_REG regs[29]
+#define __PT_IP_REG cp0_epc
+
+#elif defined(bpf_target_powerpc)
+
+/*
+ * http://refspecs.linux-foundation.org/elf/elfspec_ppc.pdf (page 3-14,
+ * section "Function Calling Sequence")
+ */
+
+#define __PT_PARM1_REG gpr[3]
+#define __PT_PARM2_REG gpr[4]
+#define __PT_PARM3_REG gpr[5]
+#define __PT_PARM4_REG gpr[6]
+#define __PT_PARM5_REG gpr[7]
+#define __PT_PARM6_REG gpr[8]
+#define __PT_PARM7_REG gpr[9]
+#define __PT_PARM8_REG gpr[10]
+
+/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG orig_gpr3
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#if !defined(__arch64__)
+#define __PT_PARM7_SYSCALL_REG __PT_PARM7_REG /* only powerpc (not powerpc64) */
+#endif
+
+#define __PT_RET_REG regs[31]
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG gpr[3]
+#define __PT_SP_REG sp
+#define __PT_IP_REG nip
+
+#elif defined(bpf_target_sparc)
+
+/*
+ * https://en.wikipedia.org/wiki/Calling_convention#SPARC
+ */
+
+#define __PT_PARM1_REG u_regs[UREG_I0]
+#define __PT_PARM2_REG u_regs[UREG_I1]
+#define __PT_PARM3_REG u_regs[UREG_I2]
+#define __PT_PARM4_REG u_regs[UREG_I3]
+#define __PT_PARM5_REG u_regs[UREG_I4]
+#define __PT_PARM6_REG u_regs[UREG_I5]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG u_regs[UREG_I7]
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG u_regs[UREG_I0]
+#define __PT_SP_REG u_regs[UREG_FP]
+/* Should this also be a bpf_target check for the sparc case? */
+#if defined(__arch64__)
+#define __PT_IP_REG tpc
+#else
+#define __PT_IP_REG pc
+#endif
+
+#elif defined(bpf_target_riscv)
+
+/*
+ * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
+ */
+
+/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG a0
+#define __PT_PARM2_REG a1
+#define __PT_PARM3_REG a2
+#define __PT_PARM4_REG a3
+#define __PT_PARM5_REG a4
+#define __PT_PARM6_REG a5
+#define __PT_PARM7_REG a6
+#define __PT_PARM8_REG a7
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG ra
+#define __PT_FP_REG s0
+#define __PT_RC_REG a0
+#define __PT_SP_REG sp
+#define __PT_IP_REG pc
+
+#elif defined(bpf_target_arc)
+
+/*
+ * Section "Function Calling Sequence" (page 24):
+ * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
+ */
+
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG scratch.r0
+#define __PT_PARM2_REG scratch.r1
+#define __PT_PARM3_REG scratch.r2
+#define __PT_PARM4_REG scratch.r3
+#define __PT_PARM5_REG scratch.r4
+#define __PT_PARM6_REG scratch.r5
+#define __PT_PARM7_REG scratch.r6
+#define __PT_PARM8_REG scratch.r7
+
+/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG scratch.blink
+#define __PT_FP_REG scratch.fp
+#define __PT_RC_REG scratch.r0
+#define __PT_SP_REG scratch.sp
+#define __PT_IP_REG scratch.ret
+
+#elif defined(bpf_target_loongarch)
+
+/*
+ * https://docs.kernel.org/loongarch/introduction.html
+ * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+ */
+
+/* loongarch provides struct user_pt_regs instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
+#define __PT_PARM1_REG regs[4]
+#define __PT_PARM2_REG regs[5]
+#define __PT_PARM3_REG regs[6]
+#define __PT_PARM4_REG regs[7]
+#define __PT_PARM5_REG regs[8]
+#define __PT_PARM6_REG regs[9]
+#define __PT_PARM7_REG regs[10]
+#define __PT_PARM8_REG regs[11]
+
+/* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG regs[1]
+#define __PT_FP_REG regs[22]
+#define __PT_RC_REG regs[4]
+#define __PT_SP_REG regs[3]
+#define __PT_IP_REG csr_era
+
+#endif
+
+#if defined(bpf_target_defined)
+
+struct pt_regs;
+
+/* allow some architectures to override `struct pt_regs` */
+#ifndef __PT_REGS_CAST
+#define __PT_REGS_CAST(x) (x)
+#endif
+
+/*
+ * Different architectures support different number of arguments passed
+ * through registers. i386 supports just 3, some arches support up to 8.
+ */
+#ifndef __PT_PARM4_REG
+#define __PT_PARM4_REG __unsupported__
+#endif
+#ifndef __PT_PARM5_REG
+#define __PT_PARM5_REG __unsupported__
+#endif
+#ifndef __PT_PARM6_REG
+#define __PT_PARM6_REG __unsupported__
+#endif
+#ifndef __PT_PARM7_REG
+#define __PT_PARM7_REG __unsupported__
+#endif
+#ifndef __PT_PARM8_REG
+#define __PT_PARM8_REG __unsupported__
+#endif
+/*
+ * Similarly, syscall-specific conventions might differ between function call
+ * conventions within each architecture. All supported architectures pass
+ * either 6 or 7 syscall arguments in registers.
+ *
+ * See syscall(2) manpage for succinct table with information on each arch.
+ */
+#ifndef __PT_PARM7_SYSCALL_REG
+#define __PT_PARM7_SYSCALL_REG __unsupported__
+#endif
+
+#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
+#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
+#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
+#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
+#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
+#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG)
+#define PT_REGS_PARM7(x) (__PT_REGS_CAST(x)->__PT_PARM7_REG)
+#define PT_REGS_PARM8(x) (__PT_REGS_CAST(x)->__PT_PARM8_REG)
+#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
+#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
+#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
+#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
+#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
+#define PT_REGS_PARM6_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_REG)
+#define PT_REGS_PARM7_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_REG)
+#define PT_REGS_PARM8_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM8_REG)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
+
+#if defined(bpf_target_powerpc)
+
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
+#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+
+#elif defined(bpf_target_sparc)
+
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
+#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+
+#else
+
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
+ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
+ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+
+#endif
+
+#ifndef PT_REGS_PARM1_SYSCALL
+#define PT_REGS_PARM1_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM1_SYSCALL_REG)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM2_SYSCALL
+#define PT_REGS_PARM2_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM2_SYSCALL_REG)
+#define PT_REGS_PARM2_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM3_SYSCALL
+#define PT_REGS_PARM3_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM3_SYSCALL_REG)
+#define PT_REGS_PARM3_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM4_SYSCALL
+#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_SYSCALL_REG)
+#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM5_SYSCALL
+#define PT_REGS_PARM5_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM5_SYSCALL_REG)
+#define PT_REGS_PARM5_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM6_SYSCALL
+#define PT_REGS_PARM6_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM6_SYSCALL_REG)
+#define PT_REGS_PARM6_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM7_SYSCALL
+#define PT_REGS_PARM7_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM7_SYSCALL_REG)
+#define PT_REGS_PARM7_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_SYSCALL_REG)
+#endif
+
+#else /* defined(bpf_target_defined) */
+
+#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM8(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM8_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#endif /* defined(bpf_target_defined) */
+
+/*
+ * When invoked from a syscall handler kprobe, returns a pointer to a
+ * struct pt_regs containing syscall arguments and suitable for passing to
+ * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
+ */
+#ifndef PT_REGS_SYSCALL_REGS
+/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
+#endif
+
+#ifndef ___bpf_concat
+#define ___bpf_concat(a, b) a ## b
+#endif
+#ifndef ___bpf_apply
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#endif
+#ifndef ___bpf_nth
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#endif
+#ifndef ___bpf_narg
+#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#endif
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), ctx[8]
+#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), ctx[9]
+#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), ctx[11]
+#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs, that accept input arguments as a single
+ * pointer to untyped u64 array, where each u64 can actually be a typed
+ * pointer or integer of different size. Instead of requiring the user to write
+ * manual casts and work with array elements by index, the BPF_PROG macro
+ * allows the user to declare a list of named and typed input arguments in the
+ * same syntax as for a normal C function. All the casting is hidden and
+ * performed transparently, while user code can just assume working with
+ * function arguments of specified type and name.
+ *
+ * Original raw context argument is preserved as well as 'ctx' argument.
+ * This is useful when using BPF helpers that expect original context
+ * as one of the parameters (e.g., for bpf_perf_event_output()).
+ */
+#define BPF_PROG(name, args...) \
+name(unsigned long long *ctx); \
+static __always_inline typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_ctx_cast(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args)
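+
+/*
+ * Illustrative sketch (not part of libbpf): a hypothetical fentry program
+ * written with BPF_PROG; the typed arguments replace manual ctx[i] casts.
+ *
+ *     SEC("fentry/vfs_read")
+ *     int BPF_PROG(trace_vfs_read, struct file *file, char *buf, size_t count)
+ *     {
+ *         bpf_printk("vfs_read: %lu bytes requested", (unsigned long)count);
+ *         return 0;
+ *     }
+ */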
+
+#ifndef ___bpf_nth2
+#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
+ _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
+#endif
+#ifndef ___bpf_narg2
+#define ___bpf_narg2(...) \
+ ___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
+ 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
+#endif
+
+#define ___bpf_treg_cnt(t) \
+ __builtin_choose_expr(sizeof(t) == 1, 1, \
+ __builtin_choose_expr(sizeof(t) == 2, 1, \
+ __builtin_choose_expr(sizeof(t) == 4, 1, \
+ __builtin_choose_expr(sizeof(t) == 8, 1, \
+ __builtin_choose_expr(sizeof(t) == 16, 2, \
+ (void)0)))))
+
+#define ___bpf_reg_cnt0() (0)
+#define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)
+
+#define ___bpf_union_arg(t, x, n) \
+ __builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
+ (void)0)))))
+
+#define ___bpf_ctx_arg0(n, args...)
+#define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
+#define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
+#define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
+#define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
+#define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
+#define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
+#define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
+#define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
+#define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
+#define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
+#define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
+#define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
+#define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)
+
+#define ___bpf_ctx_decl0()
+#define ___bpf_ctx_decl1(t, x) , t x
+#define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args)
+#define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args)
+#define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args)
+#define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args)
+#define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args)
+#define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args)
+#define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args)
+#define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args)
+#define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args)
+#define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args)
+#define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args)
+#define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)
+
+/*
+ * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
+ * arguments. Since each struct argument might take one or two u64 values
+ * in the trampoline stack, argument type size is needed to place proper number
+ * of u64 values for each argument. Therefore, BPF_PROG2 has different
+ * syntax from BPF_PROG. For example, for the following BPF_PROG syntax:
+ *
+ * int BPF_PROG(test2, int a, int b) { ... }
+ *
+ * the corresponding BPF_PROG2 syntax is:
+ *
+ * int BPF_PROG2(test2, int, a, int, b) { ... }
+ *
+ * where type and the corresponding argument name are separated by comma.
+ *
+ * Use BPF_PROG2 macro if one of the arguments might be a struct/union larger
+ * than 8 bytes:
+ *
+ * int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
+ * int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
+ * {
+ * // access a, b, c, d, e, and ret directly
+ * ...
+ * }
+ */
+#define BPF_PROG2(name, args...) \
+name(unsigned long long *ctx); \
+static __always_inline typeof(name(0)) \
+____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ return ____##name(ctx ___bpf_ctx_arg(args)); \
+} \
+static __always_inline typeof(name(0)) \
+____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
+
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (unsigned long long)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (unsigned long long)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (unsigned long long)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (unsigned long long)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (unsigned long long)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (unsigned long long)PT_REGS_PARM6(ctx)
+#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (unsigned long long)PT_REGS_PARM7(ctx)
+#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (unsigned long long)PT_REGS_PARM8(ctx)
+#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
+ * low-level way of getting kprobe input arguments from struct pt_regs, and
+ * provides a familiar typed and named function arguments syntax and
+ * semantics of accessing kprobe input parameters.
+ *
+ * Original struct pt_regs* context is preserved as 'ctx' argument. This might
+ * be necessary when using BPF helpers like bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
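+
+/*
+ * Illustrative sketch (not part of libbpf): a hypothetical kprobe written
+ * with BPF_KPROBE; the leading vfs_write() arguments are decoded from
+ * pt_regs behind the scenes.
+ *
+ *     SEC("kprobe/vfs_write")
+ *     int BPF_KPROBE(kprobe_vfs_write, struct file *file, const char *buf,
+ *                    size_t count)
+ *     {
+ *         bpf_printk("vfs_write of %lu bytes", (unsigned long)count);
+ *         return 0;
+ *     }
+ */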
+
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (unsigned long long)PT_REGS_RC(ctx)
+#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional
+ * return value (in addition to `struct pt_regs *ctx`), but no input
+ * arguments, because they will be clobbered by the time probed function
+ * returns.
+ */
+#define BPF_KRETPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kretprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
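+
+/*
+ * Illustrative sketch (not part of libbpf): the matching kretprobe, which
+ * only sees the probed function's return value.
+ *
+ *     SEC("kretprobe/vfs_write")
+ *     int BPF_KRETPROBE(kretprobe_vfs_write, ssize_t ret)
+ *     {
+ *         if (ret < 0)
+ *             bpf_printk("vfs_write failed: %ld", (long)ret);
+ *         return 0;
+ *     }
+ */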
+
+/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
+#define ___bpf_syscall_args0() ctx
+#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (unsigned long long)PT_REGS_PARM1_SYSCALL(regs)
+#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (unsigned long long)PT_REGS_PARM2_SYSCALL(regs)
+#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (unsigned long long)PT_REGS_PARM3_SYSCALL(regs)
+#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (unsigned long long)PT_REGS_PARM4_SYSCALL(regs)
+#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (unsigned long long)PT_REGS_PARM5_SYSCALL(regs)
+#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (unsigned long long)PT_REGS_PARM6_SYSCALL(regs)
+#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (unsigned long long)PT_REGS_PARM7_SYSCALL(regs)
+#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
+
+/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
+#define ___bpf_syswrap_args0() ctx
+#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (unsigned long long)PT_REGS_PARM1_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (unsigned long long)PT_REGS_PARM2_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (unsigned long long)PT_REGS_PARM3_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (unsigned long long)PT_REGS_PARM4_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (unsigned long long)PT_REGS_PARM5_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (unsigned long long)PT_REGS_PARM6_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (unsigned long long)PT_REGS_PARM7_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
+ * tracing syscall functions, like __x64_sys_close. It hides the underlying
+ * platform-specific low-level way of getting syscall input arguments from
+ * struct pt_regs, and provides a familiar typed and named function arguments
+ * syntax and semantics of accessing syscall input parameters.
+ *
+ * Original struct pt_regs * context is preserved as 'ctx' argument. This might
+ * be necessary when using BPF helpers like bpf_perf_event_output().
+ *
+ * At the moment BPF_KSYSCALL does not transparently handle all the calling
+ * convention quirks for the following syscalls:
+ *
+ * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
+ * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
+ * CONFIG_CLONE_BACKWARDS3.
+ * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
+ * - compat syscalls.
+ *
+ * This may or may not change in the future. User needs to take extra measures
+ * to handle such quirks explicitly, if necessary.
+ *
+ * This macro relies on BPF CO-RE support and virtual __kconfig externs.
+ */
+#define BPF_KSYSCALL(name, args...) \
+name(struct pt_regs *ctx); \
+extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
+ ? (struct pt_regs *)PT_REGS_PARM1(ctx) \
+ : ctx; \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ if (LINUX_HAS_SYSCALL_WRAPPER) \
+ return ____##name(___bpf_syswrap_args(args)); \
+ else \
+ return ____##name(___bpf_syscall_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
+
+/* BPF_UPROBE and BPF_URETPROBE are identical to BPF_KPROBE and BPF_KRETPROBE,
+ * but are named way less confusingly for SEC("uprobe") and SEC("uretprobe")
+ * use cases.
+ */
+#define BPF_UPROBE(name, args...) BPF_KPROBE(name, ##args)
+#define BPF_URETPROBE(name, args...) BPF_KRETPROBE(name, ##args)
+
+#endif
diff --git a/pkg/collector/bpf/network/bpf_network.c b/pkg/collector/bpf/network/bpf_network.c
new file mode 100644
index 00000000..6fcd9fbf
--- /dev/null
+++ b/pkg/collector/bpf/network/bpf_network.c
@@ -0,0 +1,52 @@
+//go:build ignore
+
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#include "bpf_network.h"
+
+char __license[] SEC("license") = "GPL";
+
+/**
+ * Network related programs.
+ *
+ * These are internal kernel functions that we are tracing, and their
+ * names can be architecture dependent. So we need to check
+ * /proc/kallsyms at runtime, find the correct function name and
+ * hook the program to it.
+ *
+ * If we used fentry (which should be theoretically more performant)
+ * we would have to set the correct function name at runtime, which is
+ * complicated to achieve. So, we use kprobes for these events.
+ *
+ * Initial benchmarks showed that fentry/fexit is 100-150 ns faster
+ * than kprobes.
+ *
+ * However, cilium ebpf refused to load the __netif_receive_skb_core
+ * program on newer kernels, as there is a wrapper exported function
+ * netif_receive_skb_core in 6.x kernels, and in older kernels a bug
+ * prevents tracing functions from accessing the skb pointer.
+ *
+ * To avoid more complications, we use ONLY kprobes, which should work
+ * in all cases.
+ *
+ * NOTE that we still need to find the architecture specific names
+ * before loading the program by looking at /proc/kallsyms.
+ */
+
+SEC("kprobe/__netif_receive_skb_core")
+__u64 kprobe___netif_receive_skb_core(struct pt_regs *ctx)
+{
+ struct sk_buff *skb;
+ bpf_probe_read_kernel(&skb, sizeof(skb), (void *) _(PT_REGS_PARM1(ctx)));
+
+ return handle_skb_event(skb, MODE_INGRESS);
+}
+
+SEC("kprobe/__dev_queue_xmit")
+__u64 kprobe___dev_queue_xmit(struct pt_regs *ctx)
+{
+ struct sk_buff *skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
+
+ return handle_skb_event(skb, MODE_EGRESS);
+}
diff --git a/pkg/collector/bpf/network/bpf_network.h b/pkg/collector/bpf/network/bpf_network.h
new file mode 100644
index 00000000..5c9bcaf2
--- /dev/null
+++ b/pkg/collector/bpf/network/bpf_network.h
@@ -0,0 +1,117 @@
+//go:build ignore
+
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#include "vmlinux.h"
+#include "compiler.h"
+#include "net_shared.h"
+
+#include "bpf_tracing.h"
+#include "bpf_core_read.h"
+
+#include "bpf_cgroup.h"
+
+enum net_mode {
+ MODE_INGRESS,
+ MODE_EGRESS
+};
+
+/* network related event key struct */
+struct net_event_key {
+ __u32 cid; /* cgroup ID */
+ __u8 dev[16]; /* Device name */
+};
+
+/* Any network IPv4/IPv6 related event */
+struct net_event {
+ __u64 packets; /* Packets counter */
+ __u64 bytes; /* Bytes counter */
+};
+
+/* Map to track ingress events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, struct net_event_key); /* Key is the net_event_key struct */
+ __type(value, struct net_event);
+} ingress_accumulator SEC(".maps");
+
+/* Map to track egress events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, struct net_event_key); /* Key is the net_event_key struct */
+ __type(value, struct net_event);
+} egress_accumulator SEC(".maps");
+
+/**
+ * handle_skb_event updates the maps by incrementing the packets
+ * and bytes counters of the existing event
+ * @skb: target skb
+ * @type: type of event MODE_INGRESS or MODE_EGRESS
+ *
+ * Always returns TC_ACT_OK.
+ */
+FUNC_INLINE __u64 handle_skb_event(struct sk_buff *skb, int type)
+{
+ __u32 len;
+ struct net_device *dev;
+
+ struct net_event_key key = {0};
+
+ // Get cgroup ID
+ key.cid = (__u32) ceems_get_current_cgroup_id();
+
+ // If the cgroup ID is 1, it is the root cgroup, which we are not
+ // interested in, so return.
+ // Similarly, if the cgroup ID is 0, we failed to get the cgroup ID.
+ if (key.cid == 0 || key.cid == 1)
+ return TC_ACT_OK;
+
+ // Read packet bytes and device name
+ bpf_probe_read_kernel(&len, sizeof(len), _(&skb->len));
+ bpf_probe_read_kernel(&dev, sizeof(dev), _(&skb->dev));
+ bpf_probe_read_kernel_str(&key.dev, sizeof(key.dev), _(&dev->name));
+
+ struct net_event *event;
+
+ // Fetch event from correct map
+ switch (type) {
+ case MODE_INGRESS:
+ event = bpf_map_lookup_elem(&ingress_accumulator, &key);
+ break;
+ case MODE_EGRESS:
+ event = bpf_map_lookup_elem(&egress_accumulator, &key);
+ break;
+ default:
+ return TC_ACT_OK;
+ }
+
+ // Get packet size
+ __u64 bytes = (__u64) bpf_ntohs(len);
+
+ if (!event) {
+ // New event with the packet and byte counters initialised
+ struct net_event new_event = { .packets = 1, .bytes = bytes };
+
+ // Update map with new key and event
+ switch (type) {
+ case MODE_INGRESS:
+ bpf_map_update_elem(&ingress_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_EGRESS:
+ bpf_map_update_elem(&egress_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ default:
+ return TC_ACT_OK;
+ }
+
+ return TC_ACT_OK;
+ }
+
+ // Always increment packets and bytes
+ __sync_fetch_and_add(&event->packets, 1);
+ __sync_fetch_and_add(&event->bytes, bytes);
+
+ // Let the packet pass
+ return TC_ACT_OK;
+}
diff --git a/pkg/collector/bpf/vfs/bpf_vfs.c b/pkg/collector/bpf/vfs/bpf_vfs.c
new file mode 100644
index 00000000..e017df83
--- /dev/null
+++ b/pkg/collector/bpf/vfs/bpf_vfs.c
@@ -0,0 +1,233 @@
+//go:build ignore
+
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#include "bpf_vfs.h"
+
+char __license[] SEC("license") = "GPL";
+
+/**
+ * VFS related events.
+ *
+ * BPF trampolines were implemented on ARM64, with limited
+ * functionality, only in kernel version 6.0.
+ *
+ * So we use fentry/fexit only on the x86 architecture and
+ * kprobe/kretprobes on the rest.
+ */
+#if defined(__TARGET_ARCH_x86)
+
+SEC("fexit/vfs_write")
+__u64 BPF_PROG(fexit_vfs_write, struct file *file,
+ const char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+{
+ return handle_rw_event(file, (__s64)ret, MODE_WRITE);
+}
+
+SEC("fexit/vfs_read")
+__u64 BPF_PROG(fexit_vfs_read, struct file *file,
+ char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+{
+ return handle_rw_event(file, (__s64)ret, MODE_READ);
+}
+
+SEC("fexit/vfs_writev")
+__u64 BPF_PROG(fexit_vfs_writev, struct file *file,
+ const char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+{
+ return handle_rw_event(file, (__s64)ret, MODE_WRITE);
+}
+
+SEC("fexit/vfs_readv")
+__u64 BPF_PROG(fexit_vfs_readv, struct file *file,
+ char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+{
+ return handle_rw_event(file, (__s64)ret, MODE_READ);
+}
+
+SEC("fexit/vfs_open")
+__u64 BPF_PROG(fexit_vfs_open, const struct path *path, struct file *file, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_OPEN);
+}
+
+/**
+ * Functions vfs_create, vfs_open, vfs_rmdir, vfs_mkdir and vfs_unlink
+ * have different function signatures depending on the kernel version.
+ *
+ * The pre-processor directives below compile different programs based
+ * on the kernel version.
+ *
+ * From initial benchmark tests the difference between fexit and kretprobe
+ * is around 100-150 ns in favour of fexit (as expected).
+ */
+#if defined(__KERNEL_PRE_v511)
+
+SEC("fexit/vfs_create")
+__u64 BPF_PROG(fexit_vfs_create, struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool want_excl, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_CREATE);
+}
+
+SEC("fexit/vfs_mkdir")
+__u64 BPF_PROG(fexit_vfs_mkdir, struct inode *dir, struct dentry *dentry, umode_t mode,
+ int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
+}
+
+SEC("fexit/vfs_unlink")
+__u64 BPF_PROG(fexit_vfs_unlink, struct inode *dir, struct dentry *dentry,
+ struct inode **pdir, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
+}
+
+SEC("fexit/vfs_rmdir")
+__u64 BPF_PROG(fexit_vfs_rmdir, struct inode *dir, struct dentry *dentry, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
+}
+
+#elif defined(__KERNEL_POST_v512_PRE_v62)
+
+SEC("fexit/vfs_create")
+__u64 BPF_PROG(fexit_vfs_create, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool want_excl, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_CREATE);
+}
+
+SEC("fexit/vfs_mkdir")
+__u64 BPF_PROG(fexit_vfs_mkdir, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
+}
+
+SEC("fexit/vfs_unlink")
+__u64 BPF_PROG(fexit_vfs_unlink, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, struct inode **pdir, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
+}
+
+SEC("fexit/vfs_rmdir")
+__u64 BPF_PROG(fexit_vfs_rmdir, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
+}
+
+#else
+
+SEC("fexit/vfs_create")
+__u64 BPF_PROG(fexit_vfs_create, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool want_excl, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_CREATE);
+}
+
+SEC("fexit/vfs_mkdir")
+__u64 BPF_PROG(fexit_vfs_mkdir, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
+}
+
+SEC("fexit/vfs_unlink")
+__u64 BPF_PROG(fexit_vfs_unlink, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, struct inode **pdir, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
+}
+
+SEC("fexit/vfs_rmdir")
+__u64 BPF_PROG(fexit_vfs_rmdir, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, int ret)
+{
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
+}
+
+#endif
+
+#else
+
+SEC("kprobe/vfs_write")
+__u64 kprobe_vfs_write(struct pt_regs *ctx)
+{
+ struct file *file = (struct file *) PT_REGS_PARM1(ctx);
+ __u64 count = (__u64) PT_REGS_PARM3(ctx);
+
+ return handle_rw_event(file, (__s64)count, MODE_WRITE);
+}
+
+SEC("kprobe/vfs_read")
+__u64 kprobe_vfs_read(struct pt_regs *ctx)
+{
+ struct file *file = (struct file *) PT_REGS_PARM1(ctx);
+ __u64 count = (__u64) PT_REGS_PARM3(ctx);
+
+ return handle_rw_event(file, (__s64)count, MODE_READ);
+}
+
+SEC("kprobe/vfs_writev")
+__u64 kprobe_vfs_writev(struct pt_regs *ctx)
+{
+ struct file *file = (struct file *) PT_REGS_PARM1(ctx);
+ __u64 count = (__u64) PT_REGS_PARM3(ctx);
+
+ return handle_rw_event(file, (__s64)count, MODE_WRITE);
+}
+
+SEC("kprobe/vfs_readv")
+__u64 kprobe_vfs_readv(struct pt_regs *ctx)
+{
+ struct file *file = (struct file *) PT_REGS_PARM1(ctx);
+ __u64 count = (__u64) PT_REGS_PARM3(ctx);
+
+ return handle_rw_event(file, (__s64)count, MODE_READ);
+}
+
+SEC("kretprobe/vfs_create")
+__u64 kretprobe_vfs_create(struct pt_regs *ctx)
+{
+ __s64 ret = (__s64) PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_CREATE);
+}
+
+SEC("kretprobe/vfs_open")
+__u64 kretprobe_vfs_open(struct pt_regs *ctx)
+{
+ __s64 ret = (__s64) PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_OPEN);
+}
+
+SEC("kretprobe/vfs_mkdir")
+__u64 kretprobe_vfs_mkdir(struct pt_regs *ctx)
+{
+ __s64 ret = (__s64) PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
+}
+
+SEC("kretprobe/vfs_unlink")
+__u64 kretprobe_vfs_unlink(struct pt_regs *ctx)
+{
+ __s64 ret = (__s64) PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
+}
+
+SEC("kretprobe/vfs_rmdir")
+__u64 kretprobe_vfs_rmdir(struct pt_regs *ctx)
+{
+ __s64 ret = (__s64) PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
+}
+
+#endif
diff --git a/pkg/collector/bpf/vfs/bpf_vfs.h b/pkg/collector/bpf/vfs/bpf_vfs.h
new file mode 100644
index 00000000..9aa18147
--- /dev/null
+++ b/pkg/collector/bpf/vfs/bpf_vfs.h
@@ -0,0 +1,272 @@
+//go:build ignore
+
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#include "vmlinux.h"
+#include "compiler.h"
+
+#include "bpf_tracing.h"
+#include "bpf_core_read.h"
+
+#include "bpf_cgroup.h"
+#include "bpf_path.h"
+
+enum vfs_mode {
+ MODE_READ,
+ MODE_WRITE,
+ MODE_OPEN,
+ MODE_CREATE,
+ MODE_MKDIR,
+ MODE_UNLINK,
+ MODE_RMDIR
+};
+
+/* vfs related event key struct */
+struct vfs_event_key {
+ __u32 cid; /* cgroup ID */
+ __u8 mnt[64]; /* Mount point */
+};
+
+/* Any vfs read/write related event */
+struct vfs_rw_event {
+ __u64 bytes; /* Bytes accumulator */
+ __u64 calls; /* Call counter */
+ __u64 errors; /* Error counter */
+};
+
+/* Any vfs create/open/close/unlink/fsync related event */
+struct vfs_inode_event {
+ __u64 calls; /* Call counter */
+ __u64 errors; /* Error counter */
+};
+
+/* Map to track vfs_write events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, struct vfs_event_key); /* Key is the vfs_event_key struct */
+ __type(value, struct vfs_rw_event);
+} write_accumulator SEC(".maps");
+
+/* Map to track vfs_read events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, struct vfs_event_key); /* Key is the vfs_event_key struct */
+ __type(value, struct vfs_rw_event);
+} read_accumulator SEC(".maps");
+
+/* Map to track vfs_open events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, __u32); /* Key is the cgroup ID */
+ __type(value, struct vfs_inode_event);
+} open_accumulator SEC(".maps");
+
+/* Map to track vfs_create events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, __u32); /* Key is the cgroup ID */
+ __type(value, struct vfs_inode_event);
+} create_accumulator SEC(".maps");
+
+/* Map to track vfs_unlink events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, __u32); /* Key is the cgroup ID */
+ __type(value, struct vfs_inode_event);
+} unlink_accumulator SEC(".maps");
+
+/**
+ * get_mnt_path returns the mount path of the current file.
+ * @key: target key
+ * @file: file struct
+ *
+ * Returns size of the mount path.
+ */
+FUNC_INLINE __u32 get_mnt_path(struct vfs_event_key *key, struct file *file)
+{
+ int flags = 0, size;
+ char *buffer;
+
+ buffer = mnt_path_local(file, &size, &flags);
+ if (!buffer)
+ return 0;
+
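+ /* Mask size so that the verifier can prove it stays within bounds */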
+ asm volatile("%[size] &= 0xff;\n"
+ : [size] "+r"(size));
+
+ bpf_probe_read(key->mnt, sizeof(key->mnt), buffer);
+
+ return (__u32)size;
+}
+
+/**
+ * handle_rw_event updates the maps by incrementing the calls counter
+ * and bytes accumulator of the existing event
+ * @file: target file
+ * @ret: return value of kernel function
+ * @type: type of event MODE_READ, MODE_WRITE, etc
+ *
+ * Always returns 0.
+ */
+FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type) {
+ // Important to initialise the struct with some values else verifier will complain
+ struct vfs_event_key key = {0};
+
+ // Get current cgroup ID. Works for both v1 and v2
+ key.cid = (__u32) ceems_get_current_cgroup_id();
+
+ // If the cgroup ID is 1, it is the root cgroup, which we are not
+ // interested in, so return.
+ // Similarly, if the cgroup ID is 0, we failed to get the cgroup ID.
+ if (key.cid == 0 || key.cid == 1)
+ return 0;
+
+ // Get mount path of the file
+ get_mnt_path(&key, file);
+ if (!key.mnt[0])
+ return 0;
+
+ struct vfs_rw_event *event;
+
+ // Fetch event from correct map
+ switch (type) {
+ case MODE_WRITE:
+ event = bpf_map_lookup_elem(&write_accumulator, &key);
+ break;
+ case MODE_READ:
+ event = bpf_map_lookup_elem(&read_accumulator, &key);
+ break;
+ default:
+ return 0;
+ }
+
+ if (!event) {
+ // New event with increment call counter
+ struct vfs_rw_event new_event = { .bytes = 0, .calls = 1, .errors = 0 };
+
+ // In case of error increment errors else increment bytes
+ if (ret < 0) {
+ new_event.bytes = 0;
+ new_event.errors = 1;
+ } else {
+ new_event.bytes = (__u64) ret;
+ new_event.errors = 0;
+ }
+
+ // Update map with new key and event
+ switch (type) {
+ case MODE_WRITE:
+ bpf_map_update_elem(&write_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_READ:
+ bpf_map_update_elem(&read_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+ }
+
+ // Always increment calls
+ __sync_fetch_and_add(&event->calls, 1);
+
+ // In case of error increment errors else increment bytes
+ if (ret < 0) {
+ __sync_fetch_and_add(&event->errors, 1);
+ } else {
+ __sync_fetch_and_add(&event->bytes, (__u64) ret);
+ }
+
+ return 0;
+}
+
+/**
+ * handle_inode_event updates the maps by incrementing the calls
+ * and errors counters of the existing event
+ * @ret: return value of kernel function
+ * @type: type of event MODE_OPEN, MODE_CREATE, etc
+ *
+ * Always returns 0.
+ */
+FUNC_INLINE __u64 handle_inode_event(__s64 ret, int type) {
+ // Get cgroup ID
+ __u32 key = (__u32) ceems_get_current_cgroup_id();
+
+ // If the cgroup ID is 1, it is the root cgroup, which we are not
+ // interested in, so return.
+ // Similarly, if the cgroup ID is 0, we failed to get the cgroup ID.
+ if (key == 0 || key == 1)
+ return 0;
+
+ struct vfs_inode_event *event;
+
+ // Fetch event from correct map
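+ // Note that MODE_MKDIR events are accumulated together with MODE_CREATE
+ // events and MODE_RMDIR events together with MODE_UNLINK events.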
+ switch (type) {
+ case MODE_OPEN:
+ event = bpf_map_lookup_elem(&open_accumulator, &key);
+ break;
+ case MODE_CREATE:
+ event = bpf_map_lookup_elem(&create_accumulator, &key);
+ break;
+ case MODE_MKDIR:
+ event = bpf_map_lookup_elem(&create_accumulator, &key);
+ break;
+ case MODE_RMDIR:
+ event = bpf_map_lookup_elem(&unlink_accumulator, &key);
+ break;
+ case MODE_UNLINK:
+ event = bpf_map_lookup_elem(&unlink_accumulator, &key);
+ break;
+ default:
+ return 0;
+ }
+
+ if (!event) {
+ // New event with increment call counter
+ struct vfs_inode_event new_event = { .calls = 1, .errors = 0 };
+
+ // In case of error increment the errors counter
+ if (ret) {
+ new_event.errors = 1;
+ }
+
+ // Update map with new key and event
+ switch (type) {
+ case MODE_OPEN:
+ bpf_map_update_elem(&open_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_CREATE:
+ bpf_map_update_elem(&create_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_MKDIR:
+ bpf_map_update_elem(&create_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_RMDIR:
+ bpf_map_update_elem(&unlink_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_UNLINK:
+ bpf_map_update_elem(&unlink_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+ }
+
+ // Always increment calls
+ __sync_fetch_and_add(&event->calls, 1);
+
+ // In case of error increment the errors counter
+ if (ret) {
+ __sync_fetch_and_add(&event->errors, 1);
+ }
+
+ return 0;
+}
diff --git a/pkg/collector/cgroup.go b/pkg/collector/cgroup.go
new file mode 100644
index 00000000..c709d312
--- /dev/null
+++ b/pkg/collector/cgroup.go
@@ -0,0 +1,152 @@
+package collector
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/containerd/cgroups/v3"
+)
+
+const (
+ // Max cgroup subsystems count that is used from BPF side
+ // to define a max index for the default controllers on tasks.
+ // For further documentation check BPF part.
+ cgroupSubSysCount = 15
+)
+
+// Regular expressions of cgroup paths for different resource managers
+/*
+ For v1 possibilities are /cpuacct/slurm/uid_1000/job_211
+ /memory/slurm/uid_1000/job_211
+
+ For v2 possibilities are /system.slice/slurmstepd.scope/job_211
+ /system.slice/slurmstepd.scope/job_211/step_interactive
+ /system.slice/slurmstepd.scope/job_211/step_extern/user/task_0
+*/
+var (
+ slurmCgroupPathRegex = regexp.MustCompile("^.*/slurm(?:.*?)/job_([0-9]+)(?:.*$)")
+ slurmIgnoreProcsRegex = regexp.MustCompile("slurmstepd:(.*)|sleep ([0-9]+)|/bin/bash (.*)/slurm_script")
+)
+
+// cgroupFS is a struct that contains cgroup related info for a
+// given resource manager.
+type cgroupFS struct {
+ mode cgroups.CGMode // cgroups mode: unified, legacy, hybrid
+ root string // cgroups root
+ mount string // path at which resource manager manages cgroups
+ subsystem string // active subsystem in cgroups v1
+ manager string // cgroup manager
+ idRegex *regexp.Regexp // regular expression to capture cgroup ID
+ pathFilter func(string) bool // function to filter cgroup paths. Function must return true if cgroup path must be ignored
+ procFilter func(string) bool // function to filter processes in cgroup based on cmdline. Function must return true if process must be ignored
+}
+
+// cgroupController is a container for cgroup controllers in v1.
+type cgroupController struct {
+ id uint64 // Hierarchy unique ID
+ idx uint64 // Cgroup SubSys index
+ name string // Controller name
+ active bool // Will be set to true if controller is set and active
+}
+
+// slurmCgroupFS returns cgroupFS struct for SLURM.
+func slurmCgroupFS(cgroupRootPath, subsystem, forceCgroupsVersion string) cgroupFS {
+ var cgroup cgroupFS
+ if cgroups.Mode() == cgroups.Unified {
+ cgroup = cgroupFS{
+ mode: cgroups.Unified,
+ root: cgroupRootPath,
+ mount: filepath.Join(cgroupRootPath, "system.slice/slurmstepd.scope"),
+ }
+ } else {
+ cgroup = cgroupFS{
+ mode: cgroups.Mode(),
+ root: filepath.Join(cgroupRootPath, subsystem),
+ mount: filepath.Join(cgroupRootPath, subsystem, "slurm"),
+ subsystem: subsystem,
+ }
+ }
+
+ // For overriding in tests
+ if forceCgroupsVersion != "" {
+ if forceCgroupsVersion == "v2" {
+ cgroup = cgroupFS{
+ mode: cgroups.Unified,
+ root: cgroupRootPath,
+ mount: filepath.Join(cgroupRootPath, "system.slice/slurmstepd.scope"),
+ }
+ } else if forceCgroupsVersion == "v1" {
+ cgroup = cgroupFS{
+ mode: cgroups.Legacy,
+ root: filepath.Join(cgroupRootPath, subsystem),
+ mount: filepath.Join(cgroupRootPath, subsystem, "slurm"),
+ subsystem: subsystem,
+ }
+ }
+ }
+
+ // Add manager field
+ cgroup.manager = "slurm"
+
+ // Add path regex
+ cgroup.idRegex = slurmCgroupPathRegex
+
+ // Add filter functions
+ cgroup.pathFilter = func(p string) bool {
+ return strings.Contains(p, "/step_")
+ }
+ cgroup.procFilter = func(p string) bool {
+ return slurmIgnoreProcsRegex.MatchString(p)
+ }
+
+ return cgroup
+}
+
+// parseCgroupSubSysIds returns cgroup controllers for cgroups v1.
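+//
+// A typical /proc/cgroups file looks like (columns are subsys_name,
+// hierarchy, num_cgroups and enabled):
+//
+//  #subsys_name  hierarchy  num_cgroups  enabled
+//  cpuset        11         1            1
+//  cpu           3          68           1
+//  cpuacct       3          68           1
+//  memory        8          112          1
+//
+// Only the subsys_name and hierarchy columns are used here; the numbers
+// shown are illustrative.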
+func parseCgroupSubSysIds() ([]cgroupController, error) {
+ var cgroupControllers []cgroupController
+
+ // Read /proc/cgroups file
+ file, err := os.Open(procFilePath("cgroups"))
+ if err != nil {
+ return nil, err
+ }
+
+ defer file.Close()
+
+ fscanner := bufio.NewScanner(file)
+
+ var idx uint64 = 0
+
+ fscanner.Scan() // ignore the header line
+
+ for fscanner.Scan() {
+ line := fscanner.Text()
+ fields := strings.Fields(line)
+
+ /* We care only for the controllers that we want */
+ if idx >= cgroupSubSysCount {
+ /* Maybe some cgroups are not upstream? */
+ return cgroupControllers, fmt.Errorf("cgroup default subsystem '%s' is indexed at idx=%d higher than CGROUP_SUBSYS_COUNT=%d",
+ fields[0], idx, cgroupSubSysCount)
+ }
+
+ if id, err := strconv.ParseUint(fields[1], 10, 32); err == nil {
+ cgroupControllers = append(cgroupControllers, cgroupController{
+ id: id,
+ idx: idx,
+ name: fields[0],
+ active: true,
+ })
+ }
+
+ idx++
+ }
+
+ return cgroupControllers, nil
+}
diff --git a/pkg/collector/ebpf.go b/pkg/collector/ebpf.go
new file mode 100644
index 00000000..7062a1d9
--- /dev/null
+++ b/pkg/collector/ebpf.go
@@ -0,0 +1,784 @@
+//go:build !noebpf
+// +build !noebpf
+
+package collector
+
+import (
+ "bytes"
+ "context"
+ "embed"
+ "errors"
+ "fmt"
+ "io/fs"
+ "path/filepath"
+ "slices"
+ "strings"
+ "sync"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/link"
+ "github.com/cilium/ebpf/rlimit"
+ "github.com/containerd/cgroups/v3"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
+ "golang.org/x/sys/unix"
+)
+
+// Embed the entire objs directory.
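+// The objects are compiled from the C sources under pkg/collector/bpf and
+// shipped inside the exporter binary.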
+//
+//go:embed bpf/objs
+var objsFS embed.FS
+
+const (
+ ebpfCollectorSubsystem = "ebpf"
+)
+
+// CLI options.
+var (
+ collectNetMetrics = CEEMSExporterApp.Flag(
+ "collector.ebpf.network-metrics",
+ "Enables collection of network metrics by ebpf (default: enabled)",
+ ).Default("true").Bool()
+ collectVFSMetrics = CEEMSExporterApp.Flag(
+ "collector.ebpf.vfs-metrics",
+ "Enables collection of VFS metrics by ebpf (default: enabled)",
+ ).Default("true").Bool()
+)
+
+// bpfConfig is a container for the config that is passed to bpf progs.
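+// CgrpSubsysIdx is the index of the active cgroups v1 subsystem as listed in
+// /proc/cgroups (unused for cgroups v2) and CgrpFsMagic is the magic number of
+// the cgroup filesystem (CGROUP_SUPER_MAGIC for v1, CGROUP2_SUPER_MAGIC for v2).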
+type bpfConfig struct {
+ CgrpSubsysIdx uint64
+ CgrpFsMagic uint64
+}
+
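+// The key and value structs below must mirror the memory layout of the
+// corresponding C structs in bpf/network/bpf_network.h and bpf/vfs/bpf_vfs.h
+// so that entries read from the bpf maps can be decoded on the Go side.
+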
+// bpfNetEvent is value struct for storing network events in the bpf maps.
+type bpfNetEvent struct {
+ Packets uint64
+ Bytes uint64
+}
+
+// bpfNetEventKey is key struct for storing network events in the bpf maps.
+type bpfNetEventKey struct {
+ Cid uint32
+ Dev [16]uint8
+}
+
+// bpfVfsInodeEvent is value struct for storing VFS inode related
+// events in the bpf maps.
+type bpfVfsInodeEvent struct {
+ Calls uint64
+ Errors uint64
+}
+
+// bpfVfsRwEvent is value struct for storing VFS read/write related
+// events in the bpf maps.
+type bpfVfsRwEvent struct {
+ Bytes uint64
+ Calls uint64
+ Errors uint64
+}
+
+// bpfVfsEventKey is key struct for storing VFS events in the bpf maps.
+type bpfVfsEventKey struct {
+ Cid uint32
+ Mnt [64]uint8
+}
+
+type ebpfCollector struct {
+ logger log.Logger
+ hostname string
+ cgroupFS cgroupFS
+ inodesMap map[uint64]string
+ inodesRevMap map[string]uint64
+ activeCgroups []uint64
+ netColl *ebpf.Collection
+ vfsColl *ebpf.Collection
+ links map[string]link.Link
+ vfsWriteRequests *prometheus.Desc
+ vfsWriteBytes *prometheus.Desc
+ vfsWriteErrors *prometheus.Desc
+ vfsReadRequests *prometheus.Desc
+ vfsReadBytes *prometheus.Desc
+ vfsReadErrors *prometheus.Desc
+ vfsOpenRequests *prometheus.Desc
+ vfsOpenErrors *prometheus.Desc
+ vfsCreateRequests *prometheus.Desc
+ vfsCreateErrors *prometheus.Desc
+ vfsUnlinkRequests *prometheus.Desc
+ vfsUnlinkErrors *prometheus.Desc
+ netIngressPackets *prometheus.Desc
+ netIngressBytes *prometheus.Desc
+ netEgressPackets *prometheus.Desc
+ netEgressBytes *prometheus.Desc
+}
+
+func init() {
+ RegisterCollector(ebpfCollectorSubsystem, defaultDisabled, NewEbpfCollector)
+}
+
+// NewEbpfCollector returns a new instance of ebpf collector.
+func NewEbpfCollector(logger log.Logger) (Collector, error) {
+ var netColl, vfsColl *ebpf.Collection
+
+ var configMap *ebpf.Map
+
+ bpfProgs := make(map[string]*ebpf.Program)
+
+ var err error
+
+ // At least one of network or VFS metrics must be enabled
+ if !*collectNetMetrics && !*collectVFSMetrics {
+ level.Error(logger).Log("msg", "Enable at least one of --collector.ebpf.network-metrics or --collector.ebpf.vfs-metrics")
+
+ return nil, errors.New("invalid CLI options for ebpf collector")
+ }
+
+ // Get cgroups based on the enabled collector
+ var cgroupFS cgroupFS
+ if *collectorState[slurmCollectorSubsystem] {
+ cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
+ }
+
+ // Remove resource limits for kernels <5.11.
+ if err := rlimit.RemoveMemlock(); err != nil {
+ return nil, fmt.Errorf("error removing memlock: %w", err)
+ }
+
+ // Load network programs
+ if *collectNetMetrics {
+ netColl, err = loadObject("bpf/objs/bpf_network.o")
+ if err != nil {
+ level.Error(logger).Log("msg", "Unable to load network bpf objects", "err", err)
+
+ return nil, err
+ }
+
+ for name, prog := range netColl.Programs {
+ bpfProgs[name] = prog
+ }
+
+ // Set configMap
+ configMap = netColl.Maps["conf_map"]
+ }
+
+ // Load VFS programs
+ if *collectVFSMetrics {
+ objFile, err := bpfVFSObjs()
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to get current kernel version", "err", err)
+
+ return nil, err
+ }
+
+ vfsColl, err = loadObject("bpf/objs/" + objFile)
+ if err != nil {
+ level.Error(logger).Log("msg", "Unable to load VFS bpf objects", "err", err)
+
+ return nil, err
+ }
+
+ for name, prog := range vfsColl.Programs {
+ bpfProgs[name] = prog
+ }
+
+ // Set configMap if not already done
+ if configMap == nil {
+ configMap = vfsColl.Maps["conf_map"]
+ }
+ }
+
+ // Update config map
+ var config bpfConfig
+ if cgroupFS.mode == cgroups.Unified {
+ config = bpfConfig{
+ CgrpSubsysIdx: uint64(0), // Irrelevant for cgroups v2
+ CgrpFsMagic: uint64(unix.CGROUP2_SUPER_MAGIC),
+ }
+ } else {
+ var cgrpSubSysIdx uint64
+
+ // Get all cgroup subsystems
+ cgroupControllers, err := parseCgroupSubSysIds()
+ if err != nil {
+ level.Warn(logger).Log("msg", "Error fetching cgroup controllers", "err", err)
+ }
+
+ for _, cgroupController := range cgroupControllers {
+ if cgroupController.name == strings.TrimSpace(cgroupFS.subsystem) {
+ cgrpSubSysIdx = cgroupController.idx
+ }
+ }
+
+ config = bpfConfig{
+ CgrpSubsysIdx: cgrpSubSysIdx,
+ CgrpFsMagic: uint64(unix.CGROUP_SUPER_MAGIC),
+ }
+ }
+
+ if err := configMap.Update(uint32(0), config, ebpf.UpdateAny); err != nil {
+ return nil, fmt.Errorf("failed to update bpf config: %w", err)
+ }
+
+ // Instantiate ksyms to setup correct kernel names
+ ksyms, err := NewKsyms()
+ if err != nil {
+ return nil, fmt.Errorf("failed to instantiate ksyms: %w", err)
+ }
+
+ // Attach programs by replacing names with the ones from current kernel
+ links := make(map[string]link.Link)
+
+ for name, prog := range bpfProgs {
+ // kprobe/* programs
+ if strings.HasPrefix(name, "kprobe") {
+ if funcName := strings.TrimPrefix(name, "kprobe_"); funcName != "" {
+ kernFuncName, err := ksyms.GetArchSpecificName(funcName)
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to find kernel specific function name", "func", funcName, "err", err)
+
+ continue
+ }
+
+ links[kernFuncName], err = link.Kprobe(kernFuncName, prog, nil)
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to open kprobe", "func", kernFuncName, "err", err)
+ }
+ }
+ }
+
+ // kretprobe/* programs
+ if strings.HasPrefix(name, "kretprobe") {
+ if funcName := strings.TrimPrefix(name, "kretprobe_"); funcName != "" {
+ kernFuncName, err := ksyms.GetArchSpecificName(funcName)
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to find kernel specific function name", "func", funcName, "err", err)
+
+ continue
+ }
+
+ links[kernFuncName], err = link.Kretprobe(kernFuncName, prog, nil)
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to open kretprobe", "func", kernFuncName, "err", err)
+ }
+ }
+ }
+
+ // fentry/* programs
+ if strings.HasPrefix(name, "fentry") {
+ kernFuncName := strings.TrimPrefix(name, "fentry_")
+ links[kernFuncName], err = link.AttachTracing(link.TracingOptions{
+ Program: prog,
+ AttachType: ebpf.AttachTraceFEntry,
+ })
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to open fentry", "func", kernFuncName, "err", err)
+ }
+ }
+
+ // fexit/* programs
+ if strings.HasPrefix(name, "fexit") {
+ kernFuncName := strings.TrimPrefix(name, "fexit_")
+ links[kernFuncName], err = link.AttachTracing(link.TracingOptions{
+ Program: prog,
+ AttachType: ebpf.AttachTraceFExit,
+ })
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to open fexit", "func", kernFuncName, "err", err)
+ }
+ }
+ }
+
+ return &ebpfCollector{
+ logger: logger,
+ hostname: hostname,
+ cgroupFS: cgroupFS,
+ inodesMap: make(map[uint64]string),
+ inodesRevMap: make(map[string]uint64),
+ netColl: netColl,
+ vfsColl: vfsColl,
+ links: links,
+ vfsWriteBytes: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "write_bytes_total"),
+ "Total number of bytes written from a cgroup",
+ []string{"manager", "hostname", "uuid", "mountpoint"},
+ nil,
+ ),
+ vfsWriteRequests: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "write_requests_total"),
+ "Total number of write requests from a cgroup",
+ []string{"manager", "hostname", "uuid", "mountpoint"},
+ nil,
+ ),
+ vfsWriteErrors: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "write_errors_total"),
+ "Total number of write errors from a cgroup",
+ []string{"manager", "hostname", "uuid", "mountpoint"},
+ nil,
+ ),
+ vfsReadBytes: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "read_bytes_total"),
+ "Total number of bytes read from a cgroup",
+ []string{"manager", "hostname", "uuid", "mountpoint"},
+ nil,
+ ),
+ vfsReadRequests: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "read_requests_total"),
+ "Total number of read requests from a cgroup",
+ []string{"manager", "hostname", "uuid", "mountpoint"},
+ nil,
+ ),
+ vfsReadErrors: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "read_errors_total"),
+ "Total number of read errors from a cgroup",
+ []string{"manager", "hostname", "uuid", "mountpoint"},
+ nil,
+ ),
+ vfsOpenRequests: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "open_requests_total"),
+ "Total number of open requests from a cgroup",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ vfsOpenErrors: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "open_errors_total"),
+ "Total number of open errors from a cgroup",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ vfsCreateRequests: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "create_requests_total"),
+ "Total number of create requests from a cgroup",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ vfsCreateErrors: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "create_errors_total"),
+ "Total number of create errors from a cgroup",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ vfsUnlinkRequests: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "unlink_requests_total"),
+ "Total number of unlink requests from a cgroup",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ vfsUnlinkErrors: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "unlink_errors_total"),
+ "Total number of unlink errors from a cgroup",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ netIngressPackets: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "ingress_packets_total"),
+ "Total number of ingress packets from a cgroup",
+ []string{"manager", "hostname", "uuid", "dev"},
+ nil,
+ ),
+ netIngressBytes: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "ingress_bytes_total"),
+ "Total number of ingress bytes from a cgroup",
+ []string{"manager", "hostname", "uuid", "dev"},
+ nil,
+ ),
+ netEgressPackets: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "egress_packets_total"),
+ "Total number of egress packets from a cgroup",
+ []string{"manager", "hostname", "uuid", "dev"},
+ nil,
+ ),
+ netEgressBytes: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "egress_bytes_total"),
+ "Total number of egress bytes from a cgroup",
+ []string{"manager", "hostname", "uuid", "dev"},
+ nil,
+ ),
+ }, nil
+}
+
+// Update implements Collector and update job metrics.
+func (c *ebpfCollector) Update(ch chan<- prometheus.Metric) error {
+ // Fetch all active cgroups
+ if err := c.getActiveCgroups(); err != nil {
+ return err
+ }
+
+ // Start wait group
+ wg := sync.WaitGroup{}
+ wg.Add(7)
+
+ // Update different metrics in go routines
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateVFSWrite(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update VFS write stats", "err", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateVFSRead(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update VFS read stats", "err", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateVFSOpen(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update VFS open stats", "err", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateVFSCreate(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update VFS create stats", "err", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateVFSUnlink(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update VFS unlink stats", "err", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateNetEgress(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update network egress stats", "err", err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateNetIngress(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update network ingress stats", "err", err)
+ }
+ }()
+
+ // Wait for all go routines
+ wg.Wait()
+
+ return nil
+}
+
+// Stop releases system resources used by the collector.
+func (c *ebpfCollector) Stop(_ context.Context) error {
+ // Close all probes
+ for name, link := range c.links {
+ if err := link.Close(); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to close link", "func", name, "err", err)
+ }
+ }
+
+ // Close network collection
+ if c.netColl != nil {
+ c.netColl.Close()
+ }
+
+ // Close VFS collection
+ if c.vfsColl != nil {
+ c.vfsColl.Close()
+ }
+
+ return nil
+}
+
+// updateVFSWrite updates VFS write metrics.
+func (c *ebpfCollector) updateVFSWrite(ch chan<- prometheus.Metric) error {
+ var key bpfVfsEventKey
+
+ var value bpfVfsRwEvent
+
+ if c.vfsColl != nil {
+ if m, ok := c.vfsColl.Maps["write_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ mount := unix.ByteSliceToString(key.Mnt[:])
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// updateVFSRead updates VFS read metrics.
+func (c *ebpfCollector) updateVFSRead(ch chan<- prometheus.Metric) error {
+ var key bpfVfsEventKey
+
+ var value bpfVfsRwEvent
+
+ if c.vfsColl != nil {
+ if m, ok := c.vfsColl.Maps["read_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ mount := unix.ByteSliceToString(key.Mnt[:])
+ ch <- prometheus.MustNewConstMetric(c.vfsReadRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsReadBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsReadErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// updateVFSOpen updates VFS open stats.
+func (c *ebpfCollector) updateVFSOpen(ch chan<- prometheus.Metric) error {
+ var key uint32
+
+ var value bpfVfsInodeEvent
+
+ if c.vfsColl != nil {
+ if m, ok := c.vfsColl.Maps["open_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// updateVFSCreate updates VFS create stats.
+func (c *ebpfCollector) updateVFSCreate(ch chan<- prometheus.Metric) error {
+ var key uint32
+
+ var value bpfVfsInodeEvent
+
+ if c.vfsColl != nil {
+ if m, ok := c.vfsColl.Maps["create_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ ch <- prometheus.MustNewConstMetric(c.vfsCreateRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsCreateErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// updateVFSUnlink updates VFS unlink stats.
+func (c *ebpfCollector) updateVFSUnlink(ch chan<- prometheus.Metric) error {
+ var key uint32
+
+ var value bpfVfsInodeEvent
+
+ if c.vfsColl != nil {
+ if m, ok := c.vfsColl.Maps["unlink_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ ch <- prometheus.MustNewConstMetric(c.vfsUnlinkRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsUnlinkErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// updateNetIngress updates network ingress stats.
+func (c *ebpfCollector) updateNetIngress(ch chan<- prometheus.Metric) error {
+ var key bpfNetEventKey
+
+ var value bpfNetEvent
+
+ if c.netColl != nil {
+ if m, ok := c.netColl.Maps["ingress_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ device := unix.ByteSliceToString(key.Dev[:])
+ ch <- prometheus.MustNewConstMetric(c.netIngressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
+ ch <- prometheus.MustNewConstMetric(c.netIngressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// updateNetEgress updates network egress stats.
+func (c *ebpfCollector) updateNetEgress(ch chan<- prometheus.Metric) error {
+ var key bpfNetEventKey
+
+ var value bpfNetEvent
+
+ if c.netColl != nil {
+ if m, ok := c.netColl.Maps["egress_accumulator"]; ok {
+ defer m.Close()
+
+ for m.Iterate().Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ device := unix.ByteSliceToString(key.Dev[:])
+ ch <- prometheus.MustNewConstMetric(c.netEgressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
+ ch <- prometheus.MustNewConstMetric(c.netEgressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
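+// getActiveCgroups walks the cgroup mount of the resource manager, extracts the
+// job UUID from each cgroup path and records the cgroup inode, so that bpf map
+// entries (keyed by cgroup ID) can be mapped back to job UUIDs at scrape time.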
+func (c *ebpfCollector) getActiveCgroups() error {
+ // Get currently active job UUIDs and set their cgroup inodes in the activeCgroups state variable
+ var activeUUIDs []string
+
+ // Reset activeCgroups from last scrape
+ c.activeCgroups = make([]uint64, 0)
+
+ // Walk through all cgroups and get cgroup paths
+ if err := filepath.WalkDir(c.cgroupFS.mount, func(p string, info fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Ignore irrelevant cgroup paths
+ if !info.IsDir() || c.cgroupFS.pathFilter(p) {
+ return nil
+ }
+
+ // Get cgroup ID
+ cgroupIDMatches := c.cgroupFS.idRegex.FindStringSubmatch(p)
+ if len(cgroupIDMatches) <= 1 {
+ return nil
+ }
+
+ uuid := strings.TrimSpace(cgroupIDMatches[1])
+ if uuid == "" {
+ level.Error(c.logger).Log("msg", "Empty UUID", "path", p)
+
+ return nil
+ }
+
+ // Check if we already passed through this cgroup
+ if slices.Contains(activeUUIDs, uuid) {
+ return nil
+ }
+
+ // Get inode of the cgroup
+ if _, ok := c.inodesRevMap[uuid]; !ok {
+ if inode, err := inode(p); err == nil {
+ c.inodesRevMap[uuid] = inode
+ c.inodesMap[inode] = uuid
+ }
+ }
+
+ activeUUIDs = append(activeUUIDs, uuid)
+ c.activeCgroups = append(c.activeCgroups, c.inodesRevMap[uuid])
+
+ level.Debug(c.logger).Log("msg", "cgroup path", "path", p)
+
+ return nil
+ }); err != nil {
+ level.Error(c.logger).
+ Log("msg", "Error walking cgroup subsystem", "path", c.cgroupFS.mount, "err", err)
+
+ return err
+ }
+
+ // Remove expired uuids from inodeMap and inodeRevMap
+ for uuid, inode := range c.inodesRevMap {
+ if !slices.Contains(activeUUIDs, uuid) {
+ delete(c.inodesRevMap, uuid)
+ delete(c.inodesMap, inode)
+ }
+ }
+
+ return nil
+}
+
+// bpfVFSObjs returns the VFS bpf objects based on current kernel version.
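+// The version-suffixed objects correspond to the __KERNEL_PRE_v511 and
+// __KERNEL_POST_v512_PRE_v62 branches in bpf/vfs/bpf_vfs.c, which account for
+// the different vfs_* function signatures of those kernel ranges.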
+func bpfVFSObjs() (string, error) {
+ // Get current kernel version
+ currentKernelVer, err := KernelVersion()
+ if err != nil {
+ return "", err
+ }
+
+ // Return appropriate bpf object file based on kernel version
+ if currentKernelVer > KernelStringToNumeric("6.2") {
+ return "bpf_vfs.o", nil
+ } else if currentKernelVer > KernelStringToNumeric("5.11") && currentKernelVer <= KernelStringToNumeric("6.2") {
+ return "bpf_vfs_v62.o", nil
+ } else {
+ return "bpf_vfs_v511.o", nil
+ }
+}
+
+// loadObject loads a BPF ELF file and returns a Collection.
+func loadObject(path string) (*ebpf.Collection, error) {
+ // Read ELF file
+ file, err := objsFS.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read object file: %w", err)
+ }
+
+ // Make a reader and get CollectionSpec
+ reader := bytes.NewReader(file)
+
+ spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+ if err != nil {
+ var ve *ebpf.VerifierError
+ if errors.As(err, &ve) {
+ err = fmt.Errorf("%+v", ve) //nolint:errorlint
+ }
+
+ return nil, fmt.Errorf("failed to load object: %w", err)
+ }
+
+ // Instantiate a Collection from a CollectionSpec.
+ coll, err := ebpf.NewCollection(spec)
+ if err != nil {
+ return nil, fmt.Errorf("failed to instantiate collection: %w", err)
+ }
+
+ return coll, nil
+}
diff --git a/pkg/collector/ebpf_test.go b/pkg/collector/ebpf_test.go
new file mode 100644
index 00000000..f00ee032
--- /dev/null
+++ b/pkg/collector/ebpf_test.go
@@ -0,0 +1,249 @@
+package collector
+
+import (
+ "context"
+ "os/user"
+ "testing"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// func mockVFSSpec() *ebpf.CollectionSpec {
+// var mnt [64]uint8
+// // mock mount
+// copy(mnt[:], "/home/test")
+
+// return &ebpf.CollectionSpec{
+// Maps: map[string]*ebpf.MapSpec{
+// "write_accumulator": {
+// Type: ebpf.Hash,
+// KeySize: 68,
+// ValueSize: 24,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: bpfVfsEventKey{
+// Cid: uint32(1234),
+// Mnt: mnt,
+// },
+// Value: bpfVfsRwEvent{
+// Calls: uint64(10),
+// Bytes: uint64(10000),
+// Errors: uint64(1),
+// },
+// },
+// },
+// },
+// "read_accumulator": {
+// Type: ebpf.Hash,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: bpfVfsEventKey{
+// Cid: uint32(1234),
+// Mnt: mnt,
+// },
+// Value: bpfVfsRwEvent{
+// Calls: uint64(20),
+// Bytes: uint64(20000),
+// Errors: uint64(2),
+// },
+// },
+// },
+// },
+// "open_accumulator": {
+// Type: ebpf.Hash,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: uint32(1234),
+// Value: bpfVfsInodeEvent{
+// Calls: uint64(30),
+// Errors: uint64(3),
+// },
+// },
+// },
+// },
+// "create_accumulator": {
+// Type: ebpf.Hash,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: uint32(1234),
+// Value: bpfVfsInodeEvent{
+// Calls: uint64(40),
+// Errors: uint64(4),
+// },
+// },
+// },
+// },
+// "unlink_accumulator": {
+// Type: ebpf.Hash,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: uint32(1234),
+// Value: bpfVfsInodeEvent{
+// Calls: uint64(50),
+// Errors: uint64(5),
+// },
+// },
+// },
+// },
+// },
+// }
+// }
+
+// func mockNetSpec() *ebpf.CollectionSpec {
+// var dev [16]uint8
+// // mock mount
+// copy(dev[:], "eno1")
+
+// return &ebpf.CollectionSpec{
+// Maps: map[string]*ebpf.MapSpec{
+// "ingress_accumulator": {
+// Type: ebpf.Hash,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: bpfNetEventKey{
+// Cid: uint32(1234),
+// Dev: dev,
+// },
+// Value: bpfNetEvent{
+// Packets: uint64(10),
+// Bytes: uint64(10000),
+// },
+// },
+// },
+// },
+// "egress_accumulator": {
+// Type: ebpf.Hash,
+// MaxEntries: 1,
+// Contents: []ebpf.MapKV{
+// {
+// Key: bpfNetEventKey{
+// Cid: uint32(1234),
+// Dev: dev,
+// },
+// Value: bpfNetEvent{
+// Packets: uint64(20),
+// Bytes: uint64(20000),
+// },
+// },
+// },
+// },
+// },
+// }
+// }
+
+func skipUnprivileged(t *testing.T) {
+ t.Helper()
+
+ // Get current user
+ currentUser, err := user.Current()
+ require.NoError(t, err)
+
+ if currentUser.Uid != "0" {
+ t.Skip("Skipping testing due to lack of privileges")
+ }
+}
+
+func TestNewEbpfCollector(t *testing.T) {
+ skipUnprivileged(t)
+
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ "--collector.slurm.force-cgroups-version", "v2",
+ },
+ )
+ require.NoError(t, err)
+
+ collector, err := NewEbpfCollector(log.NewNopLogger())
+ require.NoError(t, err)
+
+ // Setup background goroutine to capture metrics.
+ metrics := make(chan prometheus.Metric)
+ defer close(metrics)
+
+ go func() {
+ i := 0
+ for range metrics {
+ i++
+ }
+ }()
+
+ err = collector.Update(metrics)
+ require.NoError(t, err)
+
+ err = collector.Stop(context.Background())
+ require.NoError(t, err)
+}
+
+func TestActiveCgroupsV2(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ },
+ )
+ require.NoError(t, err)
+
+ c := ebpfCollector{
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
+ logger: log.NewNopLogger(),
+ inodesMap: make(map[uint64]string),
+ inodesRevMap: make(map[string]uint64),
+ }
+
+ // Get active cgroups
+ err = c.getActiveCgroups()
+ require.NoError(t, err)
+
+ assert.Len(t, c.activeCgroups, 3)
+ assert.Len(t, c.inodesMap, 3)
+ assert.Len(t, c.inodesRevMap, 3)
+
+ // Get cgroup IDs
+ var uuids []string
+ for uuid := range c.inodesRevMap {
+ uuids = append(uuids, uuid)
+ }
+
+ assert.ElementsMatch(t, []string{"1009248", "1009249", "1009250"}, uuids)
+}
+
+func TestActiveCgroupsV1(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ },
+ )
+ require.NoError(t, err)
+
+ c := ebpfCollector{
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "cpuacct", "v1"),
+ logger: log.NewNopLogger(),
+ inodesMap: make(map[uint64]string),
+ inodesRevMap: make(map[string]uint64),
+ }
+
+ // Get active cgroups
+ err = c.getActiveCgroups()
+ require.NoError(t, err)
+
+ assert.Len(t, c.activeCgroups, 3)
+ assert.Len(t, c.inodesMap, 3)
+ assert.Len(t, c.inodesRevMap, 3)
+
+ // Get cgroup IDs
+ var uuids []string
+ for uuid := range c.inodesRevMap {
+ uuids = append(uuids, uuid)
+ }
+
+ assert.ElementsMatch(t, []string{"1009248", "1009249", "1009250"}, uuids)
+}
diff --git a/pkg/collector/helper.go b/pkg/collector/helper.go
index fc1d6929..b6bb4824 100644
--- a/pkg/collector/helper.go
+++ b/pkg/collector/helper.go
@@ -8,6 +8,7 @@ import (
"regexp"
"strconv"
"strings"
+ "syscall"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -26,28 +27,6 @@ var (
reParens = regexp.MustCompile(`\((.*)\)`)
)
-// fileExists checks if given file exists or not.
-func fileExists(filename string) bool {
- info, err := os.Stat(filename)
- if os.IsNotExist(err) {
- return false
- }
-
- return !info.IsDir()
-}
-
-// // Find named matches in regex groups and return a map.
-// func findNamedMatches(regex *regexp.Regexp, str string) map[string]string {
-// match := regex.FindStringSubmatch(str)
-
-// results := map[string]string{}
-// for i, name := range match {
-// results[regex.SubexpNames()[i]] = name
-// }
-
-// return results
-// }
-
// SanitizeMetricName sanitize the given metric name by replacing invalid characters by underscores.
//
// OpenMetrics and the Prometheus exposition format require the metric name
@@ -270,3 +249,40 @@ func GetAMDGPUDevices(rocmSmiPath string, logger log.Logger) (map[int]Device, er
// Get all devices
return parseAmdSmioutput(string(rocmSmiOutput), logger), nil
}
+
+// fileExists checks if given file exists or not.
+func fileExists(filename string) bool {
+ info, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+ return false
+ }
+
+ return !info.IsDir()
+}
+
+// // Find named matches in regex groups and return a map.
+// func findNamedMatches(regex *regexp.Regexp, str string) map[string]string {
+// match := regex.FindStringSubmatch(str)
+
+// results := map[string]string{}
+// for i, name := range match {
+// results[regex.SubexpNames()[i]] = name
+// }
+
+// return results
+// }
+
+// inode returns the inode of a given path.
+func inode(path string) (uint64, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return 0, fmt.Errorf("error running stat(%s): %w", path, err)
+ }
+
+ stat, ok := info.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("missing syscall.Stat_t in FileInfo for %s", path)
+ }
+
+ return stat.Ino, nil
+}
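Note: the inode of a job's cgroup directory doubles as the cgroup ID that the eBPF programs emit, which is how the collector maps BPF map keys back to Slurm job IDs. A rough, self-contained sketch of that mapping (illustration only, not the actual getActiveCgroups implementation; the cgroup root and job directories are hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

func main() {
	// Hypothetical cgroup v2 root; on a Slurm node the job directories live
	// under system.slice/slurmstepd.scope.
	root := "/sys/fs/cgroup/system.slice/slurmstepd.scope"

	inodesMap := make(map[uint64]string)    // cgroup ID (inode) -> job ID
	inodesRevMap := make(map[string]uint64) // job ID -> cgroup ID (inode)

	matches, _ := filepath.Glob(filepath.Join(root, "job_*"))
	for _, p := range matches {
		info, err := os.Stat(p)
		if err != nil {
			continue
		}
		if stat, ok := info.Sys().(*syscall.Stat_t); ok {
			jobID := filepath.Base(p)[len("job_"):]
			inodesMap[stat.Ino] = jobID
			inodesRevMap[jobID] = stat.Ino
		}
	}

	fmt.Println(inodesMap, inodesRevMap)
}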
diff --git a/pkg/collector/ipmi.go b/pkg/collector/ipmi.go
index 2ce4acc3..67eb2ca8 100644
--- a/pkg/collector/ipmi.go
+++ b/pkg/collector/ipmi.go
@@ -1,11 +1,11 @@
-// Taken from prometheus-community/ipmi_exporter/blob/master/collector_ipmi.go
-// DCMI spec (old) https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/dcmi-v1-5-rev-spec.pdf
-
//go:build !noimpi
// +build !noimpi
package collector
+// Taken from prometheus-community/ipmi_exporter/blob/master/collector_ipmi.go
+// DCMI spec (old) https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/dcmi-v1-5-rev-spec.pdf
+
import (
"context"
"encoding/json"
diff --git a/pkg/collector/kernel.go b/pkg/collector/kernel.go
new file mode 100644
index 00000000..708dfd77
--- /dev/null
+++ b/pkg/collector/kernel.go
@@ -0,0 +1,219 @@
+//go:build !noebpf
+// +build !noebpf
+
+package collector
+
+// Many of these utility functions have been nicked from https://github.com/cilium/tetragon
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// ksym is a structure for a kernel symbol.
+type ksym struct {
+ // addr uint64
+ name string
+ ty string
+ kmod string
+}
+
+// isFunction returns true if the given kernel symbol is a function.
+func (ksym *ksym) isFunction() bool {
+ tyLow := strings.ToLower(ksym.ty)
+
+ return tyLow == "w" || tyLow == "t"
+}
+
+// Ksyms is a structure for kernel symbols.
+type Ksyms struct {
+ table []ksym
+}
+
+// NewKsyms creates a new Ksyms structure (by reading procfs/kallsyms).
+func NewKsyms() (*Ksyms, error) {
+ file, err := os.Open(procFilePath("kallsyms"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // err = nil
+ var ksyms Ksyms
+
+ s := bufio.NewScanner(file)
+ // needsSort := false
+
+ for s.Scan() {
+ txt := s.Text()
+ fields := strings.Fields(txt)
+
+ var sym ksym
+
+ if len(fields) < 3 {
+ // fmt.Fprintf(os.Stderr, "Failed to parse: '%s'\n", txt)
+ continue
+ }
+
+ // Reading symbol addresses needs privileges and we are currently not
+ // using addresses. So, skip reading them and populate the table only
+ // with names.
+ // if sym.addr, err = strconv.ParseUint(fields[0], 16, 64); err != nil {
+ // err = fmt.Errorf("failed to parse address: %v", err)
+ // break
+ // }
+ sym.ty = fields[1]
+ sym.name = fields[2]
+
+ // fmt.Printf("%s => %d %s\n", txt, sym.addr, sym.name)
+ // if sym.isFunction() && sym.addr == 0 {
+ // err = fmt.Errorf("function %s reported at address 0. Insuffcient permissions?", sym.name)
+ // break
+ // }
+
+ // check if this symbol is part of a kmod
+ if sym.isFunction() && len(fields) >= 4 {
+ sym.kmod = strings.Trim(fields[3], "[]")
+ }
+
+ // if !needsSort && len(ksyms.table) > 0 {
+ // lastSym := ksyms.table[len(ksyms.table)-1]
+ // if lastSym.addr > sym.addr {
+ // needsSort = true
+ // }
+ // }
+
+ ksyms.table = append(ksyms.table, sym)
+ }
+
+ // if err == nil {
+ // err = s.Err()
+ // }
+
+ // if err != nil && len(ksyms.table) == 0 {
+ // err = errors.New("no symbols found")
+ // }
+
+ // if err != nil {
+ // return nil, err
+ // }
+
+ // if needsSort {
+ // sort.Slice(ksyms.table[:], func(i1, i2 int) bool { return ksyms.table[i1].addr < ksyms.table[i2].addr })
+ // }
+
+ return &ksyms, nil
+}
+
+// IsAvailable returns true if the given name is available on the current kernel.
+func (k *Ksyms) IsAvailable(name string) bool {
+ for _, sym := range k.table {
+ if sym.name == name {
+ return true
+ }
+ }
+
+ return false
+}
+
+// GetArchSpecificName returns the architecture-specific symbol (if one exists) for
+// a given kernel symbol.
+func (k *Ksyms) GetArchSpecificName(name string) (string, error) {
+ // This linear search is slow. But this only happens during the validation
+ // of kprobe-based tracing policies. TODO: optimise if needed
+ reg := regexp.MustCompile(fmt.Sprintf("(.*)%s$", name))
+ for _, s := range k.table {
+ // Compiler optimizations will add suffixes like .constprop, .isra
+ // Split them first and then check for prefixes
+ // https://people.redhat.com/~jolawren/klp-compiler-notes/livepatch/compiler-considerations.html
+ // https://lore.kernel.org/lkml/20170104172509.27350-13-acme@kernel.org/
+ if reg.MatchString(strings.Split(s.name, ".")[0]) {
+ // We should not return symbols with __pfx_ and __cfi_ prefixes
+ // https://lore.kernel.org/lkml/20230207135402.38f73bb6@gandalf.local.home/t/
+ // https://www.spinics.net/lists/kernel/msg4573413.html
+ if !strings.HasPrefix(s.name, "__pfx_") && !strings.HasPrefix(s.name, "__cfi_") {
+ return s.name, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("symbol %s not found in kallsyms or is not part of a module", name)
+}
+
+// KernelStringToNumeric converts the kernel version string into a numerical value
+// that can be used for comparisons.
+func KernelStringToNumeric(ver string) int64 {
+ // vendors like to define kernel 4.14.128-foo but
+ // everything after '-' is meaningless from BPF
+ // side so toss it out.
+ release := strings.Split(ver, "-")
+ verStr := release[0]
+ numeric := strings.TrimRight(verStr, "+")
+ vers := strings.Split(numeric, ".")
+
+ // Split out major, minor, and patch versions
+ majorS := vers[0]
+
+ minorS := ""
+ if len(vers) >= 2 {
+ minorS = vers[1]
+ }
+
+ patchS := ""
+ if len(vers) >= 3 {
+ patchS = vers[2]
+ }
+
+ // If we have no major version number, all is lost
+ major, err := strconv.ParseInt(majorS, 10, 32)
+ if err != nil {
+ return 0
+ }
+ // Fall back to minor = 0 if we can't parse the minor version
+ minor, err := strconv.ParseInt(minorS, 10, 32)
+ if err != nil {
+ minor = 0
+ }
+ // Fall back to patch = 0 if we can't parse the patch version
+ patch, err := strconv.ParseInt(patchS, 10, 32)
+ if err != nil {
+ patch = 0
+ }
+ // Similar to https://elixir.bootlin.com/linux/v6.2.16/source/tools/lib/bpf/bpf_helpers.h#L74
+ // we have to check that patch is <= 255. Otherwise, clamp it to 255.
+ if patch > 255 {
+ patch = 255
+ }
+
+ return ((major << 16) + (minor << 8) + patch)
+}
+
+// KernelVersion returns the kernel version of the current host.
+func KernelVersion() (int64, error) {
+ var versionStrings []string
+
+ if versionSig, err := os.ReadFile(procFilePath("version_signature")); err == nil {
+ versionStrings = strings.Fields(string(versionSig))
+ }
+
+ if len(versionStrings) > 0 {
+ return KernelStringToNumeric(versionStrings[len(versionStrings)-1]), nil
+ }
+
+ var uname unix.Utsname
+
+ err := unix.Uname(&uname)
+ if err != nil {
+ return 0, err
+ }
+
+ release := unix.ByteSliceToString(uname.Release[:])
+
+ return KernelStringToNumeric(release), nil
+}
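For reference, the encoding implemented above is (major << 16) + (minor << 8) + min(patch, 255). A small stand-alone sketch (illustration only) of how encoded versions compare; the values line up with the expectations in kernel_test.go below:

package main

import "fmt"

// encode mirrors KernelStringToNumeric for already-parsed version components.
func encode(major, minor, patch int64) int64 {
	if patch > 255 {
		patch = 255 // clamp, as in bpf_helpers.h
	}
	return (major << 16) + (minor << 8) + patch
}

func main() {
	fmt.Println(encode(6, 5, 13))                    // 394509, the value asserted in TestGetKernelVersion
	fmt.Println(encode(5, 4, 263) < encode(5, 5, 0)) // true, since the patch level is clamped to 255
}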
diff --git a/pkg/collector/kernel_test.go b/pkg/collector/kernel_test.go
new file mode 100644
index 00000000..17271203
--- /dev/null
+++ b/pkg/collector/kernel_test.go
@@ -0,0 +1,108 @@
+//go:build !noebpf
+// +build !noebpf
+
+package collector
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestKsyms(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse([]string{
+ "--path.procfs", "testdata/proc",
+ })
+ require.NoError(t, err)
+
+ ksyms, err := NewKsyms()
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ sym string
+ avail bool
+ ksym string
+ }{
+ {
+ name: "stable symbol",
+ sym: "vfs_read",
+ avail: true,
+ ksym: "vfs_read",
+ },
+ {
+ name: "unknown symbol",
+ sym: "asdfg",
+ avail: false,
+ ksym: "",
+ },
+ {
+ name: "arch specific symbol",
+ sym: "__netif_receive_skb_core",
+ avail: false,
+ ksym: "__netif_receive_skb_core.constprop.0",
+ },
+ }
+
+ for _, test := range tests {
+ avail := ksyms.IsAvailable(test.sym)
+ if test.avail {
+ assert.True(t, avail, test.name)
+ } else {
+ assert.False(t, avail, test.name)
+ }
+
+ ksym, err := ksyms.GetArchSpecificName(test.sym)
+ if test.ksym != "" {
+ assert.Equal(t, test.ksym, ksym, test.name)
+ } else {
+ assert.Error(t, err, test.name)
+ }
+ }
+}
+
+func TestKernelStringToNumeric(t *testing.T) {
+ v1 := KernelStringToNumeric("5.17.0")
+ v2 := KernelStringToNumeric("5.17.0+")
+ v3 := KernelStringToNumeric("5.17.0-foobar")
+
+ assert.Equal(t, v1, v2)
+ assert.Equal(t, v2, v3)
+
+ v1 = KernelStringToNumeric("5.4.144+")
+ v2 = KernelStringToNumeric("5.10.0")
+ assert.Less(t, v1, v2)
+
+ v1 = KernelStringToNumeric("5")
+ v2 = KernelStringToNumeric("5.4")
+ v3 = KernelStringToNumeric("5.4.0")
+ v4 := KernelStringToNumeric("5.4.1")
+
+ assert.Less(t, v1, v2)
+ assert.Equal(t, v2, v3)
+ assert.Less(t, v2, v4)
+
+ v1 = KernelStringToNumeric("4")
+ v2 = KernelStringToNumeric("4.19")
+ v3 = KernelStringToNumeric("5.19")
+
+ assert.Less(t, v1, v2)
+ assert.Less(t, v2, v3)
+ assert.Less(t, v1, v3)
+
+ v1 = KernelStringToNumeric("5.4.263")
+ v2 = KernelStringToNumeric("5.5.0")
+ assert.Less(t, v1, v2)
+}
+
+func TestGetKernelVersion(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse([]string{
+ "--path.procfs", "testdata/proc",
+ })
+ require.NoError(t, err)
+
+ ver, err := KernelVersion()
+ require.NoError(t, err)
+ assert.EqualValues(t, int64(394509), ver)
+}
diff --git a/pkg/collector/perf.go b/pkg/collector/perf.go
index 644ff726..bcc5fc81 100644
--- a/pkg/collector/perf.go
+++ b/pkg/collector/perf.go
@@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
- "regexp"
"slices"
"strings"
"sync"
@@ -78,8 +77,7 @@ type perfCollector struct {
perfSwProfilerTypes perf.SoftwareProfilerType
perfCacheProfilerTypes perf.CacheProfilerType
- cgroupIDRegex *regexp.Regexp // Regex to extract cgroup ID from process
- filterProcCmdRegex *regexp.Regexp // Processes with command line matching this regex will be ignored
+ cgroupFS cgroupFS
desc map[string]*prometheus.Desc
@@ -166,9 +164,7 @@ func NewPerfCollector(logger log.Logger) (Collector, error) {
}
if *collectorState[slurmCollectorSubsystem] {
- collector.manager = "slurm"
- collector.cgroupIDRegex = slurmCgroupPathRegex
- collector.filterProcCmdRegex = slurmIgnoreProcsRegex
+ collector.cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
}
var err error
@@ -441,20 +437,32 @@ func (c *perfCollector) Update(ch chan<- prometheus.Metric) error {
// Remove all profilers that have already finished
c.closeProfilers(activePIDs)
+ // Start a wait group
+ wg := sync.WaitGroup{}
+ wg.Add(len(cgroupIDProcMap))
+
+ // Update metrics concurrently, one goroutine per cgroup
for cgroupID, procs := range cgroupIDProcMap {
- if err := c.updateHardwareCounters(cgroupID, procs, ch); err != nil {
- level.Error(c.logger).Log("msg", "failed to update hardware counters", "cgroup", cgroupID, "err", err)
- }
+ go func(cid string, ps []procfs.Proc) {
+ defer wg.Done()
- if err := c.updateSoftwareCounters(cgroupID, procs, ch); err != nil {
- level.Error(c.logger).Log("msg", "failed to update software counters", "cgroup", cgroupID, "err", err)
- }
+ if err := c.updateHardwareCounters(cid, ps, ch); err != nil {
+ level.Error(c.logger).Log("msg", "failed to update hardware counters", "cgroup", cgroupID, "err", err)
+ }
- if err := c.updateCacheCounters(cgroupID, procs, ch); err != nil {
- level.Error(c.logger).Log("msg", "failed to update cache counters", "cgroup", cgroupID, "err", err)
- }
+ if err := c.updateSoftwareCounters(cid, ps, ch); err != nil {
+ level.Error(c.logger).Log("msg", "failed to update software counters", "cgroup", cgroupID, "err", err)
+ }
+
+ if err := c.updateCacheCounters(cid, ps, ch); err != nil {
+ level.Error(c.logger).Log("msg", "failed to update cache counters", "cgroup", cgroupID, "err", err)
+ }
+ }(cgroupID, procs)
}
+ // Wait for all goroutines to finish
+ wg.Wait()
+
return nil
}
@@ -721,28 +729,27 @@ func (c *perfCollector) discoverProcess() (map[string][]procfs.Proc, error) {
check_process:
// Ignore processes where command line matches the regex
- if c.filterProcCmdRegex != nil {
+ if c.cgroupFS.procFilter != nil {
procCmdLine, err := proc.CmdLine()
if err != nil || len(procCmdLine) == 0 {
continue
}
// Ignore process if matches found
- procCmdLineMatches := c.filterProcCmdRegex.FindStringSubmatch(strings.Join(procCmdLine, " "))
- if len(procCmdLineMatches) > 1 {
+ if c.cgroupFS.procFilter(strings.Join(procCmdLine, " ")) {
continue
}
}
// Get cgroup ID from regex
- if c.cgroupIDRegex != nil {
+ if c.cgroupFS.idRegex != nil {
cgroups, err := proc.Cgroups()
if err != nil || len(cgroups) == 0 {
continue
}
for _, cgroup := range cgroups {
- cgroupIDMatches := c.cgroupIDRegex.FindStringSubmatch(cgroup.Path)
+ cgroupIDMatches := c.cgroupFS.idRegex.FindStringSubmatch(cgroup.Path)
if len(cgroupIDMatches) <= 1 {
continue
}
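The per-cgroup fan-out introduced in Update above is the usual WaitGroup pattern. A stand-alone sketch of the same structure (illustration only, with a hypothetical cgroup-to-PIDs map and a print in place of the counter updates):

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Hypothetical cgroup ID -> PIDs map; in the collector this comes from discoverProcess().
	cgroupIDProcMap := map[string][]int{"211": {101, 102}, "212": {103}}

	var wg sync.WaitGroup
	wg.Add(len(cgroupIDProcMap))

	for cgroupID, procs := range cgroupIDProcMap {
		// Pass loop variables as arguments so each goroutine works on its own
		// copy (needed before the Go 1.22 loop-variable semantics).
		go func(cid string, ps []int) {
			defer wg.Done()
			fmt.Printf("updating counters for cgroup %s (%d procs)\n", cid, len(ps))
		}(cgroupID, procs)
	}

	wg.Wait()
}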
diff --git a/pkg/collector/perf_test.go b/pkg/collector/perf_test.go
index ab74125a..168e06b3 100644
--- a/pkg/collector/perf_test.go
+++ b/pkg/collector/perf_test.go
@@ -70,8 +70,7 @@ func TestDiscoverProcess(t *testing.T) {
collector := perfCollector{
logger: log.NewNopLogger(),
envVar: "ENABLE_PROFILING",
- cgroupIDRegex: slurmCgroupPathRegex,
- filterProcCmdRegex: slurmIgnoreProcsRegex,
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, ""),
perfHwProfilersEnabled: true,
perfSwProfilersEnabled: true,
perfCacheProfilersEnabled: true,
@@ -121,8 +120,7 @@ func TestNewProfilers(t *testing.T) {
collector := perfCollector{
logger: log.NewNopLogger(),
- cgroupIDRegex: slurmCgroupPathRegex,
- filterProcCmdRegex: slurmIgnoreProcsRegex,
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, ""),
perfHwProfilersEnabled: true,
perfSwProfilersEnabled: true,
perfCacheProfilersEnabled: true,
diff --git a/pkg/collector/regexp.go b/pkg/collector/regexp.go
deleted file mode 100644
index 327fa2b6..00000000
--- a/pkg/collector/regexp.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package collector
-
-import "regexp"
-
-// Regular expressions of cgroup paths for different resource managers
-/*
- For v1 possibilities are /cpuacct/slurm/uid_1000/job_211
- /memory/slurm/uid_1000/job_211
-
- For v2 possibilities are /system.slice/slurmstepd.scope/job_211
- /system.slice/slurmstepd.scope/job_211/step_interactive
- /system.slice/slurmstepd.scope/job_211/step_extern/user/task_0
-*/
-var (
- slurmCgroupPathRegex = regexp.MustCompile("^.*/slurm(?:.*?)/job_([0-9]+)(?:.*$)")
- slurmIgnoreProcsRegex = regexp.MustCompile("slurmstepd:(.*)|sleep ([0-9]+)|/bin/bash (.*)/slurm_script")
-)
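For reference, a stand-alone sketch (illustration only) of how this expression, which now lives in cgroup.go, extracts the job ID from both cgroup v1 and v2 paths:

package main

import (
	"fmt"
	"regexp"
)

var slurmCgroupPathRegex = regexp.MustCompile("^.*/slurm(?:.*?)/job_([0-9]+)(?:.*$)")

func main() {
	paths := []string{
		"/cpuacct/slurm/uid_1000/job_211",                                // cgroups v1
		"/system.slice/slurmstepd.scope/job_211/step_extern/user/task_0", // cgroups v2
	}
	for _, p := range paths {
		if m := slurmCgroupPathRegex.FindStringSubmatch(p); len(m) > 1 {
			fmt.Println(m[1]) // prints "211" for both paths
		}
	}
}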
diff --git a/pkg/collector/slurm.go b/pkg/collector/slurm.go
index 076073b2..d92a0665 100644
--- a/pkg/collector/slurm.go
+++ b/pkg/collector/slurm.go
@@ -104,10 +104,7 @@ type CgroupMetric struct {
}
type slurmCollector struct {
- cgroups string // v1 or v2
- cgroupsRootPath string
- slurmCgroupsPath string
- manager string
+ cgroupFS cgroupFS
hostname string
gpuDevs map[int]Device
hostMemTotal float64
@@ -151,36 +148,9 @@ func NewSlurmCollector(logger log.Logger) (Collector, error) {
Log("msg", "flag --collector.slurm.swap.memory.metrics has been deprecated. Use --collector.slurm.swap-memory-metrics instead")
}
- var cgroupsVersion string
-
- var cgroupsRootPath string
-
- var slurmCgroupsPath string
-
- // Set cgroups root path based on cgroups version
- if cgroups.Mode() == cgroups.Unified {
- cgroupsVersion = "v2"
- cgroupsRootPath = *cgroupfsPath
- slurmCgroupsPath = filepath.Join(*cgroupfsPath, "system.slice/slurmstepd.scope")
- } else {
- cgroupsVersion = "v1"
- cgroupsRootPath = filepath.Join(*cgroupfsPath, *cgroupsV1Subsystem)
- slurmCgroupsPath = filepath.Join(cgroupsRootPath, "slurm")
- }
-
- level.Info(logger).Log("cgroup", cgroupsVersion, "mount", slurmCgroupsPath)
-
- // If cgroup version is set via CLI flag for testing override the one we got earlier
- if *forceCgroupsVersion != "" {
- cgroupsVersion = *forceCgroupsVersion
- if cgroupsVersion == "v2" {
- cgroupsRootPath = *cgroupfsPath
- slurmCgroupsPath = filepath.Join(*cgroupfsPath, "system.slice/slurmstepd.scope")
- } else if cgroupsVersion == "v1" {
- cgroupsRootPath = filepath.Join(*cgroupfsPath, "cpuacct")
- slurmCgroupsPath = filepath.Join(cgroupsRootPath, "slurm")
- }
- }
+ // Get SLURM's cgroup details
+ cgroupFS := slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
+ level.Info(logger).Log("cgroup", cgroupFS.mode, "mount", cgroupFS.mount)
// Attempt to get GPU devices
var gpuTypes []string
@@ -227,15 +197,12 @@ func NewSlurmCollector(logger log.Logger) (Collector, error) {
defer file.Close()
return &slurmCollector{
- cgroups: cgroupsVersion,
- cgroupsRootPath: cgroupsRootPath,
- slurmCgroupsPath: slurmCgroupsPath,
- manager: slurmCollectorSubsystem,
- hostname: hostname,
- gpuDevs: gpuDevs,
- hostMemTotal: memTotal,
- procFS: procFS,
- jobsCache: make(map[string]jobProps),
+ cgroupFS: cgroupFS,
+ hostname: hostname,
+ gpuDevs: gpuDevs,
+ hostMemTotal: memTotal,
+ procFS: procFS,
+ jobsCache: make(map[string]jobProps),
numJobs: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, genericSubsystem, "units"),
"Total number of jobs",
@@ -370,7 +337,7 @@ func (c *slurmCollector) Update(ch chan<- prometheus.Metric) error {
}
// First send num jobs on the current host
- ch <- prometheus.MustNewConstMetric(c.numJobs, prometheus.GaugeValue, float64(len(metrics)), c.manager, c.hostname)
+ ch <- prometheus.MustNewConstMetric(c.numJobs, prometheus.GaugeValue, float64(len(metrics)), c.cgroupFS.manager, c.hostname)
// Send metrics of each cgroup
for _, m := range metrics {
@@ -379,40 +346,40 @@ func (c *slurmCollector) Update(ch chan<- prometheus.Metric) error {
}
// CPU stats
- ch <- prometheus.MustNewConstMetric(c.jobCPUUser, prometheus.CounterValue, m.cpuUser, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobCPUSystem, prometheus.CounterValue, m.cpuSystem, c.manager, c.hostname, m.jobuuid)
- // ch <- prometheus.MustNewConstMetric(c.cpuTotal, prometheus.GaugeValue, m.cpuTotal, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobCPUs, prometheus.GaugeValue, float64(m.cpus), c.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobCPUUser, prometheus.CounterValue, m.cpuUser, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobCPUSystem, prometheus.CounterValue, m.cpuSystem, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ // ch <- prometheus.MustNewConstMetric(c.cpuTotal, prometheus.GaugeValue, m.cpuTotal, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobCPUs, prometheus.GaugeValue, float64(m.cpus), c.cgroupFS.manager, c.hostname, m.jobuuid)
// Memory stats
- ch <- prometheus.MustNewConstMetric(c.jobMemoryRSS, prometheus.GaugeValue, m.memoryRSS, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryCache, prometheus.GaugeValue, m.memoryCache, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryUsed, prometheus.GaugeValue, m.memoryUsed, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryTotal, prometheus.GaugeValue, m.memoryTotal, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryFailCount, prometheus.GaugeValue, m.memoryFailCount, c.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemoryRSS, prometheus.GaugeValue, m.memoryRSS, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemoryCache, prometheus.GaugeValue, m.memoryCache, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemoryUsed, prometheus.GaugeValue, m.memoryUsed, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemoryTotal, prometheus.GaugeValue, m.memoryTotal, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemoryFailCount, prometheus.GaugeValue, m.memoryFailCount, c.cgroupFS.manager, c.hostname, m.jobuuid)
// PSI stats. Push them only if they are available
if *collectSwapMemoryStatsDepre || *collectSwapMemoryStats {
- ch <- prometheus.MustNewConstMetric(c.jobMemswUsed, prometheus.GaugeValue, m.memswUsed, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemswTotal, prometheus.GaugeValue, m.memswTotal, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemswFailCount, prometheus.GaugeValue, m.memswFailCount, c.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemswUsed, prometheus.GaugeValue, m.memswUsed, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemswTotal, prometheus.GaugeValue, m.memswTotal, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemswFailCount, prometheus.GaugeValue, m.memswFailCount, c.cgroupFS.manager, c.hostname, m.jobuuid)
}
if *collectPSIStatsDepre || *collectPSIStats {
- ch <- prometheus.MustNewConstMetric(c.jobCPUPressure, prometheus.GaugeValue, m.cpuPressure, c.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryPressure, prometheus.GaugeValue, m.memoryPressure, c.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobCPUPressure, prometheus.GaugeValue, m.cpuPressure, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ ch <- prometheus.MustNewConstMetric(c.jobMemoryPressure, prometheus.GaugeValue, m.memoryPressure, c.cgroupFS.manager, c.hostname, m.jobuuid)
}
// RDMA stats
for device, handles := range m.rdmaHCAHandles {
if handles > 0 {
- ch <- prometheus.MustNewConstMetric(c.jobRDMAHCAHandles, prometheus.GaugeValue, handles, c.manager, c.hostname, m.jobuuid, device)
+ ch <- prometheus.MustNewConstMetric(c.jobRDMAHCAHandles, prometheus.GaugeValue, handles, c.cgroupFS.manager, c.hostname, m.jobuuid, device)
}
}
for device, objects := range m.rdmaHCAHandles {
if objects > 0 {
- ch <- prometheus.MustNewConstMetric(c.jobRDMAHCAObjects, prometheus.GaugeValue, objects, c.manager, c.hostname, m.jobuuid, device)
+ ch <- prometheus.MustNewConstMetric(c.jobRDMAHCAObjects, prometheus.GaugeValue, objects, c.cgroupFS.manager, c.hostname, m.jobuuid, device)
}
}
@@ -428,7 +395,7 @@ func (c *slurmCollector) Update(ch chan<- prometheus.Metric) error {
break
}
}
- ch <- prometheus.MustNewConstMetric(c.jobGpuFlag, prometheus.GaugeValue, float64(1), c.manager, c.hostname, m.jobuuid, gpuOrdinal, fmt.Sprintf("%s-gpu-%s", c.hostname, gpuOrdinal), uuid)
+ ch <- prometheus.MustNewConstMetric(c.jobGpuFlag, prometheus.GaugeValue, float64(1), c.cgroupFS.manager, c.hostname, m.jobuuid, gpuOrdinal, fmt.Sprintf("%s-gpu-%s", c.hostname, gpuOrdinal), uuid)
}
}
}
@@ -452,21 +419,21 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
var gpuOrdinals []string
- level.Debug(c.logger).Log("msg", "Loading cgroup", "path", c.slurmCgroupsPath)
+ level.Debug(c.logger).Log("msg", "Loading cgroup", "path", c.cgroupFS.mount)
// Walk through all cgroups and get cgroup paths
- if err := filepath.WalkDir(c.slurmCgroupsPath, func(p string, info fs.DirEntry, err error) error {
+ if err := filepath.WalkDir(c.cgroupFS.mount, func(p string, info fs.DirEntry, err error) error {
if err != nil {
return err
}
// Ignore step jobs
- if !info.IsDir() || strings.Contains(p, "/step_") {
+ if !info.IsDir() || c.cgroupFS.pathFilter(p) {
return nil
}
// Get relative path of cgroup
- rel, err := filepath.Rel(c.cgroupsRootPath, p)
+ rel, err := filepath.Rel(c.cgroupFS.root, p)
if err != nil {
level.Error(c.logger).Log("msg", "Failed to resolve relative path for cgroup", "path", p, "err", err)
@@ -474,7 +441,7 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
}
// Get cgroup ID which is job ID
- cgroupIDMatches := slurmCgroupPathRegex.FindStringSubmatch(p)
+ cgroupIDMatches := c.cgroupFS.idRegex.FindStringSubmatch(p)
if len(cgroupIDMatches) <= 1 {
return nil
}
@@ -486,6 +453,11 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
return nil
}
+ // Skip if we have already processed this job
+ if slices.Contains(activeJobUUIDs, jobuuid) {
+ return nil
+ }
+
// Get GPU ordinals of the job
if len(c.gpuDevs) > 0 {
if props, ok := c.jobsCache[jobuuid]; !ok || (ok && !c.containsGPUOrdinals(props)) {
@@ -504,7 +476,7 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
return nil
}); err != nil {
level.Error(c.logger).
- Log("msg", "Error walking cgroup subsystem", "path", c.slurmCgroupsPath, "err", err)
+ Log("msg", "Error walking cgroup subsystem", "path", c.cgroupFS.mount, "err", err)
return nil, err
}
@@ -538,7 +510,7 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
// getMetrics fetches metrics of a given SLURM cgroups path.
func (c *slurmCollector) getMetrics(metric *CgroupMetric) {
- if c.cgroups == "v2" {
+ if c.cgroupFS.mode == cgroups.Unified {
c.getCgroupsV2Metrics(metric)
} else {
c.getCgroupsV1Metrics(metric)
@@ -591,7 +563,7 @@ func (c *slurmCollector) parseCPUSet(cpuset string) ([]string, error) {
// getCPUs returns list of CPUs in the cgroup.
func (c *slurmCollector) getCPUs(path string) ([]string, error) {
var cpusPath string
- if c.cgroups == "v2" {
+ if c.cgroupFS.mode == cgroups.Unified {
cpusPath = fmt.Sprintf("%s%s/cpuset.cpus.effective", *cgroupfsPath, path)
} else {
cpusPath = fmt.Sprintf("%s/cpuset%s/cpuset.cpus", *cgroupfsPath, path)
diff --git a/pkg/collector/slurm_test.go b/pkg/collector/slurm_test.go
index 90ae5223..0c8208be 100644
--- a/pkg/collector/slurm_test.go
+++ b/pkg/collector/slurm_test.go
@@ -74,13 +74,11 @@ func TestCgroupsV2SlurmJobMetrics(t *testing.T) {
require.NoError(t, err)
c := slurmCollector{
- cgroups: "v2",
- gpuDevs: mockGPUDevices(),
- cgroupsRootPath: *cgroupfsPath,
- hostMemTotal: float64(123456),
- slurmCgroupsPath: *cgroupfsPath + "/system.slice/slurmstepd.scope",
- logger: log.NewNopLogger(),
- jobsCache: make(map[string]jobProps),
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
+ gpuDevs: mockGPUDevices(),
+ hostMemTotal: float64(123456),
+ logger: log.NewNopLogger(),
+ jobsCache: make(map[string]jobProps),
}
expectedSlurmMetrics = CgroupMetric{
@@ -133,14 +131,12 @@ func TestCgroupsV2SlurmJobMetricsWithProcFs(t *testing.T) {
require.NoError(t, err)
c := slurmCollector{
- cgroups: "v2",
- cgroupsRootPath: *cgroupfsPath,
- gpuDevs: mockGPUDevices(),
- hostMemTotal: float64(123456),
- slurmCgroupsPath: *cgroupfsPath + "/system.slice/slurmstepd.scope",
- logger: log.NewNopLogger(),
- jobsCache: make(map[string]jobProps),
- procFS: procFS,
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
+ gpuDevs: mockGPUDevices(),
+ hostMemTotal: float64(123456),
+ logger: log.NewNopLogger(),
+ jobsCache: make(map[string]jobProps),
+ procFS: procFS,
}
expectedSlurmMetrics = CgroupMetric{
@@ -189,12 +185,10 @@ func TestCgroupsV2SlurmJobMetricsNoJobProps(t *testing.T) {
require.NoError(t, err)
c := slurmCollector{
- cgroups: "v2",
- cgroupsRootPath: *cgroupfsPath,
- gpuDevs: mockGPUDevices(),
- slurmCgroupsPath: *cgroupfsPath + "/system.slice/slurmstepd.scope",
- logger: log.NewNopLogger(),
- jobsCache: make(map[string]jobProps),
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
+ gpuDevs: mockGPUDevices(),
+ logger: log.NewNopLogger(),
+ jobsCache: make(map[string]jobProps),
}
expectedSlurmMetrics = CgroupMetric{
@@ -246,13 +240,11 @@ func TestCgroupsV1SlurmJobMetrics(t *testing.T) {
require.NoError(t, err)
c := slurmCollector{
- cgroups: "v1",
- logger: log.NewNopLogger(),
- gpuDevs: mockGPUDevices(),
- cgroupsRootPath: *cgroupfsPath + "/cpuacct",
- slurmCgroupsPath: *cgroupfsPath + "/cpuacct/slurm",
- jobsCache: make(map[string]jobProps),
- procFS: procFS,
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "cpuacct", "v1"),
+ logger: log.NewNopLogger(),
+ gpuDevs: mockGPUDevices(),
+ jobsCache: make(map[string]jobProps),
+ procFS: procFS,
}
expectedSlurmMetrics = CgroupMetric{
@@ -313,12 +305,10 @@ func TestJobPropsCaching(t *testing.T) {
mockGPUDevs := mockGPUDevices()
c := slurmCollector{
- cgroups: "v1",
- logger: log.NewNopLogger(),
- gpuDevs: mockGPUDevs,
- cgroupsRootPath: *cgroupfsPath + "/cpuacct",
- slurmCgroupsPath: *cgroupfsPath + "/cpuacct/slurm",
- jobsCache: make(map[string]jobProps),
+ cgroupFS: slurmCgroupFS(*cgroupfsPath, "cpuacct", "v1"),
+ logger: log.NewNopLogger(),
+ gpuDevs: mockGPUDevs,
+ jobsCache: make(map[string]jobProps),
}
// Add cgroups
diff --git a/pkg/collector/testdata/proc.ttar b/pkg/collector/testdata/proc.ttar
index a4d1797e..ff1d2ccc 100644
--- a/pkg/collector/testdata/proc.ttar
+++ b/pkg/collector/testdata/proc.ttar
@@ -10561,6 +10561,47 @@ xpc 399724544 92823103 86219234
debug 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: proc/kallsyms
+Lines: 37
+ffffffffa94ddd10 t __pfx_vfs_readv
+ffffffffa94ddd20 t vfs_readv
+ffffffffa94e0430 T __pfx_vfs_read
+ffffffffa94e0440 T vfs_read
+ffffffffa94fbce0 T __pfx_vfs_readlink
+ffffffffa94fbcf0 T vfs_readlink
+ffffffffaad9c2b8 r __ksymtab_vfs_readlink
+ffffffffa94de9c0 t __pfx_vfs_writev
+ffffffffa94de9d0 t vfs_writev
+ffffffffa94e0ef0 T __pfx_vfs_write
+ffffffffa94e0f00 T vfs_write
+ffffffffa94dbf30 T __pfx_vfs_open
+ffffffffa94dbf40 T vfs_open
+ffffffffa94f5060 T __pfx_vfs_create
+ffffffffa94f5070 T vfs_create
+ffffffffa95131c0 T __pfx_vfs_create_mount
+ffffffffa95131d0 T vfs_create_mount
+ffffffffaad9c144 r __ksymtab_vfs_create
+ffffffffaad9c150 r __ksymtab_vfs_create_mount
+ffffffffa94f4860 T __pfx_vfs_unlink
+ffffffffa94f4870 T vfs_unlink
+ffffffffaad9c300 r __ksymtab_vfs_unlink
+ffffffffa9efa6c0 t __pfx___netif_receive_skb_core.constprop.0
+ffffffffa9efa6d0 t __netif_receive_skb_core.constprop.0
+ffffffffa9efbaa0 T __pfx_netif_receive_skb_core
+ffffffffa9efbab0 T netif_receive_skb_core
+ffffffffaad965ac r __ksymtab_netif_receive_skb_core
+ffffffffa9ef0a80 T __pfx_dev_queue_xmit_nit
+ffffffffa9ef0a90 T dev_queue_xmit_nit
+ffffffffa9ef95e0 T __pfx___dev_queue_xmit
+ffffffffa9ef95f0 T __dev_queue_xmit
+ffffffffa9f80430 T __pfx_tcf_dev_queue_xmit
+ffffffffa9f80440 T tcf_dev_queue_xmit
+ffffffffaa20e77a t dev_queue_xmit_nit.cold
+ffffffffaad8a984 r __ksymtab___dev_queue_xmit
+ffffffffaada69b0 r __ksymtab_dev_queue_xmit_nit
+ffffffffaadb3184 r __ksymtab_tcf_dev_queue_xmitEOF
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: proc/loadavg
Lines: 1
0.02 0.04 0.05 1/497 11947
@@ -11587,6 +11628,11 @@ Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: proc/version_signature
+Lines: 1
+Ubuntu 6.5.0-35.35~22.04.1-generic 6.5.13
+Mode: 444
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: proc/zoneinfo
Lines: 262
Node 0, zone DMA
From 3057dea174462c71cf120d1921d776feb044ca17 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 17:54:39 +0200
Subject: [PATCH 02/18] ci: Install clang in CI when not found
Signed-off-by: Mahendra Paipuri
---
Makefile.common | 19 +++++++++++++------
scripts/install_clang.sh | 14 ++++++++++++++
2 files changed, 27 insertions(+), 6 deletions(-)
create mode 100755 scripts/install_clang.sh
diff --git a/Makefile.common b/Makefile.common
index 2aab8e29..e7507190 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -184,7 +184,7 @@ common-unused:
# Dont bother updating swagger docs for release builds
.PHONY: common-build
-common-build: promu swag
+common-build: promu swag bpf
ifeq ($(RELEASE_BUILD), 0)
ifeq ($(CGO_BUILD), 1)
@echo ">> updating swagger docs"
@@ -193,11 +193,6 @@ ifeq ($(CGO_BUILD), 1)
endif
@echo ">> building test binaries"
$(PROMU_TEST) build --prefix $(PREFIX) $(PROMU_BINARIES)
-endif
-ifeq ($(CGO_BUILD), 0)
- @echo ">> building bpf assets"
- $(MAKE) -C ./pkg/collector/bpf clean
- $(MAKE) -C ./pkg/collector/bpf
endif
@echo ">> building binaries"
$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
@@ -251,6 +246,18 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
+.PHONY: bpf
+bpf:
+ifeq ($(CGO_BUILD), 0)
+ifeq (, $(shell command -v clang))
+ @echo ">> installing clang 18"
+ @./scripts/install_clang.sh
+endif
+ @echo ">> building bpf assets using clang"
+ $(MAKE) -C ./pkg/collector/bpf clean
+ $(MAKE) -C ./pkg/collector/bpf
+endif
+
# Dont run swagger for release builds. This is due to cross compiling with GOARCH set
# to different archs and swag will be built in arch specific bin folder.
.PHONY: swag
diff --git a/scripts/install_clang.sh b/scripts/install_clang.sh
new file mode 100755
index 00000000..5259f2df
--- /dev/null
+++ b/scripts/install_clang.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -exo pipefail
+
+# Install clang stable version dependencies
+apt-get update && apt-get install -y --no-install-recommends \
+ wget lsb-release wget software-properties-common gnupg \
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Install clang 18
+bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+
+# Create necessary symlinks
+ln -vsnf /usr/lib/llvm-18/bin/clang /usr/bin/clang
+ln -vsnf /usr/lib/llvm-18/bin/llc /usr/bin/llc
From 302a3d01d4cc2f47358caee8a0795cde680f35a9 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 17:55:01 +0200
Subject: [PATCH 03/18] build: Add a dummy object file for linter to pass
Signed-off-by: Mahendra Paipuri
---
pkg/collector/bpf/.gitignore | 3 ++-
pkg/collector/bpf/objs/keep.o | 0
pkg/collector/ebpf.go | 18 +++++++-----------
website/docs/00-introduction.md | 2 +-
4 files changed, 10 insertions(+), 13 deletions(-)
create mode 100644 pkg/collector/bpf/objs/keep.o
diff --git a/pkg/collector/bpf/.gitignore b/pkg/collector/bpf/.gitignore
index d93f0bdb..a3faf1fb 100644
--- a/pkg/collector/bpf/.gitignore
+++ b/pkg/collector/bpf/.gitignore
@@ -1,3 +1,4 @@
# Ignore objs and deps
-objs/
+**/bpf_*.o
+**/bpf_*.ll
deps/
diff --git a/pkg/collector/bpf/objs/keep.o b/pkg/collector/bpf/objs/keep.o
new file mode 100644
index 00000000..e69de29b
diff --git a/pkg/collector/ebpf.go b/pkg/collector/ebpf.go
index 7062a1d9..f9b70c98 100644
--- a/pkg/collector/ebpf.go
+++ b/pkg/collector/ebpf.go
@@ -27,7 +27,7 @@ import (
// Embed the entire objs directory.
//
-//go:embed bpf/objs
+//go:embed bpf/objs/*.o
var objsFS embed.FS
const (
@@ -240,8 +240,7 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
continue
}
- links[kernFuncName], err = link.Kprobe(kernFuncName, prog, nil)
- if err != nil {
+ if links[kernFuncName], err = link.Kprobe(kernFuncName, prog, nil); err != nil {
level.Error(logger).Log("msg", "Failed to open kprobe", "func", kernFuncName, "err", err)
}
}
@@ -257,8 +256,7 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
continue
}
- links[kernFuncName], err = link.Kretprobe(kernFuncName, prog, nil)
- if err != nil {
+ if links[kernFuncName], err = link.Kretprobe(kernFuncName, prog, nil); err != nil {
level.Error(logger).Log("msg", "Failed to open kretprobe", "func", kernFuncName, "err", err)
}
}
@@ -267,11 +265,10 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
// fentry/* programs
if strings.HasPrefix(name, "fentry") {
kernFuncName := strings.TrimPrefix(name, "fentry_")
- links[kernFuncName], err = link.AttachTracing(link.TracingOptions{
+ if links[kernFuncName], err = link.AttachTracing(link.TracingOptions{
Program: prog,
AttachType: ebpf.AttachTraceFEntry,
- })
- if err != nil {
+ }); err != nil {
level.Error(logger).Log("msg", "Failed to open fentry", "func", kernFuncName, "err", err)
}
}
@@ -279,11 +276,10 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
// fexit/* programs
if strings.HasPrefix(name, "fexit") {
kernFuncName := strings.TrimPrefix(name, "fexit_")
- links[kernFuncName], err = link.AttachTracing(link.TracingOptions{
+ if links[kernFuncName], err = link.AttachTracing(link.TracingOptions{
Program: prog,
AttachType: ebpf.AttachTraceFExit,
- })
- if err != nil {
+ }); err != nil {
level.Error(logger).Log("msg", "Failed to open fexit", "func", kernFuncName, "err", err)
}
}
diff --git a/website/docs/00-introduction.md b/website/docs/00-introduction.md
index d052b3dd..ac5f28e1 100644
--- a/website/docs/00-introduction.md
+++ b/website/docs/00-introduction.md
@@ -9,7 +9,7 @@ slug: /
| | |
| ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| CI/CD | [![ci](https://github.com/mahendrapaipuri/ceems/workflows/CI/badge.svg)](https://github.com/mahendrapaipuri/ceems) [![CircleCI](https://dl.circleci.com/status-badge/img/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main.svg?style=svg&circle-token=28db7268f3492790127da28e62e76b0991d59c8b)](https://dl.circleci.com/status-badge/redirect/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main) [![Coverage](https://img.shields.io/badge/Coverage-75.9%25-brightgreen)](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml?query=branch%3Amain) |
+| CI/CD | [![ci](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml?query=branch%3Amain) [![CircleCI](https://dl.circleci.com/status-badge/img/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main.svg?style=svg&circle-token=28db7268f3492790127da28e62e76b0991d59c8b)](https://dl.circleci.com/status-badge/redirect/circleci/8jSYT1wyKY8mKQRTqNLThX/TzM1Mr3AEAqmehnoCde19R/tree/main) [![Coverage](https://img.shields.io/badge/Coverage-75.9%25-brightgreen)](https://github.com/mahendrapaipuri/ceems/actions/workflows/ci.yml?query=branch%3Amain) |
| Docs | [![docs](https://img.shields.io/badge/docs-passing-green?style=flat&link=https://github.com/mahendrapaipuri/ceems/blob/main/README.md)](https://github.com/mahendrapaipuri/ceems/blob/main/README.md) |
| Package | [![Release](https://img.shields.io/github/v/release/mahendrapaipuri/ceems.svg?include_prereleases)](https://github.com/mahendrapaipuri/ceems/releases/latest) |
| Meta | [![GitHub License](https://img.shields.io/github/license/mahendrapaipuri/ceems)](https://github.com/mahendrapaipuri/ceems) [![Go Report Card](https://goreportcard.com/badge/github.com/mahendrapaipuri/ceems)](https://goreportcard.com/report/github.com/mahendrapaipuri/ceems) [![code style](https://img.shields.io/badge/code%20style-gofmt-blue.svg)](https://pkg.go.dev/cmd/gofmt) |
From cda0ea0ff1b00ec63850a399b79f939f4d402fb0 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 18:17:43 +0200
Subject: [PATCH 04/18] test: Make bpf assets before running unit tests
Signed-off-by: Mahendra Paipuri
---
Makefile | 4 ++--
internal/osexec/osexec.go | 19 +++++++++++++++++++
internal/osexec/osexec_test.go | 29 +++++++++++++++++++++++------
3 files changed, 44 insertions(+), 8 deletions(-)
diff --git a/Makefile b/Makefile
index 3694b99a..5628414b 100644
--- a/Makefile
+++ b/Makefile
@@ -117,13 +117,13 @@ coverage:
$(GO) tool cover -func=coverage.out -o=coverage.out
.PHONY: test
-test: pkg/collector/testdata/sys/.unpacked pkg/collector/testdata/proc/.unpacked
+test: pkg/collector/testdata/sys/.unpacked pkg/collector/testdata/proc/.unpacked bpf
@echo ">> running tests"
$(GO) test -short $(test-flags) $(pkgs)
cat $(coverage-file).tmp | grep -v "main.go" > $(coverage-file)
.PHONY: test-32bit
-test-32bit: pkg/collector/testdata/sys/.unpacked
+test-32bit: pkg/collector/testdata/sys/.unpacked pkg/collector/testdata/proc/.unpacked bpf
@echo ">> running tests in 32-bit mode"
@env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs)
diff --git a/internal/osexec/osexec.go b/internal/osexec/osexec.go
index ad19c05d..543cf40c 100644
--- a/internal/osexec/osexec.go
+++ b/internal/osexec/osexec.go
@@ -3,6 +3,7 @@ package osexec
import (
"context"
+ "errors"
"math"
"os"
"os/exec"
@@ -18,6 +19,12 @@ const (
sudoCmd = "sudo"
)
+// Custom errors.
+var (
+ ErrInvalidUID = errors.New("invalid UID")
+ ErrInvalidGID = errors.New("invalid GID")
+)
+
// Execute command and return stdout/stderr.
func Execute(cmd string, args []string, env []string, logger log.Logger) ([]byte, error) {
level.Debug(logger).Log("msg", "Executing", "command", cmd, "args", strings.Join(args, " "))
@@ -63,10 +70,14 @@ func ExecuteAs(cmd string, args []string, uid int, gid int, env []string, logger
var uidInt32, gidInt32 uint32
if uid > 0 && uid <= math.MaxInt32 {
uidInt32 = uint32(uid) //nolint:gosec
+ } else {
+ return nil, ErrInvalidUID
}
if gid > 0 && gid <= math.MaxInt32 {
gidInt32 = uint32(gid) //nolint:gosec
+ } else {
+ return nil, ErrInvalidGID
}
// According to setpgid docs (https://man7.org/linux/man-pages/man2/setpgid.2.html)
@@ -153,10 +164,14 @@ func ExecuteAsContext(
var uidInt32, gidInt32 uint32
if uid > 0 && uid <= math.MaxInt32 {
uidInt32 = uint32(uid) //nolint:gosec
+ } else {
+ return nil, ErrInvalidUID
}
if gid > 0 && gid <= math.MaxInt32 {
gidInt32 = uint32(gid) //nolint:gosec
+ } else {
+ return nil, ErrInvalidGID
}
// According to setpgid docs (https://man7.org/linux/man-pages/man2/setpgid.2.html)
@@ -268,10 +283,14 @@ func ExecuteAsWithTimeout(
var uidInt32, gidInt32 uint32
if uid > 0 && uid <= math.MaxInt32 {
uidInt32 = uint32(uid) //nolint:gosec
+ } else {
+ return nil, ErrInvalidUID
}
if gid > 0 && gid <= math.MaxInt32 {
gidInt32 = uint32(gid) //nolint:gosec
+ } else {
+ return nil, ErrInvalidGID
}
// According to setpgid docs (https://man7.org/linux/man-pages/man2/setpgid.2.html)
diff --git a/internal/osexec/osexec_test.go b/internal/osexec/osexec_test.go
index da42383a..75e96e05 100644
--- a/internal/osexec/osexec_test.go
+++ b/internal/osexec/osexec_test.go
@@ -2,6 +2,7 @@ package osexec
import (
"context"
+ "os/user"
"strings"
"testing"
"time"
@@ -30,11 +31,19 @@ func TestExecute(t *testing.T) {
func TestExecuteAs(t *testing.T) {
// Test invalid uid/gid
- _, err := ExecuteAs("sleep", []string{"5"}, -65534, 65534, nil, log.NewNopLogger())
+ _, err := ExecuteAs("sleep", []string{"0.001"}, -65534, 65534, nil, log.NewNopLogger())
require.Error(t, err, "expected error due to invalid uid")
- _, err = ExecuteAs("sleep", []string{"5"}, 65534, 65534, nil, log.NewNopLogger())
- require.Error(t, err, "expected error executing as nobody user")
+ // Get current user
+ currentUser, err := user.Current()
+ require.NoError(t, err)
+
+ _, err = ExecuteAs("sleep", []string{"0.001"}, 65534, 65534, nil, log.NewNopLogger())
+ if currentUser.Uid == "0" {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err, "expected error executing as nobody user")
+ }
}
func TestExecuteContext(t *testing.T) {
@@ -60,10 +69,18 @@ func TestExecuteWithTimeout(t *testing.T) {
func TestExecuteAsWithTimeout(t *testing.T) {
// Test invalid uid/gid
- _, err := ExecuteAsWithTimeout("sleep", []string{"5"}, -65534, 65534, 2, nil, log.NewNopLogger())
+ _, err := ExecuteAsWithTimeout("sleep", []string{"0.001"}, -65534, 65534, 2, nil, log.NewNopLogger())
require.Error(t, err, "expected error due to invalid uid")
+ // Get current user
+ currentUser, err := user.Current()
+ require.NoError(t, err)
+
// Test successful command execution
- _, err = ExecuteAsWithTimeout("sleep", []string{"5"}, 65534, 65534, 2, nil, log.NewNopLogger())
- require.Error(t, err, "expected error executing as nobody user")
+ _, err = ExecuteAsWithTimeout("sleep", []string{"0.001"}, 65534, 65534, 2, nil, log.NewNopLogger())
+ if currentUser.Uid == "0" {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err, "expected error executing as nobody user")
+ }
}
From 4d0bafee11dceba243b351d99986b199dd8a7181 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 18:28:20 +0200
Subject: [PATCH 05/18] ci: Always install clang in CI
Signed-off-by: Mahendra Paipuri
---
.circleci/config.yml | 4 ++--
Makefile.common | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index fadf00d2..8be8b553 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -19,14 +19,14 @@ jobs:
steps:
- prometheus/setup_environment
- run: go mod download
- - run: make
+ - run: sudo make
- run: CGO_BUILD=1 make
test-arm:
executor: arm
steps:
- checkout
- run: uname -a
- - run: make
+ - run: sudo make
- run: CGO_BUILD=1 make
build:
machine:
diff --git a/Makefile.common b/Makefile.common
index e7507190..c1650d0f 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -249,7 +249,7 @@ $(PROMU):
.PHONY: bpf
bpf:
ifeq ($(CGO_BUILD), 0)
-ifeq (, $(shell command -v clang))
+ifeq ($(CI), true)
@echo ">> installing clang 18"
@./scripts/install_clang.sh
endif
From 73724d4cb11ad94c7b1d6a79962e2e2c5fa7c898 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 18:34:59 +0200
Subject: [PATCH 06/18] ci: Add sudo prefix when running as non root
Signed-off-by: Mahendra Paipuri
---
.circleci/config.yml | 4 ++--
scripts/install_clang.sh | 6 +++++-
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8be8b553..fadf00d2 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -19,14 +19,14 @@ jobs:
steps:
- prometheus/setup_environment
- run: go mod download
- - run: sudo make
+ - run: make
- run: CGO_BUILD=1 make
test-arm:
executor: arm
steps:
- checkout
- run: uname -a
- - run: sudo make
+ - run: make
- run: CGO_BUILD=1 make
build:
machine:
diff --git a/scripts/install_clang.sh b/scripts/install_clang.sh
index 5259f2df..d9d7b9c7 100755
--- a/scripts/install_clang.sh
+++ b/scripts/install_clang.sh
@@ -7,7 +7,11 @@ apt-get update && apt-get install -y --no-install-recommends \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Install clang 18
-bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+SUDO=''
+if (( $EUID != 0 )); then
+ SUDO='sudo'
+fi
+$SUDO bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
# Create necessary symlinks
ln -vsnf /usr/lib/llvm-18/bin/clang /usr/bin/clang
From 6f37264753d2a122ed4f09c463e5eac6eb86a28a Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 18:37:41 +0200
Subject: [PATCH 07/18] build: Use sudo prefix everywhere in clang install
script
Signed-off-by: Mahendra Paipuri
---
scripts/install_clang.sh | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/scripts/install_clang.sh b/scripts/install_clang.sh
index d9d7b9c7..c3b280d8 100755
--- a/scripts/install_clang.sh
+++ b/scripts/install_clang.sh
@@ -1,18 +1,20 @@
#!/bin/bash
set -exo pipefail
+# Setup sudo prefix
+SUDO=''
+if (( $EUID != 0 )); then
+ SUDO='sudo'
+fi
+
# Install clang stable version dependencies
-apt-get update && apt-get install -y --no-install-recommends \
+$SUDO apt-get update && $SUDO apt-get install -y --no-install-recommends \
wget lsb-release wget software-properties-common gnupg \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Install clang 18
-SUDO=''
-if (( $EUID != 0 )); then
- SUDO='sudo'
-fi
$SUDO bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
# Create necessary symlinks
-ln -vsnf /usr/lib/llvm-18/bin/clang /usr/bin/clang
-ln -vsnf /usr/lib/llvm-18/bin/llc /usr/bin/llc
+$SUDO ln -vsnf /usr/lib/llvm-18/bin/clang /usr/bin/clang
+$SUDO ln -vsnf /usr/lib/llvm-18/bin/llc /usr/bin/llc
From 0ce1c31b2a3ef40e7dedaaaa6f6e731fe0a1b21d Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 20:31:51 +0200
Subject: [PATCH 08/18] ci: Add sample object file in lint job
Signed-off-by: Mahendra Paipuri
---
.github/workflows/step_tests-lint.yml | 5 +
Makefile.common | 4 +-
pkg/collector/bpf/.gitignore | 3 +-
pkg/collector/bpf/Makefile.common | 20 +--
pkg/collector/bpf/objs/keep.o | 0
pkg/collector/cgroup.go | 6 +
pkg/collector/ebpf.go | 194 +++++++++++++++-----------
pkg/collector/ebpf_test.go | 1 +
pkg/collector/perf.go | 18 ++-
pkg/collector/perf_test.go | 4 +-
10 files changed, 153 insertions(+), 102 deletions(-)
delete mode 100644 pkg/collector/bpf/objs/keep.o
diff --git a/.github/workflows/step_tests-lint.yml b/.github/workflows/step_tests-lint.yml
index 814ccc92..11c6a1e9 100644
--- a/.github/workflows/step_tests-lint.yml
+++ b/.github/workflows/step_tests-lint.yml
@@ -18,6 +18,11 @@ jobs:
with:
go-version: 1.22.x
+ - name: Create a sample object file
+ run: |
+ mkdir -p pkg/collector/bpf/objs
+ touch pkg/collector/bpf/objs/sample.o
+
- name: Lint
uses: golangci/golangci-lint-action@v6
with:
diff --git a/Makefile.common b/Makefile.common
index c1650d0f..96d95a66 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -48,7 +48,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.54.2
+GOLANGCI_LINT_VERSION ?= v1.60.3
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -249,7 +249,7 @@ $(PROMU):
.PHONY: bpf
bpf:
ifeq ($(CGO_BUILD), 0)
-ifeq ($(CI), true)
+ifeq (, $(shell command -v clang > /dev/null))
@echo ">> installing clang 18"
@./scripts/install_clang.sh
endif
diff --git a/pkg/collector/bpf/.gitignore b/pkg/collector/bpf/.gitignore
index a3faf1fb..d93f0bdb 100644
--- a/pkg/collector/bpf/.gitignore
+++ b/pkg/collector/bpf/.gitignore
@@ -1,4 +1,3 @@
# Ignore objs and deps
-**/bpf_*.o
-**/bpf_*.ll
+objs/
deps/
diff --git a/pkg/collector/bpf/Makefile.common b/pkg/collector/bpf/Makefile.common
index 7e2bb221..5d2c2ce2 100644
--- a/pkg/collector/bpf/Makefile.common
+++ b/pkg/collector/bpf/Makefile.common
@@ -5,19 +5,19 @@ LLC ?= llc
# Build the BPF programs for the detected architecture, default to x86, and
# allow easy overriding by using ?= for cross-compilation
-# UNAME_M := $(shell uname -m)
-# ifeq ($(UNAME_M),x86_64)
-# BPF_TARGET_ARCH ?= x86
-# endif
-# ifeq ($(UNAME_M),aarch64)
-# BPF_TARGET_ARCH ?= arm64
-# endif
-
-# Get cross-compiling flags from GOARCH env variable
-# Endians are picked up https://github.com/cilium/ebpf/blob/625b0a910e1ba666e483e75b149880ce3b54dc85/cmd/bpf2go/gen/target.go#L14-L28
BPF_TARGET_ARCH ?= x86
BPF_TARGET_MARCH ?= bpf
BPF_TARGET_COMPILE ?= 1
+UNAME_M := $(shell uname -m)
+ifeq ($(UNAME_M),x86_64)
+ BPF_TARGET_ARCH ?= x86
+endif
+ifeq ($(UNAME_M),aarch64)
+ BPF_TARGET_ARCH ?= arm64
+endif
+
+# Get cross-compiling flags from GOARCH env variable
+# Endians are picked up https://github.com/cilium/ebpf/blob/625b0a910e1ba666e483e75b149880ce3b54dc85/cmd/bpf2go/gen/target.go#L14-L28
ifeq ($(GOARCH),386)
BPF_TARGET_ARCH = x86
BPF_TARGET_MARCH = bpfel
diff --git a/pkg/collector/bpf/objs/keep.o b/pkg/collector/bpf/objs/keep.o
deleted file mode 100644
index e69de29b..00000000
diff --git a/pkg/collector/cgroup.go b/pkg/collector/cgroup.go
index c709d312..8b7135b6 100644
--- a/pkg/collector/cgroup.go
+++ b/pkg/collector/cgroup.go
@@ -2,6 +2,7 @@ package collector
import (
"bufio"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -19,6 +20,11 @@ const (
cgroupSubSysCount = 15
)
+// Custom errors.
+var (
+ ErrInvalidCgroupFS = errors.New("invalid cgroup file system")
+)
+
// Regular expressions of cgroup paths for different resource managers
/*
For v1 possibilities are /cpuacct/slurm/uid_1000/job_211
diff --git a/pkg/collector/ebpf.go b/pkg/collector/ebpf.go
index f9b70c98..1f40d341 100644
--- a/pkg/collector/ebpf.go
+++ b/pkg/collector/ebpf.go
@@ -140,6 +140,13 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
}
+ // If no cgroupFS set return
+ if cgroupFS.root == "" {
+ level.Error(logger).Log("msg", "ebpf collector needs slurm collector. Enable it with --collector.slurm")
+
+ return nil, ErrInvalidCgroupFS
+ }
+
// Remove resource limits for kernels <5.11.
if err := rlimit.RemoveMemlock(); err != nil {
return nil, fmt.Errorf("error removing memlock: %w", err)
@@ -491,23 +498,26 @@ func (c *ebpfCollector) Stop(_ context.Context) error {
// updateVFSWrite updates VFS write metrics.
func (c *ebpfCollector) updateVFSWrite(ch chan<- prometheus.Metric) error {
+ if c.vfsColl == nil {
+ return nil
+ }
+
var key bpfVfsEventKey
var value bpfVfsRwEvent
- if c.vfsColl != nil {
- if m, ok := c.vfsColl.Maps["write_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- mount := unix.ByteSliceToString(key.Mnt[:])
- ch <- prometheus.MustNewConstMetric(c.vfsWriteRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsWriteBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsWriteErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
- }
+ if m, ok := c.vfsColl.Maps["write_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ mount := unix.ByteSliceToString(key.Mnt[:])
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
}
}
}
@@ -517,23 +527,26 @@ func (c *ebpfCollector) updateVFSWrite(ch chan<- prometheus.Metric) error {
// updateVFSRead updates VFS read metrics.
func (c *ebpfCollector) updateVFSRead(ch chan<- prometheus.Metric) error {
+ if c.vfsColl == nil {
+ return nil
+ }
+
var key bpfVfsEventKey
var value bpfVfsRwEvent
- if c.vfsColl != nil {
- if m, ok := c.vfsColl.Maps["read_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- mount := unix.ByteSliceToString(key.Mnt[:])
- ch <- prometheus.MustNewConstMetric(c.vfsReadRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsReadBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsReadErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
- }
+ if m, ok := c.vfsColl.Maps["read_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ mount := unix.ByteSliceToString(key.Mnt[:])
+ ch <- prometheus.MustNewConstMetric(c.vfsReadRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsReadBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsReadErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
}
}
}
@@ -543,21 +556,24 @@ func (c *ebpfCollector) updateVFSRead(ch chan<- prometheus.Metric) error {
// updateVFSOpen updates VFS open stats.
func (c *ebpfCollector) updateVFSOpen(ch chan<- prometheus.Metric) error {
+ if c.vfsColl == nil {
+ return nil
+ }
+
var key uint32
var value bpfVfsInodeEvent
- if c.vfsColl != nil {
- if m, ok := c.vfsColl.Maps["open_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
- ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
- }
+ if m, ok := c.vfsColl.Maps["open_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
}
}
}
@@ -567,21 +583,24 @@ func (c *ebpfCollector) updateVFSOpen(ch chan<- prometheus.Metric) error {
// updateVFSCreate updates VFS create stats.
func (c *ebpfCollector) updateVFSCreate(ch chan<- prometheus.Metric) error {
+ if c.vfsColl == nil {
+ return nil
+ }
+
var key uint32
var value bpfVfsInodeEvent
- if c.vfsColl != nil {
- if m, ok := c.vfsColl.Maps["create_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
- ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
- }
+ if m, ok := c.vfsColl.Maps["create_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
}
}
}
@@ -591,21 +610,24 @@ func (c *ebpfCollector) updateVFSCreate(ch chan<- prometheus.Metric) error {
// updateVFSUnlink updates VFS unlink stats.
func (c *ebpfCollector) updateVFSUnlink(ch chan<- prometheus.Metric) error {
+ if c.vfsColl == nil {
+ return nil
+ }
+
var key uint32
var value bpfVfsInodeEvent
- if c.vfsColl != nil {
- if m, ok := c.vfsColl.Maps["unlink_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
- ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
- }
+ if m, ok := c.vfsColl.Maps["unlink_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
}
}
}
@@ -615,22 +637,25 @@ func (c *ebpfCollector) updateVFSUnlink(ch chan<- prometheus.Metric) error {
// updateNetIngress updates network ingress stats.
func (c *ebpfCollector) updateNetIngress(ch chan<- prometheus.Metric) error {
+ if c.netColl == nil {
+ return nil
+ }
+
var key bpfNetEventKey
var value bpfNetEvent
- if c.netColl != nil {
- if m, ok := c.netColl.Maps["ingress_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- device := unix.ByteSliceToString(key.Dev[:])
- ch <- prometheus.MustNewConstMetric(c.netIngressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
- ch <- prometheus.MustNewConstMetric(c.netIngressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
- }
+ if m, ok := c.netColl.Maps["ingress_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ device := unix.ByteSliceToString(key.Dev[:])
+ ch <- prometheus.MustNewConstMetric(c.netIngressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
+ ch <- prometheus.MustNewConstMetric(c.netIngressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
}
}
}
@@ -640,22 +665,25 @@ func (c *ebpfCollector) updateNetIngress(ch chan<- prometheus.Metric) error {
// updateNetEgress updates network egress stats.
func (c *ebpfCollector) updateNetEgress(ch chan<- prometheus.Metric) error {
+ if c.netColl == nil {
+ return nil
+ }
+
var key bpfNetEventKey
var value bpfNetEvent
- if c.netColl != nil {
- if m, ok := c.netColl.Maps["egress_accumulator"]; ok {
- defer m.Close()
-
- for m.Iterate().Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- device := unix.ByteSliceToString(key.Dev[:])
- ch <- prometheus.MustNewConstMetric(c.netEgressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
- ch <- prometheus.MustNewConstMetric(c.netEgressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
- }
+ if m, ok := c.netColl.Maps["egress_accumulator"]; ok {
+ defer m.Close()
+
+ entries := m.Iterate()
+ for entries.Next(&key, &value) {
+ cgroupID := uint64(key.Cid)
+ if slices.Contains(c.activeCgroups, cgroupID) {
+ uuid := c.inodesMap[cgroupID]
+ device := unix.ByteSliceToString(key.Dev[:])
+ ch <- prometheus.MustNewConstMetric(c.netEgressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
+ ch <- prometheus.MustNewConstMetric(c.netEgressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
}
}
}
diff --git a/pkg/collector/ebpf_test.go b/pkg/collector/ebpf_test.go
index f00ee032..bfa681a1 100644
--- a/pkg/collector/ebpf_test.go
+++ b/pkg/collector/ebpf_test.go
@@ -158,6 +158,7 @@ func TestNewEbpfCollector(t *testing.T) {
_, err := CEEMSExporterApp.Parse(
[]string{
"--path.cgroupfs", "testdata/sys/fs/cgroup",
+ "--collector.slurm",
"--collector.slurm.force-cgroups-version", "v2",
},
)
diff --git a/pkg/collector/perf.go b/pkg/collector/perf.go
index bcc5fc81..efff1489 100644
--- a/pkg/collector/perf.go
+++ b/pkg/collector/perf.go
@@ -126,9 +126,23 @@ var (
// NewPerfCollector returns a new perf based collector, it creates a profiler
// per compute unit.
func NewPerfCollector(logger log.Logger) (Collector, error) {
+ // Get cgroup file system
+ var cgroupFS cgroupFS
+ if *collectorState[slurmCollectorSubsystem] {
+ cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
+ }
+
+	// If no cgroupFS is set, return
+	if cgroupFS.root == "" {
+		level.Error(logger).Log("msg", "perf collector needs slurm collector. Enable it with --collector.slurm")
+
+ return nil, ErrInvalidCgroupFS
+ }
+
collector := &perfCollector{
logger: logger,
hostname: hostname,
+ cgroupFS: cgroupFS,
envVar: *perfProfilersEnvVars,
perfHwProfilersEnabled: *perfHwProfilersFlag,
perfSwProfilersEnabled: *perfSwProfilersFlag,
@@ -163,10 +177,6 @@ func NewPerfCollector(logger log.Logger) (Collector, error) {
}
}
- if *collectorState[slurmCollectorSubsystem] {
- collector.cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
- }
-
var err error
// Instantiate a new Proc FS
diff --git a/pkg/collector/perf_test.go b/pkg/collector/perf_test.go
index 168e06b3..f7e615e5 100644
--- a/pkg/collector/perf_test.go
+++ b/pkg/collector/perf_test.go
@@ -16,7 +16,9 @@ import (
)
func TestPerfCollector(t *testing.T) {
- _, err := CEEMSExporterApp.Parse([]string{"--path.procfs", "testdata/proc"})
+ _, err := CEEMSExporterApp.Parse([]string{
+ "--path.procfs", "testdata/proc", "--collector.slurm",
+ })
require.NoError(t, err)
collector, err := NewPerfCollector(log.NewNopLogger())
From 889076423c1697a0a436b7cfa9b1f5834a738c0e Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 21:13:25 +0200
Subject: [PATCH 09/18] build: Install clang only when not found
* Override BPF_TARGET_ARCH from uname for amd64 and arm64
Signed-off-by: Mahendra Paipuri
---
Makefile.common | 16 +++++++++++-----
pkg/collector/bpf/Makefile.common | 4 ++--
2 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/Makefile.common b/Makefile.common
index 96d95a66..076193e3 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -24,6 +24,7 @@ PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9]
PROMU := $(FIRST_GOPATH)/bin/promu
SWAG := $(FIRST_GOPATH)/bin/swag
+CLANG := $(firstword $(subst :, ,$(shell which clang)))
pkgs = ./...
ifeq (arm, $(GOHOSTARCH))
@@ -246,18 +247,23 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
+# Build bpf assets using clang
.PHONY: bpf
-bpf:
ifeq ($(CGO_BUILD), 0)
-ifeq (, $(shell command -v clang > /dev/null))
- @echo ">> installing clang 18"
- @./scripts/install_clang.sh
-endif
+bpf: clang
@echo ">> building bpf assets using clang"
$(MAKE) -C ./pkg/collector/bpf clean
$(MAKE) -C ./pkg/collector/bpf
endif
+# Install clang using script
+.PHONY: clang
+ifeq (, $(shell which clang))
+clang:
+ @echo ">> installing clang 18"
+ @./scripts/install_clang.sh
+endif
+
# Dont run swagger for release builds. This is due to cross compiling with GOARCH set
# to different archs and swag will be built in arch specific bin folder.
.PHONY: swag
diff --git a/pkg/collector/bpf/Makefile.common b/pkg/collector/bpf/Makefile.common
index 5d2c2ce2..e70b31f5 100644
--- a/pkg/collector/bpf/Makefile.common
+++ b/pkg/collector/bpf/Makefile.common
@@ -10,10 +10,10 @@ BPF_TARGET_MARCH ?= bpf
BPF_TARGET_COMPILE ?= 1
UNAME_M := $(shell uname -m)
ifeq ($(UNAME_M),x86_64)
- BPF_TARGET_ARCH ?= x86
+ BPF_TARGET_ARCH = x86
endif
ifeq ($(UNAME_M),aarch64)
- BPF_TARGET_ARCH ?= arm64
+ BPF_TARGET_ARCH = arm64
endif
# Get cross-compiling flags from GOARCH env variable
From 9fa2ce07dcbf42a775d4a4dda60641f0756bcfd3 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Thu, 5 Sep 2024 22:18:20 +0200
Subject: [PATCH 10/18] ci: Check clang version before installing clang 18
Signed-off-by: Mahendra Paipuri
---
Makefile.common | 7 +++----
scripts/install_clang.sh | 10 ++++++++++
2 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/Makefile.common b/Makefile.common
index 076193e3..442312a8 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -256,13 +256,12 @@ bpf: clang
$(MAKE) -C ./pkg/collector/bpf
endif
-# Install clang using script
+# Install clang using script. We check the version of clang inside the script and ensure
+# that we install clang >= 18
.PHONY: clang
-ifeq (, $(shell which clang))
clang:
- @echo ">> installing clang 18"
+ @echo ">> installing clang"
@./scripts/install_clang.sh
-endif
# Dont run swagger for release builds. This is due to cross compiling with GOARCH set
# to different archs and swag will be built in arch specific bin folder.
diff --git a/scripts/install_clang.sh b/scripts/install_clang.sh
index c3b280d8..c1c10869 100755
--- a/scripts/install_clang.sh
+++ b/scripts/install_clang.sh
@@ -1,6 +1,16 @@
#!/bin/bash
set -exo pipefail
+# Check if clang exists. If it exists, ensure that it
+# is at least version 18
+if [ -x "$(command -v clang)" ]; then
+ clang_major_version=$(clang -v 2>&1 | grep version | grep -o "[0-9]\+\.[0-9]\+\.[0-9]\+" | cut -d "." -f1)
+ if (( ${clang_major_version} >= 18 )); then
+ echo "clang >=18 already installed. Skipping installation...."
+ exit 0
+ fi
+fi
+
# Setup sudo prefix
SUDO=''
if (( $EUID != 0 )); then
From 89f70218c8cc851e01bf5d4bb96f21094a234244 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Sun, 15 Sep 2024 18:22:44 +0200
Subject: [PATCH 11/18] feat: Monitor TCP and UDP sockets for network events
* Our first approach of monitoring link-level functions did not pan out due to the lack of process context in those functions. So we resorted to monitoring TCP and UDP sockets, which are higher level but guarantee the correct process context
Signed-off-by: Mahendra Paipuri
---
pkg/collector/bpf/Makefile | 29 ++-
pkg/collector/bpf/README.md | 74 +++++-
pkg/collector/bpf/lib/bpf_sock.h | 326 ++++++++++++++++++++++++
pkg/collector/bpf/network/bpf_network.c | 216 ++++++++++++++--
pkg/collector/bpf/network/bpf_network.h | 230 +++++++++++++----
5 files changed, 791 insertions(+), 84 deletions(-)
create mode 100644 pkg/collector/bpf/lib/bpf_sock.h
diff --git a/pkg/collector/bpf/Makefile b/pkg/collector/bpf/Makefile
index 5fcd097f..0dca7b7c 100644
--- a/pkg/collector/bpf/Makefile
+++ b/pkg/collector/bpf/Makefile
@@ -7,9 +7,8 @@ VFSDIR := vfs/
NETWORKDIR := network/
BPFTESTDIR := test/
-NETWORK = bpf_network.o
+NETWORK = bpf_network.o bpf_network_v519.o bpf_network_v64.o
VFS = bpf_vfs.o bpf_vfs_v511.o bpf_vfs_v62.o
-# BPFTEST = bpf_lseek.o
OBJSDIR := objs/
DEPSDIR := deps/
@@ -34,15 +33,23 @@ $(OBJSDIR):
$(DEPSDIR):
mkdir $(DEPSDIR)
-define DEFINE_VARIANT
+define DEFINE_VFS_VARIANT
VAR := $1
deps/bpf_vfs_$$(VAR).d: vfs/bpf_vfs.c
endef
# Generic build targets for each sub-dir
+$(eval $(call DEFINE_VFS_VARIANT,v511))
+$(eval $(call DEFINE_VFS_VARIANT,v62))
-$(eval $(call DEFINE_VARIANT,v511))
-$(eval $(call DEFINE_VARIANT,v62))
+define DEFINE_NET_VARIANT
+VAR := $1
+deps/bpf_network_$$(VAR).d: network/bpf_network.c
+endef
+
+# Generic build targets for each sub-dir
+$(eval $(call DEFINE_NET_VARIANT,v519))
+$(eval $(call DEFINE_NET_VARIANT,v64))
# Build only for relevant architectures
ifeq ($(BPF_TARGET_COMPILE),1)
@@ -70,9 +77,21 @@ $(DEPSDIR)%_v62.d:
objs/%.ll: $(NETWORKDIR)%.c
$(CLANG) $(CLANG_FLAGS) -c $< -o $@
+objs/%_v519.ll: $(NETWORKDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_PRE_v519 -D__KERNEL_PRE_v64 -c $< -o $@
+
+objs/%_v64.ll: $(NETWORKDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_PRE_v64 -c $< -o $@
+
$(DEPSDIR)%.d: $(NETWORKDIR)%.c
$(CLANG) $(CLANG_FLAGS) -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+$(DEPSDIR)%_v519.d: $(NETWORKDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_PRE_v519 -D__KERNEL_PRE_v64 -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
+$(DEPSDIR)%_v64.d: $(NETWORKDIR)%.c
+ $(CLANG) $(CLANG_FLAGS) -D__KERNEL_PRE_v64 -MM -MP -MT $(patsubst $(DEPSDIR)%.d, $(OBJSDIR)%.ll, $@) $< > $@
+
# BPFTESTDIR
objs/%.ll: $(BPFTESTDIR)%.c
$(CLANG) $(CLANG_FLAGS) -c $< -o $@
diff --git a/pkg/collector/bpf/README.md b/pkg/collector/bpf/README.md
index 8d1c8b69..2925b5b5 100644
--- a/pkg/collector/bpf/README.md
+++ b/pkg/collector/bpf/README.md
@@ -1 +1,73 @@
-
+# BPF programs
+
+This folder contains the bpf programs used by the ebpf collector
+of the CEEMS exporter. There are two sets of bpf programs:
+
+- `vfs`: These programs trace VFS events like `vfs_read`, `vfs_write`, _etc._
+- `network`: These programs trace network ingress and egress traffic.
+
+## VFS events
+
+Currently, the bpf programs trace the following events:
+
+- `vfs_write`
+- `vfs_read`
+- `vfs_create`
+- `vfs_open`
+- `vfs_unlink`
+- `vfs_mkdir`
+- `vfs_rmdir`
+
+On `x86` architectures, `fexit` probes are used to trace these functions, whereas
+`kretprobes` are used on other architectures. The reason is that `fentry/fexit`
+probes are not available on all architectures.
+
+The function signatures of several `vfs_*` functions have changed across kernel
+versions since `5.8`. Thus, we compile the bpf programs for the different kernel
+versions and load the appropriate program at runtime after detecting the kernel
+version of the current host.
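+
+A minimal sketch of how this runtime selection could look on the Go side is shown below;
+the helper and the version thresholds are illustrative assumptions, not the exact collector
+code:
+
+```go
+// Sketch: pick a pre-compiled bpf object based on the running kernel version.
+// The thresholds and object names below are illustrative only.
+package collector
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// kernelVersion returns the running kernel as major*1000 + minor, e.g. 5.19 -> 5019.
+func kernelVersion() (int, error) {
+	var uts unix.Utsname
+	if err := unix.Uname(&uts); err != nil {
+		return 0, err
+	}
+
+	release := unix.ByteSliceToString(uts.Release[:]) // e.g. "6.5.0-14-generic"
+
+	parts := strings.SplitN(release, ".", 3)
+	if len(parts) < 2 {
+		return 0, fmt.Errorf("unexpected kernel release %q", release)
+	}
+
+	major, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return 0, err
+	}
+
+	minor, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return 0, err
+	}
+
+	return major*1000 + minor, nil
+}
+
+// vfsObjFile returns the name of the vfs bpf object to load for this kernel.
+func vfsObjFile() (string, error) {
+	version, err := kernelVersion()
+	if err != nil {
+		return "", err
+	}
+
+	switch {
+	case version >= 6002: // >= 6.2
+		return "bpf_vfs.o", nil
+	case version >= 5011: // >= 5.11
+		return "bpf_vfs_v62.o", nil
+	default:
+		return "bpf_vfs_v511.o", nil
+	}
+}
+```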
+
+## Network events
+
+For network events, the bpf programs trace the following functions:
+
+- `tcp_sendmsg`
+- `tcp_sendpage` (when it exists; removed in kernels >= 6.5)
+- `tcp_recvmsg`
+- `udp_sendmsg`
+- `udp_sendpage` (when it exists; removed in kernels >= 6.5)
+- `udp_recvmsg`
+- `udpv6_sendmsg`
+- `udpv6_recvmsg`
+
+By tracing the functions listed above, we can get TCP and UDP traffic for both the IPv4
+and IPv6 families per cgroup. The function signatures of certain functions changed in
+kernel 5.19, and this is taken into account. Based on the kernel version at runtime,
+the appropriate object file containing the correct bpf programs is loaded.
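+
+A minimal sketch of how these programs could be attached from Go, assuming the cilium/ebpf
+package and the program names used in this patch (fexit on x86_64, a kprobe elsewhere):
+
+```go
+// Sketch: attach the TCP send-path program, preferring fexit on x86_64 and
+// falling back to a kprobe on other architectures.
+package collector
+
+import (
+	"runtime"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/link"
+)
+
+func attachTCPSendmsg(coll *ebpf.Collection) (link.Link, error) {
+	if runtime.GOARCH == "amd64" {
+		// fexit programs resolve their attach target from the section name,
+		// e.g. SEC("fexit/tcp_sendmsg").
+		return link.AttachTracing(link.TracingOptions{Program: coll.Programs["fexit_tcp_sendmsg"]})
+	}
+
+	// Other architectures fall back to a plain kprobe on the same symbol.
+	return link.Kprobe("tcp_sendmsg", coll.Programs["kprobe_tcp_sendmsg"], nil)
+}
+```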
+
+A first attempt was made to monitor the network traffic by tracing more low-level
+functions: `__netif_receive_skb_core` for ingress traffic and
+`__dev_queue_xmit` for egress traffic. `__netif_receive_skb_core` is the core function
+where packet processing starts when a packet reaches the NIC. When a lot of packets are
+sent at once (which happens quite often in real-world cases), only a few packets are
+processed in the user context and the rest are kept in device queues. The kernel's
+SoftIRQ then handles the queued packets on each CPU. Since SoftIRQ happens in kernel
+space, we lose the cgroup context of the user. The consequence is that only a few packets
+are attributed to the cgroup and the rest are attributed to the system.
+
+The objective of CEEMS is to accurately monitor the network traffic _per cgroup_ and hence
+we resorted to monitoring the network at the higher-level socket functions, where the user
+context is preserved.
+
+## Building
+
+`clang >= 18` is a prerequisite to build the bpf programs. The programs can be built
+using the provided Makefile:
+
+```bash
+make all
+```
+
+This will create the different bpf program objects in the `objs/` folder. These object files
+are embedded into the Go binary during the build process. The appropriate object file for the
+current kernel version is loaded at runtime.
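+
+A minimal sketch of how the embedding and loading could look, assuming a `go:embed` of the
+`objs/` directory and the cilium/ebpf loader (illustrative, not necessarily the exact
+mechanism used by the exporter):
+
+```go
+// Sketch: embed the compiled bpf objects and load the selected one at runtime.
+package collector
+
+import (
+	"bytes"
+	"embed"
+
+	"github.com/cilium/ebpf"
+)
+
+// objsFS embeds everything produced by `make all` in the objs/ directory.
+//
+//go:embed objs/*.o
+var objsFS embed.FS
+
+// loadCollection reads one embedded object and turns it into a live collection
+// of programs and maps.
+func loadCollection(name string) (*ebpf.Collection, error) {
+	data, err := objsFS.ReadFile("objs/" + name)
+	if err != nil {
+		return nil, err
+	}
+
+	spec, err := ebpf.LoadCollectionSpecFromReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+
+	return ebpf.NewCollection(spec)
+}
+```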
diff --git a/pkg/collector/bpf/lib/bpf_sock.h b/pkg/collector/bpf/lib/bpf_sock.h
new file mode 100644
index 00000000..4c61015f
--- /dev/null
+++ b/pkg/collector/bpf/lib/bpf_sock.h
@@ -0,0 +1,326 @@
+//go:build ignore
+
+/* SPDX-License-Identifier: (GPL-3.0-only) */
+
+#define MAX_SOCKET_CONN_ENTRIES 2048
+
+/* conn_event represents a socket connection */
+struct conn_event {
+ __u64 saddr_h;
+ __u64 saddr_l;
+ __u64 daddr_h;
+ __u64 daddr_l;
+ __u16 sport;
+ __u16 dport;
+};
+
+/* Socket related stats */
+struct conn_stats {
+ __u64 packets_in; /* Ingress packets counter */
+ __u64 packets_out; /* Egress packets counter */
+ __u64 bytes_received; /* Ingress bytes */
+ __u64 bytes_sent; /* Egress bytes */
+ __u64 total_retrans; /* Retransmissions counter */
+ __u64 bytes_retrans; /* Retransmissions bytes */
+};
+
+/* Map to track socket connections */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_SOCKET_CONN_ENTRIES);
+ __type(key, struct conn_event); /* Key is the conn_event struct */
+ __type(value, struct conn_stats);
+} socket_accumulator SEC(".maps");
+
+/**
+ * is_ipv4_mapped_ipv6 checks if IPs are IPv4 mapped to IPv6 ::ffff:xxxx:xxxx
+ * https://tools.ietf.org/html/rfc4291#section-2.5.5
+ * The addresses are stored in network byte order, so the IPv4 address is stored
+ * in the most significant 32 bits of saddr_l and daddr_l.
+ * Meanwhile the end of the mask is stored in the least significant 32 bits.
+ */
+FUNC_INLINE bool is_ipv4_mapped_ipv6(__u64 saddr_h, __u64 saddr_l, __u64 daddr_h, __u64 daddr_l)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return ((saddr_h == 0 && ((__u32)saddr_l == 0xFFFF0000)) || (daddr_h == 0 && ((__u32)daddr_l == 0xFFFF0000)));
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return ((saddr_h == 0 && ((__u32)(saddr_l >> 32) == 0x0000FFFF)) || (daddr_h == 0 && ((__u32)(daddr_l >> 32) == 0x0000FFFF)));
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+}
+
+/**
+ * tcp_sk casts sock struct into `tcp_sock` struct
+ * @sk: sock struct
+ *
+ * Returns pointer to `tcp_sock` struct
+ */
+FUNC_INLINE struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+ return (struct tcp_sock *)sk;
+}
+
+/**
+ * inet_sk casts sock struct into `inet_sock` struct
+ * @sk: sock struct
+ *
+ * Returns pointer to `inet_sock` struct
+ */
+FUNC_INLINE struct inet_sock *inet_sk(const struct sock *sk)
+{
+ return (struct inet_sock *)sk;
+}
+
+/**
+ * read_in6_addr reads ipv6 address from `in6` struct
+ */
+FUNC_INLINE void read_in6_addr(__u64 *addr_h, __u64 *addr_l, const struct in6_addr *in6)
+{
+ bpf_probe_read_kernel(addr_h, sizeof(addr_h), _(&in6->in6_u.u6_addr32[0]));
+ bpf_probe_read_kernel(addr_l, sizeof(addr_l), _(&in6->in6_u.u6_addr32[2]));
+}
+
+/**
+ * read_sport reads source port from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns source port in host byte order
+ */
+FUNC_INLINE __u16 read_sport(struct sock *skp)
+{
+ // try skc_num, then inet_sport
+ __u16 sport;
+ bpf_probe_read_kernel(&sport, sizeof(sport), _(&skp->__sk_common.skc_num));
+ if (sport == 0) {
+ struct inet_sock *inet_sock = inet_sk(skp);
+ bpf_probe_read_kernel(&sport, sizeof(sport), _(&inet_sock->inet_sport));
+ sport = bpf_ntohs(sport);
+ }
+
+ return sport;
+}
+
+/**
+ * read_dport reads destination port from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns destination port in host byte order
+ */
+FUNC_INLINE __u16 read_dport(struct sock *skp)
+{
+ __u16 dport;
+ bpf_probe_read_kernel(&dport, sizeof(dport), _(&skp->__sk_common.skc_dport));
+ if (dport == 0) {
+ struct inet_sock *inet_sock = inet_sk(skp);
+ bpf_probe_read_kernel(&dport, sizeof(dport), _(&inet_sock->sk.__sk_common.skc_dport));
+ }
+
+ return bpf_ntohs(dport);
+}
+
+/**
+ * read_saddr_v4 reads source ipv4 address from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns source ipv4 address
+ */
+FUNC_INLINE __u32 read_saddr_v4(struct sock *skp)
+{
+ __u32 saddr;
+ bpf_probe_read_kernel(&saddr, sizeof(saddr), _(&skp->__sk_common.skc_rcv_saddr));
+ if (saddr == 0) {
+ struct inet_sock *inet_sockp = inet_sk(skp);
+ bpf_probe_read_kernel(&saddr, sizeof(saddr), _(&inet_sockp->inet_saddr));
+ }
+
+ return saddr;
+}
+
+/**
+ * read_daddr_v4 reads destination ipv4 address from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns destination ipv4 address
+ */
+FUNC_INLINE __u32 read_daddr_v4(struct sock *skp)
+{
+ __u32 daddr;
+ bpf_probe_read_kernel(&daddr, sizeof(daddr), _(&skp->__sk_common.skc_daddr));
+ if (daddr == 0) {
+ struct inet_sock *inet_sock = inet_sk(skp);
+ bpf_probe_read_kernel(&daddr, sizeof(daddr), _(&inet_sock->sk.__sk_common.skc_daddr));
+ }
+
+ return daddr;
+}
+
+/**
+ * read_saddr_v6 reads source ipv6 address from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns none
+ */
+FUNC_INLINE void read_saddr_v6(struct sock *skp, __u64 *addr_h, __u64 *addr_l)
+{
+ struct in6_addr in6;
+ bpf_probe_read_kernel(&in6, sizeof(in6), _(&skp->__sk_common.skc_v6_rcv_saddr));
+ read_in6_addr(addr_h, addr_l, &in6);
+}
+
+/**
+ * read_daddr_v6 reads destination ipv6 address from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns none
+ */
+FUNC_INLINE void read_daddr_v6(struct sock *skp, __u64 *addr_h, __u64 *addr_l)
+{
+ struct in6_addr in6;
+ bpf_probe_read_kernel(&in6, sizeof(in6), _(&skp->__sk_common.skc_v6_daddr));
+ read_in6_addr(addr_h, addr_l, &in6);
+}
+
+/**
+ * _sk_family reads socket family from `sock` struct
+ * @sk: `sock` struct
+ *
+ * Returns socket family
+ */
+FUNC_INLINE __u16 _sk_family(struct sock *skp)
+{
+ __u16 family;
+ bpf_probe_read_kernel(&family, sizeof(family), _(&skp->__sk_common.skc_family));
+ return family;
+}
+
+/**
+ * read_conn_tuple reads values into a `conn_event` from a `sock` struct.
+ * @t: `conn_event` struct
+ * @skp: `sock` struct
+ *
+ * Returns 0 on success, 1 otherwise.
+ */
+FUNC_INLINE int read_conn_tuple(struct conn_event *t, struct sock *skp)
+{
+ int err = 0;
+
+ u16 family = _sk_family(skp);
+ // Retrieve addresses
+ if (family == AF_INET) {
+ if (t->saddr_l == 0) {
+ t->saddr_l = read_saddr_v4(skp);
+ }
+ if (t->daddr_l == 0) {
+ t->daddr_l = read_daddr_v4(skp);
+ }
+
+ if (t->saddr_l == 0 || t->daddr_l == 0) {
+ err = 1;
+ }
+ } else if (family == AF_INET6) {
+ if (!(t->saddr_h || t->saddr_l)) {
+ read_saddr_v6(skp, &t->saddr_h, &t->saddr_l);
+ }
+ if (!(t->daddr_h || t->daddr_l)) {
+ read_daddr_v6(skp, &t->daddr_h, &t->daddr_l);
+ }
+
+ if (!(t->saddr_h || t->saddr_l)) {
+ err = 1;
+ }
+
+ if (!(t->daddr_h || t->daddr_l)) {
+ err = 1;
+ }
+
+ // Check if we can map IPv6 to IPv4
+ if (is_ipv4_mapped_ipv6(t->saddr_h, t->saddr_l, t->daddr_h, t->daddr_l)) {
+ t->saddr_h = 0;
+ t->daddr_h = 0;
+ t->saddr_l = (__u32)(t->saddr_l >> 32);
+ t->daddr_l = (__u32)(t->daddr_l >> 32);
+ }
+ } else {
+ err = 1;
+ }
+
+ // Retrieve ports
+ if (t->sport == 0) {
+ t->sport = read_sport(skp);
+ }
+ if (t->dport == 0) {
+ t->dport = read_dport(skp);
+ }
+
+ if (t->sport == 0 || t->dport == 0) {
+ err = 1;
+ }
+
+ return err;
+}
+
+/**
+ * read_conn_stats reads incremental stats into a `conn_stats` for a `sock` struct.
+ * @t: `conn_stats` struct
+ * @skp: `sock` struct
+ *
+ * Returns 0 on success, 1 otherwise.
+ */
+FUNC_INLINE int read_conn_stats(struct conn_stats *incr_stats, struct sock *skp)
+{
+ // Read current socket connection
+ struct conn_event t = { 0 };
+ if (read_conn_tuple(&t, skp))
+ return 1;
+
+ // Read socket connection stats
+ // IMPORTANT to read them into correct types and then later cast into our custom types
+ __u32 packets_in, packets_out, total_retrans;
+ __u64 bytes_received, bytes_sent, bytes_retrans;
+
+ // Cast into tcp_sock struct to read packets and bytes
+ struct tcp_sock *tcp_skp = tcp_sk(skp);
+
+ // Use helpers to read kernel memory
+ bpf_probe_read_kernel(&packets_in, sizeof(packets_in), _(&tcp_skp->segs_in));
+ bpf_probe_read_kernel(&packets_out, sizeof(packets_out), _(&tcp_skp->segs_out));
+ bpf_probe_read_kernel(&bytes_received, sizeof(bytes_received), _(&tcp_skp->bytes_received));
+ bpf_probe_read_kernel(&bytes_sent, sizeof(bytes_sent), _(&tcp_skp->bytes_sent));
+ bpf_probe_read_kernel(&total_retrans, sizeof(total_retrans), _(&tcp_skp->total_retrans));
+ bpf_probe_read_kernel(&bytes_retrans, sizeof(bytes_retrans), _(&tcp_skp->bytes_retrans));
+
+ struct conn_stats *stats = bpf_map_lookup_elem(&socket_accumulator, &t);
+ if (!stats) {
+ // Update stats
+ incr_stats->packets_in = (__u64)packets_in;
+ incr_stats->packets_out = (__u64)packets_out;
+ incr_stats->bytes_received = bytes_received;
+ incr_stats->bytes_sent = bytes_sent;
+ incr_stats->total_retrans = (__u64)total_retrans;
+ incr_stats->bytes_retrans = bytes_retrans;
+
+ // Update map
+ bpf_map_update_elem(&socket_accumulator, &t, incr_stats, BPF_NOEXIST);
+
+ return 0;
+ }
+
+ // Update incr_stats
+ incr_stats->packets_in = (__u64)packets_in - stats->packets_in;
+ incr_stats->packets_out = (__u64)packets_out - stats->packets_out;
+ incr_stats->bytes_received = bytes_received - stats->bytes_received;
+ incr_stats->bytes_sent = bytes_sent - stats->bytes_sent;
+ incr_stats->total_retrans = (__u64)total_retrans - stats->total_retrans;
+ incr_stats->bytes_retrans = bytes_retrans - stats->bytes_retrans;
+
+ // Update map with new counters
+ stats->packets_in = (__u64)packets_in;
+ stats->packets_out = (__u64)packets_out;
+ stats->bytes_received = bytes_received;
+ stats->bytes_sent = bytes_sent;
+ stats->total_retrans = (__u64)total_retrans;
+ stats->bytes_retrans = bytes_retrans;
+
+ return 0;
+}
diff --git a/pkg/collector/bpf/network/bpf_network.c b/pkg/collector/bpf/network/bpf_network.c
index 6fcd9fbf..b19e824e 100644
--- a/pkg/collector/bpf/network/bpf_network.c
+++ b/pkg/collector/bpf/network/bpf_network.c
@@ -9,44 +9,204 @@ char __license[] SEC("license") = "GPL";
/**
* Network related programs.
*
- * These are internal kernel functions that we are tracing and the
- * names can be architecture dependent. So we need to check
- * /proc/kallsmys at runtime and check correct function name and
- * hook the program.
+ * Currently we are monitoring TCP and UDP traffic for both
+ * IPv4 and IPv6.
*
- * If we use fentry (which should be theoritically more performant)
- * we will have to set correct function name at runtime which is
- * complicated to achieve. So, we use kprobes for these events.
+ * The changes in the function signatures of certain functions are
+ * taken into account. Similarly, `tcp_sendpage` and `udp_sendpage`
+ * have been removed in kernel 6.5 and this has been taken into
+ * account as well. The functions we trace here are exported
+ * functions and their names should not change with any
+ * kernel-related optimizations.
*
- * Inital benchmarks showed that fentry/fexit is 100-150 ns faster
- * than kprobes.
+ * For x86_64, fentry/fexit probes are used, which give the best performance,
+ * and for the rest of the architectures, kprobes/kretprobes are used.
*
- * However, cilium ebpf refused to load program __netif_receive_skb_core
- * on newer kernels as there is wrapper exported function
- * netif_receive_skb_core in kernels 6.x.
- * In older kernels there is a bug that is preventing the tracing
- * functions to access skb pointer.
+ * Initial benchmarks show the tcp_sendmsg probe takes 1200ns/call whereas
+ * the tcp_recvmsg probe takes 6000ns/call. More benchmarks are needed to
+ * measure the overhead of the other probes. These measurements were made
+ * with bpftool after setting `sysctl -w kernel.bpf_stats_enabled=1`.
+ * Note that there is an overhead of around 20-30ns due to the
+ * instrumentation itself.
*
- * To avoid more complications, we use ONLY kprobes which should work
- * in all cases.
- *
- * NOTE that we still need to find architectural specific names
- * before loading the program by lookin at /proc/kallsyms
*/
-SEC("kprobe/__netif_receive_skb_core")
-__u64 kprobe___netif_receive_skb_core(struct pt_regs *ctx)
+#if defined(__TARGET_ARCH_x86)
+
+SEC("fexit/tcp_sendmsg")
+__u64 BPF_PROG(fexit_tcp_sendmsg, struct sock *sk, struct msghdr *msg, size_t size, int ret)
+{
+ return handle_tcp_event(sk);
+}
+
+SEC("fexit/udp_sendmsg")
+__u64 BPF_PROG(fexit_udp_sendmsg, struct sock *sk, struct msghdr *msg, size_t size, int ret)
+{
+ return handle_udp_event(ret, AF_INET, MODE_EGRESS);
+}
+
+SEC("fexit/udpv6_sendmsg")
+__u64 BPF_PROG(fexit_udpv6_sendmsg, struct sock *sk, struct msghdr *msg, size_t size, int ret)
+{
+ return handle_udp_event(ret, AF_INET6, MODE_EGRESS);
+}
+
+#if defined(__KERNEL_PRE_v64)
+
+SEC("fexit/tcp_sendpage")
+__u64 BPF_PROG(fexit_tcp_sendpage, struct sock *sk, struct page *page, int offset, size_t size, int flags, int ret)
+{
+ return handle_tcp_event(sk);
+}
+
+SEC("fexit/udp_sendpage")
+__u64 BPF_PROG(fexit_udp_sendpage, struct sock *sk, struct page *page, int offset, size_t size, int flags, int ret)
+{
+ return handle_udp_event(ret, AF_INET, MODE_EGRESS);
+}
+
+#endif
+
+#if defined(__KERNEL_PRE_v519)
+
+SEC("fexit/tcp_recvmsg")
+__u64 BPF_PROG(fexit_tcp_recvmsg, struct sock *sk, struct msghdr *msg, size_t size, int noblock, int flags, int *addr_len, int ret)
+{
+ return handle_tcp_event(sk);
+}
+
+SEC("fexit/udp_recvmsg")
+__u64 BPF_PROG(fexit_udp_recvmsg, struct sock *sk, struct msghdr *msg, size_t size, int noblock, int flags, int *addr_len, int ret)
{
- struct sk_buff *skb;
- bpf_probe_read_kernel(&skb, sizeof(skb), (void *) _(PT_REGS_PARM1(ctx)));
+ return handle_udp_event(ret, AF_INET, MODE_INGRESS);
+}
- return handle_skb_event(skb, MODE_INGRESS);
+SEC("fexit/udpv6_recvmsg")
+__u64 BPF_PROG(fexit_udpv6_recvmsg, struct sock *sk, struct msghdr *msg, size_t size, int noblock, int flags, int *addr_len, int ret)
+{
+ return handle_udp_event(ret, AF_INET6, MODE_INGRESS);
}
-SEC("kprobe/__dev_queue_xmit")
-__u64 kprobe___dev_queue_xmit(struct pt_regs *ctx)
+#else
+
+SEC("fexit/tcp_recvmsg")
+__u64 BPF_PROG(fexit_tcp_recvmsg, struct sock *sk, struct msghdr *msg, size_t size, int flags, int *addr_len, int ret)
{
- struct sk_buff *skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
+ return handle_tcp_event(sk);
+}
- return handle_skb_event(skb, MODE_EGRESS);
+SEC("fexit/udp_recvmsg")
+__u64 BPF_PROG(fexit_udp_recvmsg, struct sock *sk, struct msghdr *msg, size_t size, int flags, int *addr_len, int ret)
+{
+ return handle_udp_event(ret, AF_INET, MODE_INGRESS);
}
+
+SEC("fexit/udpv6_recvmsg")
+__u64 BPF_PROG(fexit_udpv6_recvmsg, struct sock *sk, struct msghdr *msg, size_t size, int flags, int *addr_len, int ret)
+{
+ return handle_udp_event(ret, AF_INET6, MODE_INGRESS);
+}
+
+#endif
+
+#else
+
+SEC("kprobe/tcp_sendmsg")
+__u64 kprobe_tcp_sendmsg(struct pt_regs *ctx)
+{
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+
+ return handle_tcp_event(sk);
+}
+
+SEC("kretprobe/udp_sendmsg")
+__u64 kretprobe_udp_sendmsg(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET, MODE_EGRESS);
+}
+
+SEC("kretprobe/udpv6_sendmsg")
+__u64 kretprobe_udpv6_sendmsg(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET6, MODE_EGRESS);
+}
+
+#if defined(__KERNEL_PRE_v64)
+
+SEC("kprobe/tcp_sendpage")
+__u64 kprobe_tcp_sendpage(struct pt_regs *ctx)
+{
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+
+ return handle_tcp_event(sk);
+}
+
+SEC("kretprobe/udp_sendpage")
+__u64 kretprobe_udp_sendpage(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET, MODE_EGRESS);
+}
+
+#endif
+
+#if defined(__KERNEL_PRE_v519)
+
+SEC("kprobe/tcp_recvmsg")
+__u64 kprobe_tcp_recvmsg(struct pt_regs *ctx)
+{
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+
+ return handle_tcp_event(sk);
+}
+
+SEC("kretprobe/udp_recvmsg")
+__u64 kretprobe_udp_recvmsg(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET, MODE_INGRESS);
+}
+
+SEC("kretprobe/udpv6_recvmsg")
+__u64 kretprobe_udpv6_recvmsg(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET6, MODE_INGRESS);
+}
+
+#else
+
+SEC("kprobe/tcp_recvmsg")
+__u64 kprobe_tcp_recvmsg(struct pt_regs *ctx)
+{
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+
+ return handle_tcp_event(sk);
+}
+
+SEC("kretprobe/udp_recvmsg")
+__u64 kretprobe_udp_recvmsg(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET, MODE_INGRESS);
+}
+
+SEC("kretprobe/udpv6_recvmsg")
+__u64 kretprobe_udpv6_recvmsg(struct pt_regs *ctx)
+{
+ int ret = (int)PT_REGS_RC(ctx);
+
+ return handle_udp_event(ret, AF_INET6, MODE_INGRESS);
+}
+
+#endif
+
+#endif
diff --git a/pkg/collector/bpf/network/bpf_network.h b/pkg/collector/bpf/network/bpf_network.h
index 5c9bcaf2..00df7a6f 100644
--- a/pkg/collector/bpf/network/bpf_network.h
+++ b/pkg/collector/bpf/network/bpf_network.h
@@ -10,20 +10,22 @@
#include "bpf_core_read.h"
#include "bpf_cgroup.h"
+#include "bpf_sock.h"
enum net_mode {
- MODE_INGRESS,
- MODE_EGRESS
+ MODE_INGRESS,
+ MODE_EGRESS
};
-/* network related event key struct */
-struct net_event_key {
+/* network related event struct */
+struct net_event {
__u32 cid; /* cgroup ID */
- __u8 dev[16]; /* Device name */
+ __u16 proto; /* TCP/UDP */
+ __u16 fam; /* sk family AF_INET/AF_INET6 */
};
-/* Any network IPv4/IPv6 related event */
-struct net_event {
+/* Any network IPv4/IPv6 related stats */
+struct net_stats {
__u64 packets; /* Packets counter */
__u64 bytes; /* Bytes counter */
};
@@ -32,86 +34,214 @@ struct net_event {
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, MAX_MAP_ENTRIES);
- __type(key, struct net_event_key); /* Key is the vfs_event_key struct */
- __type(value, struct net_event);
+ __type(key, struct net_event); /* Key is the net_event struct */
+ __type(value, struct net_stats);
} ingress_accumulator SEC(".maps");
/* Map to track ingress events */
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, MAX_MAP_ENTRIES);
- __type(key, struct net_event_key); /* Key is the net_event_key struct */
- __type(value, struct net_event);
+ __type(key, struct net_event); /* Key is the net_event struct */
+ __type(value, struct net_stats);
} egress_accumulator SEC(".maps");
+/* Map to track retransmission events */
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, MAX_MAP_ENTRIES);
+ __type(key, struct net_event); /* Key is the net_event struct */
+ __type(value, struct net_stats);
+} retrans_accumulator SEC(".maps");
+
+/**
+ * handle_ingress_event updates the maps with ingress event
+ * @key: target key to `ingress_accumulator`
+ * @stats: `conn_stats` struct to update map
+ *
+ * Returns always 0.
+ */
+FUNC_INLINE __u64 handle_ingress_event(struct net_event *key, struct conn_stats *stats)
+{
+ // Get current ingress stats
+ struct net_stats *ingress_stats = bpf_map_lookup_elem(&ingress_accumulator, key);
+ if (!ingress_stats) {
+ // New event
+ struct net_stats new_stats = { .packets = stats->packets_in, .bytes = stats->bytes_received };
+
+ // Update map with new key and event
+ bpf_map_update_elem(&ingress_accumulator, key, &new_stats, BPF_NOEXIST);
+
+ return 0;
+ }
+
+ // Update map with new stats only when the packets are non zero
+ if (stats->packets_in > 0) {
+ __sync_fetch_and_add(&ingress_stats->bytes, stats->bytes_received);
+ __sync_fetch_and_add(&ingress_stats->packets, stats->packets_in);
+ }
+
+ return 0;
+}
+
+/**
+ * handle_egress_event updates the maps with egress event
+ * @key: target key to `egress_accumulator`
+ * @stats: `conn_stats` struct to update map
+ *
+ * Returns always 0.
+ */
+FUNC_INLINE __u64 handle_egress_event(struct net_event *key, struct conn_stats *stats)
+{
+ // Get current egress stats
+ struct net_stats *egress_stats = bpf_map_lookup_elem(&egress_accumulator, key);
+ if (!egress_stats) {
+ // New event with increment call counter
+ struct net_stats new_stats = { .packets = stats->packets_out, .bytes = stats->bytes_sent };
+
+ // Update map with new key and event
+ bpf_map_update_elem(&egress_accumulator, key, &new_stats, BPF_NOEXIST);
+
+ return 0;
+ }
+
+ // Update map with new stats only when the packets are non zero
+ if (stats->packets_out > 0) {
+ __sync_fetch_and_add(&egress_stats->bytes, stats->bytes_sent);
+ __sync_fetch_and_add(&egress_stats->packets, stats->packets_out);
+ }
+
+ return 0;
+}
+
/**
- * handle_skb_event updates the maps with event by incrementing packets
- * and bytes counters to the existing event
- * @skb: target skb
+ * handle_retrans_event updates the maps with retransmission event
+ * @key: target key to `retrans_accumulator`
+ * @stats: `conn_stats` struct to update map
*
* Returns always 0.
*/
-FUNC_INLINE __u64 handle_skb_event(struct sk_buff *skb, int type)
+FUNC_INLINE __u64 handle_retrans_event(struct net_event *key, struct conn_stats *stats)
{
- __u32 len;
- struct net_device *dev;
+ // Get current retrans stats
+ struct net_stats *retrans_stats = bpf_map_lookup_elem(&retrans_accumulator, key);
+ if (!retrans_stats) {
+ // New event with increment call counter
+ struct net_stats new_stats = { .packets = stats->total_retrans, .bytes = stats->bytes_retrans };
- struct net_event_key key = {0};
+ // Update map with new key and event
+ bpf_map_update_elem(&retrans_accumulator, key, &new_stats, BPF_NOEXIST);
- // Get cgroup ID
- key.cid = (__u32) ceems_get_current_cgroup_id();
+ return 0;
+ }
- // If cgroup id is 1, it means it is root cgroup and we are not really interested
- // in it and so return
- // Similarly if cgroup id is 0, it means we failed to get cgroup ID
- if (key.cid == 0 || key.cid == 1)
- return TC_ACT_OK;
+ // Update map with new stats only when the packets are non zero
+ if (stats->total_retrans > 0) {
+ __sync_fetch_and_add(&retrans_stats->bytes, stats->bytes_retrans);
+ __sync_fetch_and_add(&retrans_stats->packets, stats->total_retrans);
+ }
- // Read packet bytes and device name
- bpf_probe_read_kernel(&len, sizeof(len), _(&skb->len));
- bpf_probe_read_kernel(&dev, sizeof(dev), _(&skb->dev));
- bpf_probe_read_kernel_str(&key.dev, sizeof(key.dev), _(&dev->name));
+ return 0;
+}
+
+/**
+ * handle_tcp_event updates the maps based on TCP socket events
+ * @skp: target `sock` struct
+ *
+ * Returns always 0.
+ */
+FUNC_INLINE __u64 handle_tcp_event(struct sock *skp)
+{
+ struct net_event key = { 0 };
+
+	// Ignore if the cgroup ID cannot be found
+ key.cid = (__u32)ceems_get_current_cgroup_id();
+ if (key.cid == 0)
+ return 0;
+
+ /**
+ * We can directly access kernel memory without helpers for fentry/fexit bpf progs
+ * Ref: https://nakryiko.com/posts/bpf-core-reference-guide/#bpf-core-read-1
+ *
+	 * However, we still need to use kprobe/kretprobe for archs other than x86 and
+	 * thus we always access memory using the read helpers so that the same functions
+	 * work for all types of probes.
+ */
+ key.fam = _sk_family(skp);
+ key.proto = (__u16)IPPROTO_TCP;
+
+ // If conn_stats is null, return. There is nothing to update
+ struct conn_stats stats = { 0 };
+ if (read_conn_stats(&stats, skp)) {
+ return 0;
+ }
- struct net_event *event;
+ // Handle ingress, egress and retrans events
+ handle_ingress_event(&key, &stats);
+ handle_egress_event(&key, &stats);
+ handle_retrans_event(&key, &stats);
+
+ return 0;
+}
+
+/**
+ * handle_udp_event updates the maps based on UDP socket events
+ * @ret: return value of kernel function. Either size or error
+ *
+ * Returns always 0.
+ */
+FUNC_INLINE __u64 handle_udp_event(int ret, int family, int type)
+{
+	// A zero or negative return value means a failed or empty event
+ if (ret <= 0)
+ return 0;
+
+ __u64 bytes = (__u64)ret;
+ struct net_event key = { 0 };
+
+	// Ignore if the cgroup ID cannot be found
+ key.cid = (__u32)ceems_get_current_cgroup_id();
+ if (key.cid == 0)
+ return 0;
+
+ key.fam = (__u16)family;
+ key.proto = (__u16)IPPROTO_UDP;
// Fetch event from correct map
+ struct net_stats *stats;
switch (type) {
case MODE_INGRESS:
- event = bpf_map_lookup_elem(&ingress_accumulator, &key);
+ stats = bpf_map_lookup_elem(&ingress_accumulator, &key);
break;
case MODE_EGRESS:
- event = bpf_map_lookup_elem(&egress_accumulator, &key);
+ stats = bpf_map_lookup_elem(&egress_accumulator, &key);
break;
default:
- return TC_ACT_OK;
+ return 0;
}
- // Get packet size
- __u64 bytes = (__u64) bpf_ntohs(len);
-
- if (!event) {
+ if (!stats) {
// New event with increment call counter
- struct net_event new_event = { .packets = 1, .bytes = bytes };
-
- // Update map with new key and event
+ struct net_stats new_stats = { .packets = 1, .bytes = bytes };
+
+ // Update map with new key and event
switch (type) {
case MODE_INGRESS:
- bpf_map_update_elem(&ingress_accumulator, &key, &new_event, BPF_NOEXIST);
+ bpf_map_update_elem(&ingress_accumulator, &key, &new_stats, BPF_NOEXIST);
break;
case MODE_EGRESS:
- bpf_map_update_elem(&egress_accumulator, &key, &new_event, BPF_NOEXIST);
+ bpf_map_update_elem(&egress_accumulator, &key, &new_stats, BPF_NOEXIST);
break;
default:
- return TC_ACT_OK;
+ return 0;
}
- return TC_ACT_OK;
- }
+ return 0;
+ }
- // Always increment calls
- __sync_fetch_and_add(&event->packets, 1);
- __sync_fetch_and_add(&event->bytes, bytes);
+ // Increment packets and bytes
+ __sync_fetch_and_add(&stats->packets, 1);
+ __sync_fetch_and_add(&stats->bytes, bytes);
- // Let the packet pass
- return TC_ACT_OK;
+ return 0;
}
From fbd2c842ff1a33fffcbcdecfd52a5b461a6c3e7c Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Sun, 15 Sep 2024 18:23:22 +0200
Subject: [PATCH 12/18] style: Add clang-formatting rules and make target
Signed-off-by: Mahendra Paipuri
---
.clang-format | 51 +++++++
Makefile.common | 31 ++++-
pkg/collector/bpf/include/compiler.h | 19 +--
pkg/collector/bpf/include/net_shared.h | 34 ++---
pkg/collector/bpf/lib/bpf_cgroup.h | 3 +-
pkg/collector/bpf/lib/bpf_helpers.h | 180 ++++++++++++++-----------
pkg/collector/bpf/lib/bpf_path.h | 16 +--
pkg/collector/bpf/lib/config.h | 2 +-
pkg/collector/bpf/vfs/bpf_vfs.c | 142 +++++++++----------
pkg/collector/bpf/vfs/bpf_vfs.h | 114 ++++++++--------
10 files changed, 344 insertions(+), 248 deletions(-)
create mode 100644 .clang-format
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 00000000..d7988f90
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,51 @@
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveBitFields: false
+AlignConsecutiveDeclarations: false
+AlignConsecutiveMacros: true
+AlignEscapedNewlines: Left
+AlignOperands: true
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortEnumsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+BasedOnStyle: LLVM
+BraceWrapping:
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: true
+ AfterStruct: false
+ AfterUnion: false
+ BeforeElse: false
+ IndentBraces: false
+BreakBeforeBraces: Custom
+ColumnLimit: 0
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 8
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+IndentCaseLabels: false
+IndentPPDirectives: None
+IndentWidth: 8
+IndentWrappedFunctionNames: false
+PointerAlignment: Right
+ReflowComments: false
+SortIncludes: false
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: false
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+TabWidth: 8
+UseTab: Always
diff --git a/Makefile.common b/Makefile.common
index 442312a8..86830e4f 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -24,9 +24,11 @@ PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9]
PROMU := $(FIRST_GOPATH)/bin/promu
SWAG := $(FIRST_GOPATH)/bin/swag
-CLANG := $(firstword $(subst :, ,$(shell which clang)))
pkgs = ./...
+# clang format
+FORMAT_FIND_FLAGS ?= -name '*.c' -o -name '*.h' -not -path 'pkg/collector/bpf/include/vmlinux.h' -not -path 'pkg/collector/bpf/libbpf/*'
+
ifeq (arm, $(GOHOSTARCH))
GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
@@ -173,6 +175,10 @@ else
yamllint .
endif
+.PHONY: common-clang-format
+clang-format: ## Run code formatter on BPF code.
+ find pkg/collector/bpf $(FORMAT_FIND_FLAGS) | xargs -n 1000 clang-format -i -style=file
+
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
@@ -247,21 +253,32 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
-# Build bpf assets using clang
-.PHONY: bpf
+# Build bpf assets only when CGO_BUILD=0
ifeq ($(CGO_BUILD), 0)
-bpf: clang
+# Build bpf assets
+.PHONY: bpf
+bpf: clang bpfclean
@echo ">> building bpf assets using clang"
- $(MAKE) -C ./pkg/collector/bpf clean
$(MAKE) -C ./pkg/collector/bpf
+
+# Clean existing bpf assets. When GOARCH is set we ALWAYS clean the
+# assets as we need to build them for each architecture
+.PHONY: bpfclean
+ifdef GOARCH
+bpfclean:
+ @echo ">> cleaning existing bpf assets"
+ $(MAKE) -C ./pkg/collector/bpf clean
endif
-# Install clang using script. We chack the version of clang inside the script and ensure
-# that we install clang >= 18
+# Install clang using the script. Do it only when GOARCH is set, as we need
+# clang to build the bpf assets for the go binaries inside the golang-builder container.
.PHONY: clang
+ifdef GOARCH
clang:
@echo ">> installing clang"
@./scripts/install_clang.sh
+endif
+endif
# Dont run swagger for release builds. This is due to cross compiling with GOARCH set
# to different archs and swag will be built in arch specific bin folder.
diff --git a/pkg/collector/bpf/include/compiler.h b/pkg/collector/bpf/include/compiler.h
index 85a308fc..89bb1968 100644
--- a/pkg/collector/bpf/include/compiler.h
+++ b/pkg/collector/bpf/include/compiler.h
@@ -22,15 +22,15 @@ const void *__builtin_preserve_access_index(void *);
#define _(P) (__builtin_preserve_access_index(P))
#ifndef likely
-# define likely(X) __builtin_expect(!!(X), 1)
+#define likely(X) __builtin_expect(!!(X), 1)
#endif
#ifndef unlikely
-# define unlikely(X) __builtin_expect(!!(X), 0)
+#define unlikely(X) __builtin_expect(!!(X), 0)
#endif
#ifndef __inline__
-# define __inline__ __attribute__((always_inline))
+#define __inline__ __attribute__((always_inline))
#endif
#define DEBUG
@@ -38,13 +38,16 @@ const void *__builtin_preserve_access_index(void *);
/* Only use this for debug output. Notice output from bpf_trace_printk()
* ends up in /sys/kernel/debug/tracing/trace_pipe
*/
-#define bpf_debug(fmt, ...) \
- ({ \
- char ____fmt[] = fmt; \
- bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
+#define bpf_debug(fmt, ...) \
+ ({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
})
#else
-#define bpf_debug(fmt, ...){;}
+#define bpf_debug(fmt, ...) \
+ { \
+ ; \
+ }
#endif
// Just to ensure that we can use vfs_write/vfs_read calls
diff --git a/pkg/collector/bpf/include/net_shared.h b/pkg/collector/bpf/include/net_shared.h
index 4cc59b0f..f72a0dba 100644
--- a/pkg/collector/bpf/include/net_shared.h
+++ b/pkg/collector/bpf/include/net_shared.h
@@ -3,33 +3,33 @@
#ifndef _NET_SHARED_H
#define _NET_SHARED_H
-#define AF_INET 2
-#define AF_INET6 10
+#define AF_INET 2
+#define AF_INET6 10
-#define ETH_ALEN 6
+#define ETH_ALEN 6
#define ETH_P_802_3_MIN 0x0600
-#define ETH_P_8021Q 0x8100
-#define ETH_P_8021AD 0x88A8
-#define ETH_P_IP 0x0800
-#define ETH_P_IPV6 0x86DD
-#define ETH_P_ARP 0x0806
-#define IPPROTO_ICMPV6 58
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+#define ETH_P_ARP 0x0806
+#define IPPROTO_ICMPV6 58
-#define TC_ACT_OK 0
-#define TC_ACT_SHOT 2
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
#define IFNAMSIZ 16
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define bpf_ntohs(x) __builtin_bswap16(x)
-#define bpf_htons(x) __builtin_bswap16(x)
+#define bpf_ntohs(x) __builtin_bswap16(x)
+#define bpf_htons(x) __builtin_bswap16(x)
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define bpf_ntohs(x) (x)
-#define bpf_htons(x) (x)
+#define bpf_ntohs(x) (x)
+#define bpf_htons(x) (x)
#else
-# error "Endianness detection needs to be set up for your compiler?!"
+#error "Endianness detection needs to be set up for your compiler?!"
#endif
-#endif
+#endif
diff --git a/pkg/collector/bpf/lib/bpf_cgroup.h b/pkg/collector/bpf/lib/bpf_cgroup.h
index 9480c690..8729ab79 100644
--- a/pkg/collector/bpf/lib/bpf_cgroup.h
+++ b/pkg/collector/bpf/lib/bpf_cgroup.h
@@ -19,7 +19,7 @@
#define EVENT_ERROR_CGROUP_KN 0x020000
#define EVENT_ERROR_CGROUP_SUBSYSCGRP 0x040000
#define EVENT_ERROR_CGROUP_SUBSYS 0x080000
-#define EVENT_ERROR_CGROUPS 0x100000
+#define EVENT_ERROR_CGROUPS 0x100000
#define EVENT_ERROR_CGROUP_ID 0x200000
/* Represent old kernfs node with the kernfs_node_id
@@ -188,7 +188,6 @@ FUNC_INLINE __u64 ceems_get_current_cgroupv1_id(int subsys_idx)
return get_cgroup_id(cgrp);
}
-
/**
* ceems_get_current_cgroup_id() Returns the accurate cgroup id of current task.
*
diff --git a/pkg/collector/bpf/lib/bpf_helpers.h b/pkg/collector/bpf/lib/bpf_helpers.h
index b461733d..bc918ba1 100644
--- a/pkg/collector/bpf/lib/bpf_helpers.h
+++ b/pkg/collector/bpf/lib/bpf_helpers.h
@@ -11,8 +11,8 @@
*/
#include "bpf_helper_defs.h"
-#define __uint(name, val) int (*name)[val]
-#define __type(name, val) typeof(val) *name
+#define __uint(name, val) int(*name)[val]
+#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
/*
@@ -23,11 +23,11 @@
* To allow use of SEC() with externs (e.g., for extern .maps declarations),
* make sure __attribute__((unused)) doesn't trigger compilation warning.
*/
-#define SEC(name) \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
- __attribute__((section(name), used)) \
- _Pragma("GCC diagnostic pop") \
+#define SEC(name) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
+ __attribute__((section(name), used)) \
+ _Pragma("GCC diagnostic pop")
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
@@ -66,13 +66,13 @@
* Helper macros to manipulate data structures
*/
#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
-#define container_of(ptr, type, member) \
- ({ \
- void *__mptr = (void *)(ptr); \
- ((type *)(__mptr - offsetof(type, member))); \
+#define container_of(ptr, type, member) \
+ ({ \
+ void *__mptr = (void *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
})
#endif
@@ -89,7 +89,7 @@
* being compiled out.
*/
#ifndef __bpf_unreachable
-# define __bpf_unreachable() __builtin_trap()
+#define __bpf_unreachable() __builtin_trap()
#endif
/*
@@ -118,8 +118,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
asm volatile("r1 = %[ctx]\n\t"
"r2 = %[map]\n\t"
"r3 = %[slot]\n\t"
- "call 12"
- :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
+ "call 12" ::[ctx] "r"(ctx),
+ [map] "r"(map), [slot] "i"(slot)
: "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
@@ -149,10 +149,10 @@ enum libbpf_tristate {
};
#define __kconfig __attribute__((section(".kconfig")))
-#define __ksym __attribute__((section(".ksyms")))
+#define __ksym __attribute__((section(".ksyms")))
#ifndef ___bpf_concat
-#define ___bpf_concat(a, b) a ## b
+#define ___bpf_concat(a, b) a##b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
@@ -165,19 +165,43 @@ enum libbpf_tristate {
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif
-#define ___bpf_fill0(arr, p, x) do {} while (0)
+#define ___bpf_fill0(arr, p, x) \
+ do { \
+ } while (0)
#define ___bpf_fill1(arr, p, x) arr[p] = x
-#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
-#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
-#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
-#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
-#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
-#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
-#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
-#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
-#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
-#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
-#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
+#define ___bpf_fill2(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill1(arr, p + 1, args)
+#define ___bpf_fill3(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill2(arr, p + 1, args)
+#define ___bpf_fill4(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill3(arr, p + 1, args)
+#define ___bpf_fill5(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill4(arr, p + 1, args)
+#define ___bpf_fill6(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill5(arr, p + 1, args)
+#define ___bpf_fill7(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill6(arr, p + 1, args)
+#define ___bpf_fill8(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill7(arr, p + 1, args)
+#define ___bpf_fill9(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill8(arr, p + 1, args)
+#define ___bpf_fill10(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill9(arr, p + 1, args)
+#define ___bpf_fill11(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill10(arr, p + 1, args)
+#define ___bpf_fill12(arr, p, x, args...) \
+ arr[p] = x; \
+ ___bpf_fill11(arr, p + 1, args)
#define ___bpf_fill(arr, args...) \
___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
@@ -185,37 +209,37 @@ enum libbpf_tristate {
* BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
* in a structure.
*/
-#define BPF_SEQ_PRINTF(seq, fmt, args...) \
-({ \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[___bpf_narg(args)]; \
- \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- ___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
- \
- bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
- ___param, sizeof(___param)); \
-})
+#define BPF_SEQ_PRINTF(seq, fmt, args...) \
+ ({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+ })
/*
* BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
* an array of u64.
*/
-#define BPF_SNPRINTF(out, out_size, fmt, args...) \
-({ \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[___bpf_narg(args)]; \
- \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- ___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
- \
- bpf_snprintf(out, out_size, ___fmt, \
- ___param, sizeof(___param)); \
-})
+#define BPF_SNPRINTF(out, out_size, fmt, args...) \
+ ({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_snprintf(out, out_size, ___fmt, \
+ ___param, sizeof(___param)); \
+ })
#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
@@ -223,38 +247,38 @@ enum libbpf_tristate {
#define BPF_PRINTK_FMT_MOD static const
#endif
-#define __bpf_printk(fmt, ...) \
-({ \
- BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
- bpf_trace_printk(____fmt, sizeof(____fmt), \
- ##__VA_ARGS__); \
-})
+#define __bpf_printk(fmt, ...) \
+ ({ \
+ BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+ })
/*
* __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
* instead of an array of u64.
*/
-#define __bpf_vprintk(fmt, args...) \
-({ \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[___bpf_narg(args)]; \
- \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- ___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
- \
- bpf_trace_vprintk(___fmt, sizeof(___fmt), \
- ___param, sizeof(___param)); \
-})
+#define __bpf_vprintk(fmt, args...) \
+ ({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_trace_vprintk(___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+ })
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
* Otherwise use __bpf_vprintk
*/
-#define ___bpf_pick_printk(...) \
- ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
- __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
- __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
+#define ___bpf_pick_printk(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/, \
__bpf_printk /*1*/, __bpf_printk /*0*/)
/* Helper macro to print out debug messages */
diff --git a/pkg/collector/bpf/lib/bpf_path.h b/pkg/collector/bpf/lib/bpf_path.h
index c96986c8..394e63f0 100644
--- a/pkg/collector/bpf/lib/bpf_path.h
+++ b/pkg/collector/bpf/lib/bpf_path.h
@@ -12,8 +12,8 @@
#define UNRESOLVED_PATH_COMPONENTS 0x02
#define PROBE_MNT_ITERATIONS 8
-#define ENAMETOOLONG 36 /* File name too long */
-#define MAX_BUF_LEN 4096
+#define ENAMETOOLONG 36 /* File name too long */
+#define MAX_BUF_LEN 4096
/* buffer in the heap */
struct buffer_heap_map_value {
@@ -52,7 +52,7 @@ struct mnt_path_data {
*
* Returns pointer to mnt of real mount path.
*/
-FUNC_INLINE struct mount* real_mount(struct vfsmount *mnt)
+FUNC_INLINE struct mount *real_mount(struct vfsmount *mnt)
{
return container_of_btf(mnt, struct mount, mnt);
}
@@ -147,7 +147,7 @@ FUNC_INLINE long mnt_path_read(struct mnt_path_data *data)
int error;
bpf_probe_read(&curr_de, sizeof(curr_de), _(&mnt->mnt_mountpoint));
-
+
/* Global root? */
if (curr_de == prev_de || IS_ROOT(curr_de)) {
@@ -226,7 +226,7 @@ FUNC_INLINE int prepend_mnt_path(struct file *file, char *bf, char **buffer, int
*buffer = data.bptr;
*buflen = data.blen;
-
+
return error;
}
@@ -262,7 +262,7 @@ FUNC_INLINE int prepend_mnt_path(struct file *file, char *bf, char **buffer, int
*
* ps. The size of the path will be (initial value of buflen) - (return value of buflen) if (buflen != 0)
*/
-FUNC_INLINE char* __mnt_path_local(struct file *file, char *buf, int *buflen, int *error)
+FUNC_INLINE char *__mnt_path_local(struct file *file, char *buf, int *buflen, int *error)
{
char *res = buf + *buflen;
@@ -284,9 +284,9 @@ FUNC_INLINE char* __mnt_path_local(struct file *file, char *buf, int *buflen, in
* 'error' is 0 in case of success or UNRESOLVED_PATH_COMPONENTS in the case
* where the path is larger than the provided buffer.
*/
-FUNC_INLINE char* mnt_path_local(struct file *file, int *buflen, int *error)
+FUNC_INLINE char *mnt_path_local(struct file *file, int *buflen, int *error)
{
- int zero = 0;
+ int zero = 0;
char *buffer = 0;
buffer = bpf_map_lookup_elem(&buffer_heap_map, &zero);
diff --git a/pkg/collector/bpf/lib/config.h b/pkg/collector/bpf/lib/config.h
index 81a19381..19e93a49 100644
--- a/pkg/collector/bpf/lib/config.h
+++ b/pkg/collector/bpf/lib/config.h
@@ -16,4 +16,4 @@ struct {
__type(value, struct conf);
} conf_map SEC(".maps");
-#endif // __CONF_
+#endif // __CONF_
diff --git a/pkg/collector/bpf/vfs/bpf_vfs.c b/pkg/collector/bpf/vfs/bpf_vfs.c
index e017df83..003e5952 100644
--- a/pkg/collector/bpf/vfs/bpf_vfs.c
+++ b/pkg/collector/bpf/vfs/bpf_vfs.c
@@ -18,37 +18,37 @@ char __license[] SEC("license") = "GPL";
#if defined(__TARGET_ARCH_x86)
SEC("fexit/vfs_write")
-__u64 BPF_PROG(fexit_vfs_write, struct file *file,
- const char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+__u64 BPF_PROG(fexit_vfs_write, struct file *file,
+ const char __user *buf, size_t count, loff_t *pos, ssize_t ret)
{
- return handle_rw_event(file, (__s64)ret, MODE_WRITE);
+ return handle_rw_event(file, (__s64)ret, MODE_WRITE);
}
SEC("fexit/vfs_read")
-__u64 BPF_PROG(fexit_vfs_read, struct file *file,
- char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+__u64 BPF_PROG(fexit_vfs_read, struct file *file,
+ char __user *buf, size_t count, loff_t *pos, ssize_t ret)
{
- return handle_rw_event(file, (__s64)ret, MODE_READ);
+ return handle_rw_event(file, (__s64)ret, MODE_READ);
}
SEC("fexit/vfs_writev")
-__u64 BPF_PROG(fexit_vfs_writev, struct file *file,
- const char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+__u64 BPF_PROG(fexit_vfs_writev, struct file *file,
+ const char __user *buf, size_t count, loff_t *pos, ssize_t ret)
{
- return handle_rw_event(file, (__s64)ret, MODE_WRITE);
+ return handle_rw_event(file, (__s64)ret, MODE_WRITE);
}
SEC("fexit/vfs_readv")
-__u64 BPF_PROG(fexit_vfs_readv, struct file *file,
- char __user *buf, size_t count, loff_t *pos, ssize_t ret)
+__u64 BPF_PROG(fexit_vfs_readv, struct file *file,
+ char __user *buf, size_t count, loff_t *pos, ssize_t ret)
{
- return handle_rw_event(file, (__s64)ret, MODE_READ);
+ return handle_rw_event(file, (__s64)ret, MODE_READ);
}
SEC("fexit/vfs_open")
__u64 BPF_PROG(fexit_vfs_open, const struct path *path, struct file *file, int ret)
{
- return handle_inode_event((__s64)ret, MODE_OPEN);
+ return handle_inode_event((__s64)ret, MODE_OPEN);
}
/**
@@ -65,89 +65,89 @@ __u64 BPF_PROG(fexit_vfs_open, const struct path *path, struct file *file, int r
SEC("fexit/vfs_create")
__u64 BPF_PROG(fexit_vfs_create, struct inode *dir, struct dentry *dentry, umode_t mode,
- bool want_excl, int ret)
+ bool want_excl, int ret)
{
- return handle_inode_event((__s64)ret, MODE_CREATE);
+ return handle_inode_event((__s64)ret, MODE_CREATE);
}
SEC("fexit/vfs_mkdir")
__u64 BPF_PROG(fexit_vfs_mkdir, struct inode *dir, struct dentry *dentry, umode_t mode,
- int ret)
+ int ret)
{
- return handle_inode_event((__s64)ret, MODE_MKDIR);
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
}
SEC("fexit/vfs_unlink")
-__u64 BPF_PROG(fexit_vfs_unlink, struct inode *dir, struct dentry *dentry,
- struct inode **pdir, int ret)
+__u64 BPF_PROG(fexit_vfs_unlink, struct inode *dir, struct dentry *dentry,
+ struct inode **pdir, int ret)
{
- return handle_inode_event((__s64)ret, MODE_UNLINK);
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
}
SEC("fexit/vfs_rmdir")
__u64 BPF_PROG(fexit_vfs_rmdir, struct inode *dir, struct dentry *dentry, int ret)
{
- return handle_inode_event((__s64)ret, MODE_RMDIR);
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
}
#elif defined(__KERNEL_POST_v512_PRE_v62)
SEC("fexit/vfs_create")
-__u64 BPF_PROG(fexit_vfs_create, struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool want_excl, int ret)
+__u64 BPF_PROG(fexit_vfs_create, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool want_excl, int ret)
{
- return handle_inode_event((__s64)ret, MODE_CREATE);
+ return handle_inode_event((__s64)ret, MODE_CREATE);
}
SEC("fexit/vfs_mkdir")
-__u64 BPF_PROG(fexit_vfs_mkdir, struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, umode_t mode, int ret)
+__u64 BPF_PROG(fexit_vfs_mkdir, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, int ret)
{
- return handle_inode_event((__s64)ret, MODE_MKDIR);
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
}
SEC("fexit/vfs_unlink")
-__u64 BPF_PROG(fexit_vfs_unlink, struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, struct inode **pdir, int ret)
+__u64 BPF_PROG(fexit_vfs_unlink, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, struct inode **pdir, int ret)
{
- return handle_inode_event((__s64)ret, MODE_UNLINK);
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
}
SEC("fexit/vfs_rmdir")
-__u64 BPF_PROG(fexit_vfs_rmdir, struct user_namespace *mnt_userns, struct inode *dir,
- struct dentry *dentry, int ret)
+__u64 BPF_PROG(fexit_vfs_rmdir, struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, int ret)
{
- return handle_inode_event((__s64)ret, MODE_RMDIR);
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
}
#else
SEC("fexit/vfs_create")
-__u64 BPF_PROG(fexit_vfs_create, struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool want_excl, int ret)
+__u64 BPF_PROG(fexit_vfs_create, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool want_excl, int ret)
{
- return handle_inode_event((__s64)ret, MODE_CREATE);
+ return handle_inode_event((__s64)ret, MODE_CREATE);
}
SEC("fexit/vfs_mkdir")
-__u64 BPF_PROG(fexit_vfs_mkdir, struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode, int ret)
+__u64 BPF_PROG(fexit_vfs_mkdir, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode, int ret)
{
- return handle_inode_event((__s64)ret, MODE_MKDIR);
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
}
SEC("fexit/vfs_unlink")
-__u64 BPF_PROG(fexit_vfs_unlink, struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, struct inode **pdir, int ret)
+__u64 BPF_PROG(fexit_vfs_unlink, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, struct inode **pdir, int ret)
{
- return handle_inode_event((__s64)ret, MODE_UNLINK);
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
}
SEC("fexit/vfs_rmdir")
-__u64 BPF_PROG(fexit_vfs_rmdir, struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, int ret)
+__u64 BPF_PROG(fexit_vfs_rmdir, struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, int ret)
{
- return handle_inode_event((__s64)ret, MODE_RMDIR);
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
}
#endif
@@ -157,8 +157,8 @@ __u64 BPF_PROG(fexit_vfs_rmdir, struct mnt_idmap *idmap, struct inode *dir,
SEC("kprobe/vfs_write")
__u64 kprobe_vfs_write(struct pt_regs *ctx)
{
- struct file *file = (struct file *) PT_REGS_PARM1(ctx);
- __u64 count = (__u64) PT_REGS_PARM3(ctx);
+ struct file *file = (struct file *)PT_REGS_PARM1(ctx);
+ __u64 count = (__u64)PT_REGS_PARM3(ctx);
return handle_rw_event(file, (__s64)count, MODE_WRITE);
}
@@ -166,68 +166,68 @@ __u64 kprobe_vfs_write(struct pt_regs *ctx)
SEC("kprobe/vfs_read")
__u64 kprobe_vfs_read(struct pt_regs *ctx)
{
- struct file *file = (struct file *) PT_REGS_PARM1(ctx);
- __u64 count = (__u64) PT_REGS_PARM3(ctx);
-
+ struct file *file = (struct file *)PT_REGS_PARM1(ctx);
+ __u64 count = (__u64)PT_REGS_PARM3(ctx);
+
return handle_rw_event(file, (__s64)count, MODE_READ);
}
SEC("kprobe/vfs_writev")
__u64 kprobe_vfs_writev(struct pt_regs *ctx)
{
- struct file *file = (struct file *) PT_REGS_PARM1(ctx);
- __u64 count = (__u64) PT_REGS_PARM3(ctx);
-
+ struct file *file = (struct file *)PT_REGS_PARM1(ctx);
+ __u64 count = (__u64)PT_REGS_PARM3(ctx);
+
return handle_rw_event(file, (__s64)count, MODE_WRITE);
}
SEC("kprobe/vfs_readv")
__u64 kprobe_vfs_readv(struct pt_regs *ctx)
{
- struct file *file = (struct file *) PT_REGS_PARM1(ctx);
- __u64 count = (__u64) PT_REGS_PARM3(ctx);
-
+ struct file *file = (struct file *)PT_REGS_PARM1(ctx);
+ __u64 count = (__u64)PT_REGS_PARM3(ctx);
+
return handle_rw_event(file, (__s64)count, MODE_READ);
}
SEC("kretprobe/vfs_create")
__u64 kretprobe_vfs_create(struct pt_regs *ctx)
{
- __s64 ret = (__s64) PT_REGS_RC(ctx);
-
- return handle_inode_event((__s64)ret, MODE_CREATE);
+ __s64 ret = (__s64)PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_CREATE);
}
SEC("kretprobe/vfs_open")
__u64 kretprobe_vfs_open(struct pt_regs *ctx)
{
- __s64 ret = (__s64) PT_REGS_RC(ctx);
-
- return handle_inode_event((__s64)ret, MODE_OPEN);
+ __s64 ret = (__s64)PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_OPEN);
}
SEC("kretprobe/vfs_mkdir")
__u64 kretprobe_vfs_mkdir(struct pt_regs *ctx)
{
- __s64 ret = (__s64) PT_REGS_RC(ctx);
-
- return handle_inode_event((__s64)ret, MODE_MKDIR);
+ __s64 ret = (__s64)PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_MKDIR);
}
SEC("kretprobe/vfs_unlink")
__u64 kretprobe_vfs_unlink(struct pt_regs *ctx)
{
- __s64 ret = (__s64) PT_REGS_RC(ctx);
-
- return handle_inode_event((__s64)ret, MODE_UNLINK);
+ __s64 ret = (__s64)PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_UNLINK);
}
SEC("kretprobe/vfs_rmdir")
__u64 kretprobe_vfs_rmdir(struct pt_regs *ctx)
{
- __s64 ret = (__s64) PT_REGS_RC(ctx);
-
- return handle_inode_event((__s64)ret, MODE_RMDIR);
+ __s64 ret = (__s64)PT_REGS_RC(ctx);
+
+ return handle_inode_event((__s64)ret, MODE_RMDIR);
}
#endif
diff --git a/pkg/collector/bpf/vfs/bpf_vfs.h b/pkg/collector/bpf/vfs/bpf_vfs.h
index 9aa18147..2917d694 100644
--- a/pkg/collector/bpf/vfs/bpf_vfs.h
+++ b/pkg/collector/bpf/vfs/bpf_vfs.h
@@ -12,8 +12,8 @@
#include "bpf_path.h"
enum vfs_mode {
- MODE_READ,
- MODE_WRITE,
+ MODE_READ,
+ MODE_WRITE,
MODE_OPEN,
MODE_CREATE,
MODE_MKDIR,
@@ -24,7 +24,7 @@ enum vfs_mode {
/* vfs related event key struct */
struct vfs_event_key {
__u32 cid; /* cgroup ID */
- __u8 mnt[64]; /* Mount point */
+ __u8 mnt[64]; /* Mount point */
};
/* Any vfs read/write related event */
@@ -90,7 +90,7 @@ struct {
FUNC_INLINE __u32 get_mnt_path(struct vfs_event_key *key, struct file *file)
{
int flags = 0, size;
- char *buffer;
+ char *buffer;
buffer = mnt_path_local(file, &size, &flags);
if (!buffer)
@@ -98,7 +98,7 @@ FUNC_INLINE __u32 get_mnt_path(struct vfs_event_key *key, struct file *file)
asm volatile("%[size] &= 0xff;\n"
: [size] "+r"(size));
-
+
bpf_probe_read(key->mnt, sizeof(key->mnt), buffer);
return (__u32)size;
@@ -113,12 +113,13 @@ FUNC_INLINE __u32 get_mnt_path(struct vfs_event_key *key, struct file *file)
*
* Returns always 0.
*/
-FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type) {
+FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type)
+{
// Important to initialise the struct with some values else verifier will complain
- struct vfs_event_key key = {0};
+ struct vfs_event_key key = { 0 };
// Get current cgroup ID. Works for both v1 and v2
- key.cid = (__u32) ceems_get_current_cgroup_id();
+ key.cid = (__u32)ceems_get_current_cgroup_id();
// If cgroup id is 1, it means it is root cgroup and we are not really interested
// in it and so return
@@ -135,17 +136,17 @@ FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type) {
// Fetch event from correct map
switch (type) {
- case MODE_WRITE:
- event = bpf_map_lookup_elem(&write_accumulator, &key);
- break;
- case MODE_READ:
- event = bpf_map_lookup_elem(&read_accumulator, &key);
- break;
- default:
- return 0;
+ case MODE_WRITE:
+ event = bpf_map_lookup_elem(&write_accumulator, &key);
+ break;
+ case MODE_READ:
+ event = bpf_map_lookup_elem(&read_accumulator, &key);
+ break;
+ default:
+ return 0;
}
- if (!event) {
+ if (!event) {
// New event with increment call counter
struct vfs_rw_event new_event = { .bytes = 0, .calls = 1, .errors = 0 };
@@ -154,24 +155,24 @@ FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type) {
new_event.bytes = 0;
new_event.errors = 1;
} else {
- new_event.bytes = (__u64) ret;
+ new_event.bytes = (__u64)ret;
new_event.errors = 0;
}
-
- // Update map with new key and event
+
+ // Update map with new key and event
switch (type) {
- case MODE_WRITE:
- bpf_map_update_elem(&write_accumulator, &key, &new_event, BPF_NOEXIST);
- break;
- case MODE_READ:
- bpf_map_update_elem(&read_accumulator, &key, &new_event, BPF_NOEXIST);
- break;
- default:
- return 0;
+ case MODE_WRITE:
+ bpf_map_update_elem(&write_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ case MODE_READ:
+ bpf_map_update_elem(&read_accumulator, &key, &new_event, BPF_NOEXIST);
+ break;
+ default:
+ return 0;
}
- return 0;
- }
+ return 0;
+ }
// Always increment calls
__sync_fetch_and_add(&event->calls, 1);
@@ -180,7 +181,7 @@ FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type) {
if (ret < 0) {
__sync_fetch_and_add(&event->errors, 1);
} else {
- __sync_fetch_and_add(&event->bytes, (__u64) ret);
+ __sync_fetch_and_add(&event->bytes, (__u64)ret);
}
return 0;
@@ -194,9 +195,10 @@ FUNC_INLINE __u64 handle_rw_event(struct file *file, __s64 ret, int type) {
*
* Returns always 0.
*/
-FUNC_INLINE __u64 handle_inode_event(__s64 ret, int type) {
+FUNC_INLINE __u64 handle_inode_event(__s64 ret, int type)
+{
// Get cgroup ID
- __u32 key = (__u32) ceems_get_current_cgroup_id();
+ __u32 key = (__u32)ceems_get_current_cgroup_id();
// If cgroup id is 1, it means it is root cgroup and we are not really interested
// in it and so return
@@ -208,26 +210,26 @@ FUNC_INLINE __u64 handle_inode_event(__s64 ret, int type) {
// Fetch event from correct map
switch (type) {
- case MODE_OPEN:
- event = bpf_map_lookup_elem(&open_accumulator, &key);
- break;
- case MODE_CREATE:
- event = bpf_map_lookup_elem(&create_accumulator, &key);
- break;
- case MODE_MKDIR:
- event = bpf_map_lookup_elem(&create_accumulator, &key);
- break;
- case MODE_RMDIR:
- event = bpf_map_lookup_elem(&unlink_accumulator, &key);
- break;
- case MODE_UNLINK:
- event = bpf_map_lookup_elem(&unlink_accumulator, &key);
- break;
- default:
- return 0;
+ case MODE_OPEN:
+ event = bpf_map_lookup_elem(&open_accumulator, &key);
+ break;
+ case MODE_CREATE:
+ event = bpf_map_lookup_elem(&create_accumulator, &key);
+ break;
+ case MODE_MKDIR:
+ event = bpf_map_lookup_elem(&create_accumulator, &key);
+ break;
+ case MODE_RMDIR:
+ event = bpf_map_lookup_elem(&unlink_accumulator, &key);
+ break;
+ case MODE_UNLINK:
+ event = bpf_map_lookup_elem(&unlink_accumulator, &key);
+ break;
+ default:
+ return 0;
}
- if (!event) {
+ if (!event) {
// New event with increment call counter
struct vfs_inode_event new_event = { .calls = 1, .errors = 0 };
@@ -235,8 +237,8 @@ FUNC_INLINE __u64 handle_inode_event(__s64 ret, int type) {
if (ret) {
new_event.errors = 1;
}
-
- // Update map with new key and event
+
+ // Update map with new key and event
switch (type) {
case MODE_OPEN:
bpf_map_update_elem(&open_accumulator, &key, &new_event, BPF_NOEXIST);
@@ -257,10 +259,10 @@ FUNC_INLINE __u64 handle_inode_event(__s64 ret, int type) {
return 0;
}
- return 0;
- }
+ return 0;
+ }
- // Always increment calls
+ // Always increment calls
__sync_fetch_and_add(&event->calls, 1);
// In case of error increment errors else increment bytes
From 31fa4dd08ad70583b7431dcdd6d0c8fa8908c6eb Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Sun, 15 Sep 2024 18:28:19 +0200
Subject: [PATCH 13/18] refactor: Reorganize code of individual collectors
* Make a generic cgroup collector that can be used for different resource managers. The generic cgroup collector won't register itself with the Collector interface; it is meant to be used by other collectors. Resource manager collectors must pass a list of valid cgroup paths to the cgroup collector for metrics fetching.
* Similarly, the perf collector has been modified into an internal generic collector that must be called from collectors specific to resource managers. The same goes for the ebpf collector, which becomes an internal collector meant to be called from other collectors.
* The cgroup, perf and ebpf collectors take a cgroupManager argument during instantiation that determines which processes/cgroups are monitored (see the sketch below).
* The side effect is that we replicate CLI args for each resource manager, but this should not be an issue since the exporter on a given host will not/should not target two different resource managers, so operators will never have to deal with duplication.
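
As a rough illustration of the composition model (a minimal sketch only: myRMCollector, its constructor and the hard-coded cgroup path are assumptions for illustration, not code added by this patch; real collectors discover cgroup paths from their resource manager before delegating):

    // Hypothetical resource manager collector built on the generic
    // cgroup sub-collector.
    type myRMCollector struct {
        cg *cgroupCollector
    }

    func NewMyRMCollector(logger log.Logger) (*myRMCollector, error) {
        // Pick the cgroup layout of this resource manager.
        cgManager, err := NewCgroupManager("slurm")
        if err != nil {
            return nil, err
        }

        // Generic cgroup collector: it does not register itself with the
        // Collector interface and only reports on the paths it is given.
        cg, err := NewCgroupCollector(logger, cgManager, cgroupOpts{collectSwapMemStats: true, collectPSIStats: true})
        if err != nil {
            return nil, err
        }

        return &myRMCollector{cg: cg}, nil
    }

    func (c *myRMCollector) Update(ch chan<- prometheus.Metric) error {
        // The resource manager collector discovers its own cgroup paths...
        metrics := []cgMetric{{path: "/system.slice/slurmstepd.scope/job_123", uuid: "123"}}

        // ...and delegates metrics collection to the generic cgroup collector.
        return c.cg.Update(ch, metrics)
    }
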
Signed-off-by: Mahendra Paipuri
---
pkg/collector/cgroup.go | 766 +++++++++++++++++++++++++++++---
pkg/collector/cgroup_test.go | 314 +++++++++++++
pkg/collector/ebpf.go | 518 ++++++++++++++--------
pkg/collector/ebpf_test.go | 172 +++++++-
pkg/collector/helper.go | 13 +-
pkg/collector/perf.go | 370 +++++++---------
pkg/collector/perf_test.go | 98 +++--
pkg/collector/slurm.go | 828 +++++++++++------------------------
pkg/collector/slurm_test.go | 266 ++++-------
9 files changed, 2069 insertions(+), 1276 deletions(-)
create mode 100644 pkg/collector/cgroup_test.go
diff --git a/pkg/collector/cgroup.go b/pkg/collector/cgroup.go
index 8b7135b6..7d7eb5c1 100644
--- a/pkg/collector/cgroup.go
+++ b/pkg/collector/cgroup.go
@@ -2,15 +2,23 @@ package collector
import (
"bufio"
+ "context"
"errors"
"fmt"
+ "math"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
+ "sync"
"github.com/containerd/cgroups/v3"
+ "github.com/containerd/cgroups/v3/cgroup1"
+ "github.com/containerd/cgroups/v3/cgroup2"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
)
const (
@@ -18,14 +26,16 @@ const (
// to define a max index for the default controllers on tasks.
// For further documentation check BPF part.
cgroupSubSysCount = 15
+ genericSubsystem = "compute"
)
-// Custom errors.
-var (
- ErrInvalidCgroupFS = errors.New("invalid cgroup file system")
+// Resource Managers.
+const (
+ slurm = "slurm"
+ libvirt = "libvirt"
)
-// Regular expressions of cgroup paths for different resource managers
+// Regular expressions of cgroup paths for different resource managers.
/*
For v1 possibilities are /cpuacct/slurm/uid_1000/job_211
/memory/slurm/uid_1000/job_211
@@ -39,78 +49,720 @@ var (
slurmIgnoreProcsRegex = regexp.MustCompile("slurmstepd:(.*)|sleep ([0-9]+)|/bin/bash (.*)/slurm_script")
)
-// cgroupFS is a struct that contains cgroup related info for a
-// given resource manager.
-type cgroupFS struct {
- mode cgroups.CGMode // cgroups mode: unified, legacy, hybrid
- root string // cgroups root
- mount string // path at which resource manager manages cgroups
- subsystem string // active subsystem in cgroups v1
- manager string // cgroup manager
- idRegex *regexp.Regexp // regular expression to capture cgroup ID
- pathFilter func(string) bool // function to filter cgroup paths. Function must return true if cgroup path must be ignored
- procFilter func(string) bool // function to filter processes in cgroup based on cmdline. Function must return true if process must be ignored
+// Ref: https://libvirt.org/cgroups.html#legacy-cgroups-layout
+var (
+ libvirtCgroupPathRegex = regexp.MustCompile("^.*/(?:.+?)instance-([0-9]+)(?:.*$)")
+)
+
+// CLI options.
+var (
+ activeController = CEEMSExporterApp.Flag(
+ "collector.cgroup.active-subsystem",
+ "Active cgroup subsystem for cgroups v1.",
+ ).Default("cpuacct").String()
+
+ // Hidden opts for e2e and unit tests.
+ forceCgroupsVersion = CEEMSExporterApp.Flag(
+ "collector.cgroups.force-version",
+ "Set cgroups version manually. Used only for testing.",
+ ).Hidden().Enum("v1", "v2")
+)
+
+// cgroupManager is the container that holds cgroup information of the resource manager.
+type cgroupManager struct {
+ mode cgroups.CGMode // cgroups mode: unified, legacy, hybrid
+ root string // cgroups root
+ slice string // Slice under which cgroups are managed eg system.slice, machine.slice
+ scope string // Scope under which cgroups are managed eg slurmstepd.scope, machine-qemu\x2d1\x2dvm1.scope
+ activeController string // Active controller for cgroups v1
+ mountPoint string // Path under which resource manager creates cgroups
+ manager string // cgroup manager
+ idRegex *regexp.Regexp // Regular expression to capture cgroup ID set by resource manager
+ pathFilter func(string) bool // Function to filter cgroup paths. Function must return true if cgroup path must be ignored
+ procFilter func(string) bool // Function to filter processes in cgroup based on cmdline. Function must return true if process must be ignored
}
-// cgroupController is a container for cgroup controllers in v1.
-type cgroupController struct {
- id uint64 // Hierarchy unique ID
- idx uint64 // Cgroup SubSys index
- name string // Controller name
- active bool // Will be set to true if controller is set and active
+// String implements the Stringer interface for the struct.
+func (c *cgroupManager) String() string {
+ return fmt.Sprintf("mode: %d root: %s slice: %s scope: %s mount: %s manager: %s", c.mode, c.root, c.slice, c.scope, c.mountPoint, c.manager)
}
-// slurmCgroupFS returns cgroupFS struct for SLURM.
-func slurmCgroupFS(cgroupRootPath, subsystem, forceCgroupsVersion string) cgroupFS {
- var cgroup cgroupFS
- if cgroups.Mode() == cgroups.Unified {
- cgroup = cgroupFS{
- mode: cgroups.Unified,
- root: cgroupRootPath,
- mount: filepath.Join(cgroupRootPath, "system.slice/slurmstepd.scope"),
+// setMountPoint sets mountPoint for the cgroupManager struct.
+func (c *cgroupManager) setMountPoint() {
+ switch c.manager {
+ case slurm:
+ switch c.mode { //nolint:exhaustive
+ case cgroups.Unified:
+ // /sys/fs/cgroup/system.slice/slurmstepd.scope
+ c.mountPoint = filepath.Join(c.root, c.slice, c.scope)
+ default:
+ // /sys/fs/cgroup/cpuacct/slurm
+ c.mountPoint = filepath.Join(c.root, c.activeController, c.manager)
+
+ // For cgroups v1 we need to shift root to /sys/fs/cgroup/cpuacct
+ c.root = filepath.Join(c.root, c.activeController)
}
- } else {
- cgroup = cgroupFS{
- mode: cgroups.Mode(),
- root: filepath.Join(cgroupRootPath, subsystem),
- mount: filepath.Join(cgroupRootPath, subsystem, "slurm"),
- subsystem: subsystem,
+ case libvirt:
+ switch c.mode { //nolint:exhaustive
+ case cgroups.Unified:
+ // /sys/fs/cgroup/machine.slice
+ c.mountPoint = filepath.Join(c.root, c.slice)
+ default:
+ // /sys/fs/cgroup/cpuacct/machine.slice
+ c.mountPoint = filepath.Join(c.root, c.activeController, c.slice)
+
+ // For cgroups v1 we need to shift root to /sys/fs/cgroup/cpuacct
+ c.root = filepath.Join(c.root, c.activeController)
}
+ default:
+ c.mountPoint = c.root
}
+}
+
+// NewCgroupManager returns an instance of cgroupManager based on resource manager.
+func NewCgroupManager(name string) (*cgroupManager, error) {
+ var manager *cgroupManager
- // For overriding in tests
- if forceCgroupsVersion != "" {
- if forceCgroupsVersion == "v2" {
- cgroup = cgroupFS{
+ switch name {
+ case slurm:
+ if (*forceCgroupsVersion == "" && cgroups.Mode() == cgroups.Unified) || *forceCgroupsVersion == "v2" {
+ manager = &cgroupManager{
mode: cgroups.Unified,
- root: cgroupRootPath,
- mount: filepath.Join(cgroupRootPath, "system.slice/slurmstepd.scope"),
+ root: *cgroupfsPath,
+ slice: "system.slice",
+ scope: "slurmstepd.scope",
+ }
+ } else {
+ var mode cgroups.CGMode
+ if *forceCgroupsVersion == "v1" {
+ mode = cgroups.Legacy
+ } else {
+ mode = cgroups.Mode()
+ }
+
+ manager = &cgroupManager{
+ mode: mode,
+ root: *cgroupfsPath,
+ activeController: *activeController,
+ slice: slurm,
+ }
+ }
+
+ // Add manager field
+ manager.manager = slurm
+
+ // Add path regex
+ manager.idRegex = slurmCgroupPathRegex
+
+ // Add filter functions
+ manager.pathFilter = func(p string) bool {
+ return strings.Contains(p, "/step_")
+ }
+ manager.procFilter = func(p string) bool {
+ return slurmIgnoreProcsRegex.MatchString(p)
+ }
+
+ // Set mountpoint
+ manager.setMountPoint()
+
+ return manager, nil
+
+ case libvirt:
+ if (*forceCgroupsVersion == "" && cgroups.Mode() == cgroups.Unified) || *forceCgroupsVersion == "v2" {
+ manager = &cgroupManager{
+ mode: cgroups.Unified,
+ root: *cgroupfsPath,
+ slice: "machine.slice",
+ }
+ } else {
+ var mode cgroups.CGMode
+ if *forceCgroupsVersion == "v1" {
+ mode = cgroups.Legacy
+ } else {
+ mode = cgroups.Mode()
+ }
+
+ manager = &cgroupManager{
+ mode: mode,
+ root: *cgroupfsPath,
+ activeController: *activeController,
+ slice: "machine.slice",
+ }
+ }
+
+ // Add manager field
+ manager.manager = libvirt
+
+ // Add path regex
+ manager.idRegex = libvirtCgroupPathRegex
+
+ // Add filter functions
+ manager.pathFilter = func(p string) bool {
+ return false
+ }
+ manager.procFilter = func(p string) bool {
+ return false
+ }
+
+ // Set mountpoint
+ manager.setMountPoint()
+
+ return manager, nil
+
+ default:
+ return nil, errors.New("unknown resource manager")
+ }
+}
+
+// cgMetric contains metrics returned by cgroup.
+type cgMetric struct {
+ path string
+ cpuUser float64
+ cpuSystem float64
+ cpuTotal float64
+ cpus int
+ cpuPressure float64
+ memoryRSS float64
+ memoryCache float64
+ memoryUsed float64
+ memoryTotal float64
+ memoryFailCount float64
+ memswUsed float64
+ memswTotal float64
+ memswFailCount float64
+ memoryPressure float64
+ rdmaHCAHandles map[string]float64
+ rdmaHCAObjects map[string]float64
+ uuid string
+ err bool
+}
+
+// cgroupCollector collects cgroup metrics for different resource managers.
+type cgroupCollector struct {
+ logger log.Logger
+ cgroupManager *cgroupManager
+ opts cgroupOpts
+ hostname string
+ hostMemTotal float64
+ numCgs *prometheus.Desc
+ cgCPUUser *prometheus.Desc
+ cgCPUSystem *prometheus.Desc
+ cgCPUs *prometheus.Desc
+ cgCPUPressure *prometheus.Desc
+ cgMemoryRSS *prometheus.Desc
+ cgMemoryCache *prometheus.Desc
+ cgMemoryUsed *prometheus.Desc
+ cgMemoryTotal *prometheus.Desc
+ cgMemoryFailCount *prometheus.Desc
+ cgMemswUsed *prometheus.Desc
+ cgMemswTotal *prometheus.Desc
+ cgMemswFailCount *prometheus.Desc
+ cgMemoryPressure *prometheus.Desc
+ cgRDMAHCAHandles *prometheus.Desc
+ cgRDMAHCAObjects *prometheus.Desc
+ collectError *prometheus.Desc
+}
+
+type cgroupOpts struct {
+ collectSwapMemStats bool
+ collectPSIStats bool
+}
+
+// NewCgroupCollector returns a new cgroupCollector exposing a summary of cgroups.
+func NewCgroupCollector(logger log.Logger, cgManager *cgroupManager, opts cgroupOpts) (*cgroupCollector, error) {
+ // Get total memory of host
+ var memTotal float64
+
+ file, err := os.Open(procFilePath("meminfo"))
+ if err == nil {
+ if memInfo, err := parseMemInfo(file); err == nil {
+ memTotal = memInfo["MemTotal_bytes"]
+ }
+ } else {
+ level.Error(logger).Log("msg", "Failed to get total memory of the host", "err", err)
+ }
+
+ defer file.Close()
+
+ return &cgroupCollector{
+ logger: logger,
+ cgroupManager: cgManager,
+ opts: opts,
+ hostMemTotal: memTotal,
+ hostname: hostname,
+ numCgs: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "units"),
+ "Total number of jobs",
+ []string{"manager", "hostname"},
+ nil,
+ ),
+ cgCPUUser: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpu_user_seconds_total"),
+ "Total job CPU user seconds",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgCPUSystem: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpu_system_seconds_total"),
+ "Total job CPU system seconds",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgCPUs: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpus"),
+ "Total number of job CPUs",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgCPUPressure: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpu_psi_seconds"),
+ "Total CPU PSI in seconds",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemoryRSS: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_rss_bytes"),
+ "Memory RSS used in bytes",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemoryCache: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_cache_bytes"),
+ "Memory cache used in bytes",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemoryUsed: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_used_bytes"),
+ "Memory used in bytes",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemoryTotal: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_total_bytes"),
+ "Memory total in bytes",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemoryFailCount: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_fail_count"),
+ "Memory fail count",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemswUsed: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memsw_used_bytes"),
+ "Swap used in bytes",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemswTotal: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memsw_total_bytes"),
+ "Swap total in bytes",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemswFailCount: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memsw_fail_count"),
+ "Swap fail count",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgMemoryPressure: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_psi_seconds"),
+ "Total memory PSI in seconds",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ cgRDMAHCAHandles: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_rdma_hca_handles"),
+ "Current number of RDMA HCA handles",
+ []string{"manager", "hostname", "uuid", "device"},
+ nil,
+ ),
+ cgRDMAHCAObjects: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "unit_rdma_hca_objects"),
+ "Current number of RDMA HCA objects",
+ []string{"manager", "hostname", "uuid", "device"},
+ nil,
+ ),
+ collectError: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, genericSubsystem, "collect_error"),
+ "Indicates collection error, 0=no error, 1=error",
+ []string{"manager", "hostname", "uuid"},
+ nil,
+ ),
+ }, nil
+}
+
+// Update updates cgroup metrics on given channel.
+func (c *cgroupCollector) Update(ch chan<- prometheus.Metric, metrics []cgMetric) error {
+ // Fetch metrics
+ metrics = c.doUpdate(metrics)
+
+ // First send num jobs on the current host
+ ch <- prometheus.MustNewConstMetric(c.numCgs, prometheus.GaugeValue, float64(len(metrics)), c.cgroupManager.manager, c.hostname)
+
+ // Send metrics of each cgroup
+ for _, m := range metrics {
+ if m.err {
+ ch <- prometheus.MustNewConstMetric(c.collectError, prometheus.GaugeValue, float64(1), c.cgroupManager.manager, c.hostname, m.uuid)
+ }
+
+ // CPU stats
+ ch <- prometheus.MustNewConstMetric(c.cgCPUUser, prometheus.CounterValue, m.cpuUser, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgCPUSystem, prometheus.CounterValue, m.cpuSystem, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgCPUs, prometheus.GaugeValue, float64(m.cpus), c.cgroupManager.manager, c.hostname, m.uuid)
+
+ // Memory stats
+ ch <- prometheus.MustNewConstMetric(c.cgMemoryRSS, prometheus.GaugeValue, m.memoryRSS, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemoryCache, prometheus.GaugeValue, m.memoryCache, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemoryUsed, prometheus.GaugeValue, m.memoryUsed, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemoryTotal, prometheus.GaugeValue, m.memoryTotal, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemoryFailCount, prometheus.GaugeValue, m.memoryFailCount, c.cgroupManager.manager, c.hostname, m.uuid)
+
+ // Memory swap stats
+ if c.opts.collectSwapMemStats {
+ ch <- prometheus.MustNewConstMetric(c.cgMemswUsed, prometheus.GaugeValue, m.memswUsed, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemswTotal, prometheus.GaugeValue, m.memswTotal, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemswFailCount, prometheus.GaugeValue, m.memswFailCount, c.cgroupManager.manager, c.hostname, m.uuid)
+ }
+
+ // PSI stats
+ if c.opts.collectPSIStats {
+ ch <- prometheus.MustNewConstMetric(c.cgCPUPressure, prometheus.GaugeValue, m.cpuPressure, c.cgroupManager.manager, c.hostname, m.uuid)
+ ch <- prometheus.MustNewConstMetric(c.cgMemoryPressure, prometheus.GaugeValue, m.memoryPressure, c.cgroupManager.manager, c.hostname, m.uuid)
+ }
+
+ // RDMA stats
+ for device, handles := range m.rdmaHCAHandles {
+ if handles > 0 {
+ ch <- prometheus.MustNewConstMetric(c.cgRDMAHCAHandles, prometheus.GaugeValue, handles, c.cgroupManager.manager, c.hostname, m.uuid, device)
+ }
+ }
+
+		for device, objects := range m.rdmaHCAObjects {
+ if objects > 0 {
+ ch <- prometheus.MustNewConstMetric(c.cgRDMAHCAObjects, prometheus.GaugeValue, objects, c.cgroupManager.manager, c.hostname, m.uuid, device)
+ }
+ }
+ }
+
+ return nil
+}
+
+// Stop releases any system resources held by collector.
+func (c *cgroupCollector) Stop(_ context.Context) error {
+ return nil
+}
+
+// doUpdate gets metrics of current active cgroups.
+func (c *cgroupCollector) doUpdate(metrics []cgMetric) []cgMetric {
+ // Start wait group for go routines
+ wg := &sync.WaitGroup{}
+ wg.Add(len(metrics))
+
+	// No need for any lock primitives here as we read/write
+	// a different element of the slice in each goroutine
+ for i := range len(metrics) {
+ go func(idx int) {
+ defer wg.Done()
+
+ c.update(&metrics[idx])
+ }(i)
+ }
+
+ // Wait for all go routines
+ wg.Wait()
+
+ return metrics
+}
+
+// update gets metrics of a given cgroup path.
+func (c *cgroupCollector) update(m *cgMetric) {
+ if c.cgroupManager.mode == cgroups.Unified {
+ c.statsV2(m)
+ } else {
+ c.statsV1(m)
+ }
+}
+
+// parseCPUSet parses cpuset.cpus file to return a list of CPUs in the cgroup.
+func (c *cgroupCollector) parseCPUSet(cpuset string) ([]string, error) {
+ var cpus []string
+
+ var start, end int
+
+ var err error
+
+ if cpuset == "" {
+ return nil, errors.New("empty cpuset file")
+ }
+
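+	// cpuset strings are comma-separated ranges, e.g. "0-2,7" expands to CPUs 0, 1, 2 and 7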
+ ranges := strings.Split(cpuset, ",")
+ for _, r := range ranges {
+ boundaries := strings.Split(r, "-")
+ if len(boundaries) == 1 {
+ start, err = strconv.Atoi(boundaries[0])
+ if err != nil {
+ return nil, err
+ }
+
+ end = start
+ } else if len(boundaries) == 2 {
+ start, err = strconv.Atoi(boundaries[0])
+ if err != nil {
+ return nil, err
}
- } else if forceCgroupsVersion == "v1" {
- cgroup = cgroupFS{
- mode: cgroups.Legacy,
- root: filepath.Join(cgroupRootPath, subsystem),
- mount: filepath.Join(cgroupRootPath, subsystem, "slurm"),
- subsystem: subsystem,
+
+ end, err = strconv.Atoi(boundaries[1])
+ if err != nil {
+ return nil, err
}
}
+
+ for e := start; e <= end; e++ {
+ cpu := strconv.Itoa(e)
+ cpus = append(cpus, cpu)
+ }
+ }
+
+ return cpus, nil
+}
+
+// getCPUs returns list of CPUs in the cgroup.
+func (c *cgroupCollector) getCPUs(path string) ([]string, error) {
+ var cpusPath string
+ if c.cgroupManager.mode == cgroups.Unified {
+ cpusPath = fmt.Sprintf("%s%s/cpuset.cpus.effective", *cgroupfsPath, path)
+ } else {
+ cpusPath = fmt.Sprintf("%s/cpuset%s/cpuset.cpus", *cgroupfsPath, path)
+ }
+
+ if !fileExists(cpusPath) {
+ return nil, fmt.Errorf("cpuset file %s not found", cpusPath)
}
- // Add manager field
- cgroup.manager = "slurm"
+ cpusData, err := os.ReadFile(cpusPath)
+ if err != nil {
+ level.Error(c.logger).Log("msg", "Error reading cpuset", "cpuset", cpusPath, "err", err)
- // Add path regex
- cgroup.idRegex = slurmCgroupPathRegex
+ return nil, err
+ }
- // Add filter functions
- cgroup.pathFilter = func(p string) bool {
- return strings.Contains(p, "/step_")
+ cpus, err := c.parseCPUSet(strings.TrimSuffix(string(cpusData), "\n"))
+ if err != nil {
+ level.Error(c.logger).Log("msg", "Error parsing cpuset", "cpuset", cpusPath, "err", err)
+
+ return nil, err
}
- cgroup.procFilter = func(p string) bool {
- return slurmIgnoreProcsRegex.MatchString(p)
+
+ return cpus, nil
+}
+
+// statsV1 fetches metrics from cgroups v1.
+func (c *cgroupCollector) statsV1(metric *cgMetric) {
+ path := metric.path
+
+ level.Debug(c.logger).Log("msg", "Loading cgroup v1", "path", path)
+
+ ctrl, err := cgroup1.Load(cgroup1.StaticPath(path), cgroup1.WithHierarchy(subsystem))
+ if err != nil {
+ metric.err = true
+
+ level.Error(c.logger).Log("msg", "Failed to load cgroups", "path", path, "err", err)
+
+ return
+ }
+
+ // Load cgroup stats
+ stats, err := ctrl.Stat(cgroup1.IgnoreNotExist)
+ if err != nil {
+ metric.err = true
+
+ level.Error(c.logger).Log("msg", "Failed to stat cgroups", "path", path, "err", err)
+
+ return
+ }
+
+ if stats == nil {
+ metric.err = true
+
+ level.Error(c.logger).Log("msg", "Cgroup stats are nil", "path", path)
+
+ return
+ }
+
+ // Get CPU stats
+ if stats.GetCPU() != nil {
+ if stats.GetCPU().GetUsage() != nil {
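+			// cgroups v1 cpuacct reports CPU usage in nanoseconds; convert to seconds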
+ metric.cpuUser = float64(stats.GetCPU().GetUsage().GetUser()) / 1000000000.0
+ metric.cpuSystem = float64(stats.GetCPU().GetUsage().GetKernel()) / 1000000000.0
+ metric.cpuTotal = float64(stats.GetCPU().GetUsage().GetTotal()) / 1000000000.0
+ }
+ }
+
+ if cpus, err := c.getCPUs(path); err == nil {
+ metric.cpus = len(cpus)
+ }
+
+ // Get memory stats
+ if stats.GetMemory() != nil {
+ metric.memoryRSS = float64(stats.GetMemory().GetTotalRSS())
+ metric.memoryCache = float64(stats.GetMemory().GetTotalCache())
+
+ if stats.GetMemory().GetUsage() != nil {
+ metric.memoryUsed = float64(stats.GetMemory().GetUsage().GetUsage())
+ // If memory usage limit is set as "max", cgroups lib will set it to
+ // math.MaxUint64. Here we replace it with total system memory
+ if stats.GetMemory().GetUsage().GetLimit() == math.MaxUint64 && c.hostMemTotal != 0 {
+ metric.memoryTotal = c.hostMemTotal
+ } else {
+ metric.memoryTotal = float64(stats.GetMemory().GetUsage().GetLimit())
+ }
+
+ metric.memoryFailCount = float64(stats.GetMemory().GetUsage().GetFailcnt())
+ }
+
+ if stats.GetMemory().GetSwap() != nil {
+ metric.memswUsed = float64(stats.GetMemory().GetSwap().GetUsage())
+ // If memory usage limit is set as "max", cgroups lib will set it to
+ // math.MaxUint64. Here we replace it with total system memory
+ if stats.GetMemory().GetSwap().GetLimit() == math.MaxUint64 && c.hostMemTotal != 0 {
+ metric.memswTotal = c.hostMemTotal
+ } else {
+ metric.memswTotal = float64(stats.GetMemory().GetSwap().GetLimit())
+ }
+
+ metric.memswFailCount = float64(stats.GetMemory().GetSwap().GetFailcnt())
+ }
}
- return cgroup
+ // Get RDMA metrics if available
+ if stats.GetRdma() != nil {
+ metric.rdmaHCAHandles = make(map[string]float64)
+ metric.rdmaHCAObjects = make(map[string]float64)
+
+ for _, device := range stats.GetRdma().GetCurrent() {
+ metric.rdmaHCAHandles[device.GetDevice()] = float64(device.GetHcaHandles())
+ metric.rdmaHCAObjects[device.GetDevice()] = float64(device.GetHcaObjects())
+ }
+ }
+}
+
+// statsV2 fetches metrics from cgroups v2.
+func (c *cgroupCollector) statsV2(metric *cgMetric) {
+ path := metric.path
+
+ level.Debug(c.logger).Log("msg", "Loading cgroup v2", "path", path)
+
+ // Load cgroups
+ ctrl, err := cgroup2.Load(path, cgroup2.WithMountpoint(*cgroupfsPath))
+ if err != nil {
+ metric.err = true
+
+ level.Error(c.logger).Log("msg", "Failed to load cgroups", "path", path, "err", err)
+
+ return
+ }
+
+ // Get stats from cgroup
+ stats, err := ctrl.Stat()
+ if err != nil {
+ metric.err = true
+
+ level.Error(c.logger).Log("msg", "Failed to stat cgroups", "path", path, "err", err)
+
+ return
+ }
+
+ if stats == nil {
+ metric.err = true
+
+ level.Error(c.logger).Log("msg", "Cgroup stats are nil", "path", path)
+
+ return
+ }
+
+ // Get CPU stats
+ if stats.GetCPU() != nil {
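+		// cgroups v2 cpu.stat reports times in microseconds; convert to seconds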
+ metric.cpuUser = float64(stats.GetCPU().GetUserUsec()) / 1000000.0
+ metric.cpuSystem = float64(stats.GetCPU().GetSystemUsec()) / 1000000.0
+ metric.cpuTotal = float64(stats.GetCPU().GetUsageUsec()) / 1000000.0
+
+ if stats.GetCPU().GetPSI() != nil {
+ metric.cpuPressure = float64(stats.GetCPU().GetPSI().GetFull().GetTotal()) / 1000000.0
+ }
+ }
+
+ if cpus, err := c.getCPUs(path); err == nil {
+ metric.cpus = len(cpus)
+ }
+
+ // Get memory stats
+	// cgroups v2 does not expose swap memory events, so we don't set memswFailCount
+ if stats.GetMemory() != nil {
+ metric.memoryUsed = float64(stats.GetMemory().GetUsage())
+ // If memory usage limit is set as "max", cgroups lib will set it to
+ // math.MaxUint64. Here we replace it with total system memory
+ if stats.GetMemory().GetUsageLimit() == math.MaxUint64 && c.hostMemTotal > 0 {
+ metric.memoryTotal = c.hostMemTotal
+ } else {
+ metric.memoryTotal = float64(stats.GetMemory().GetUsageLimit())
+ }
+
+ metric.memoryCache = float64(stats.GetMemory().GetFile()) // This is page cache
+ metric.memoryRSS = float64(stats.GetMemory().GetAnon())
+ metric.memswUsed = float64(stats.GetMemory().GetSwapUsage())
+ // If memory usage limit is set as "max", cgroups lib will set it to
+ // math.MaxUint64. Here we replace it with total system memory
+ if stats.GetMemory().GetSwapLimit() == math.MaxUint64 && c.hostMemTotal > 0 {
+ metric.memswTotal = c.hostMemTotal
+ } else {
+ metric.memswTotal = float64(stats.GetMemory().GetSwapLimit())
+ }
+
+ if stats.GetMemory().GetPSI() != nil {
+ metric.memoryPressure = float64(stats.GetMemory().GetPSI().GetFull().GetTotal()) / 1000000.0
+ }
+ }
+ // Get memory events
+ if stats.GetMemoryEvents() != nil {
+ metric.memoryFailCount = float64(stats.GetMemoryEvents().GetOom())
+ }
+
+ // Get RDMA stats
+ if stats.GetRdma() != nil {
+ metric.rdmaHCAHandles = make(map[string]float64)
+ metric.rdmaHCAObjects = make(map[string]float64)
+
+ for _, device := range stats.GetRdma().GetCurrent() {
+ metric.rdmaHCAHandles[device.GetDevice()] = float64(device.GetHcaHandles())
+ metric.rdmaHCAObjects[device.GetDevice()] = float64(device.GetHcaObjects())
+ }
+ }
+}
+
+// subsystem returns cgroups v1 subsystems.
+func subsystem() ([]cgroup1.Subsystem, error) {
+ s := []cgroup1.Subsystem{
+ cgroup1.NewCpuacct(*cgroupfsPath),
+ cgroup1.NewMemory(*cgroupfsPath),
+ cgroup1.NewRdma(*cgroupfsPath),
+ cgroup1.NewPids(*cgroupfsPath),
+ cgroup1.NewBlkio(*cgroupfsPath),
+ cgroup1.NewCpuset(*cgroupfsPath),
+ }
+
+ return s, nil
+}
+
+// cgroupController is a container for cgroup controllers in v1.
+type cgroupController struct {
+ id uint64 // Hierarchy unique ID
+ idx uint64 // Cgroup SubSys index
+ name string // Controller name
+ active bool // Will be set to true if controller is set and active
}
// parseCgroupSubSysIds returns cgroup controllers for cgroups v1.
diff --git a/pkg/collector/cgroup_test.go b/pkg/collector/cgroup_test.go
new file mode 100644
index 00000000..b280d73a
--- /dev/null
+++ b/pkg/collector/cgroup_test.go
@@ -0,0 +1,314 @@
+//go:build !noslurm
+// +build !noslurm
+
+package collector
+
+import (
+ "context"
+ "testing"
+
+ "github.com/containerd/cgroups/v3"
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewCgroupCollector(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ },
+ )
+ require.NoError(t, err)
+
+ // cgroup Manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Unified,
+ mountPoint: "testdata/sys/fs/cgroup/system.slice/slurmstepd.scope",
+ idRegex: slurmCgroupPathRegex,
+ }
+
+ // opts
+ opts := cgroupOpts{
+ collectSwapMemStats: true,
+ collectPSIStats: true,
+ }
+
+ collector, err := NewCgroupCollector(log.NewNopLogger(), cgManager, opts)
+ require.NoError(t, err)
+
+ // Setup background goroutine to capture metrics.
+ metrics := make(chan prometheus.Metric)
+ defer close(metrics)
+
+ go func() {
+ i := 0
+ for range metrics {
+ i++
+ }
+ }()
+
+ err = collector.Update(metrics, nil)
+ require.NoError(t, err)
+
+ err = collector.Stop(context.Background())
+ require.NoError(t, err)
+}
+
+func TestCgroupsV2Metrics(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ },
+ )
+ require.NoError(t, err)
+
+ // cgroup Manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Unified,
+ mountPoint: "testdata/sys/fs/cgroup/system.slice/slurmstepd.scope",
+ idRegex: slurmCgroupPathRegex,
+ }
+
+ // opts
+ opts := cgroupOpts{
+ collectSwapMemStats: true,
+ collectPSIStats: true,
+ }
+
+ c := cgroupCollector{
+ cgroupManager: cgManager,
+ opts: opts,
+ hostMemTotal: float64(123456),
+ logger: log.NewNopLogger(),
+ }
+
+ expectedMetrics := cgMetric{
+ path: "/system.slice/slurmstepd.scope/job_1009249",
+ cpuUser: 60375.292848,
+ cpuSystem: 115.777502,
+ cpuTotal: 60491.070351,
+ cpus: 2,
+ cpuPressure: 0,
+ memoryRSS: 4.098592768e+09,
+ memoryCache: 0,
+ memoryUsed: 4.111491072e+09,
+ memoryTotal: 4.294967296e+09,
+ memoryFailCount: 0,
+ memswUsed: 0,
+ memswTotal: 123456,
+ memswFailCount: 0,
+ memoryPressure: 0,
+ rdmaHCAHandles: map[string]float64{"hfi1_0": 479, "hfi1_1": 1479, "hfi1_2": 2479},
+ rdmaHCAObjects: map[string]float64{"hfi1_0": 340, "hfi1_1": 1340, "hfi1_2": 2340},
+ err: false,
+ }
+
+ metric := c.doUpdate([]cgMetric{{path: expectedMetrics.path}})
+ assert.Equal(t, expectedMetrics, metric[0])
+}
+
+func TestCgroupsV1Metrics(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ },
+ )
+ require.NoError(t, err)
+
+ // cgroup Manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Legacy,
+ mountPoint: "testdata/sys/fs/cgroup/cpuacct/slurm",
+ idRegex: slurmCgroupPathRegex,
+ }
+
+ // opts
+ opts := cgroupOpts{
+ collectSwapMemStats: true,
+ collectPSIStats: true,
+ }
+
+ c := cgroupCollector{
+ logger: log.NewNopLogger(),
+ cgroupManager: cgManager,
+ opts: opts,
+ hostMemTotal: float64(123456),
+ }
+
+ expectedMetrics := cgMetric{
+ path: "/slurm/uid_1000/job_1009248",
+ cpuUser: 0.39,
+ cpuSystem: 0.45,
+ cpuTotal: 1.012410966,
+ cpus: 0,
+ cpuPressure: 0,
+ memoryRSS: 1.0407936e+07,
+ memoryCache: 2.1086208e+07,
+ memoryUsed: 4.0194048e+07,
+ memoryTotal: 2.01362030592e+11,
+ memoryFailCount: 0,
+ memswUsed: 4.032512e+07,
+ memswTotal: 9.223372036854772e+18,
+ memswFailCount: 0,
+ memoryPressure: 0,
+ rdmaHCAHandles: map[string]float64{"hfi1_0": 479, "hfi1_1": 1479, "hfi1_2": 2479},
+ rdmaHCAObjects: map[string]float64{"hfi1_0": 340, "hfi1_1": 1340, "hfi1_2": 2340},
+ err: false,
+ }
+
+ metric := c.doUpdate([]cgMetric{{path: expectedMetrics.path}})
+ assert.Equal(t, expectedMetrics, metric[0])
+}
+
+func TestNewCgroupManagerV2(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ "--collector.cgroups.force-version", "v2",
+ },
+ )
+ require.NoError(t, err)
+
+ // Slurm case
+ manager, err := NewCgroupManager("slurm")
+ require.NoError(t, err)
+
+ assert.Equal(t, "testdata/sys/fs/cgroup/system.slice/slurmstepd.scope", manager.mountPoint)
+ assert.NotNil(t, manager.pathFilter)
+ assert.NotNil(t, manager.procFilter)
+
+ // libvirt case
+ manager, err = NewCgroupManager("libvirt")
+ require.NoError(t, err)
+
+ assert.Equal(t, "testdata/sys/fs/cgroup/machine.slice", manager.mountPoint)
+ assert.NotNil(t, manager.pathFilter)
+ assert.NotNil(t, manager.procFilter)
+}
+
+func TestNewCgroupManagerV1(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.cgroupfs", "testdata/sys/fs/cgroup",
+ "--collector.cgroups.force-version", "v1",
+ },
+ )
+ require.NoError(t, err)
+
+ // Slurm case
+ manager, err := NewCgroupManager("slurm")
+ require.NoError(t, err)
+
+ assert.Equal(t, "testdata/sys/fs/cgroup/cpuacct/slurm", manager.mountPoint)
+ assert.NotNil(t, manager.pathFilter)
+ assert.NotNil(t, manager.procFilter)
+
+ // libvirt case
+ manager, err = NewCgroupManager("libvirt")
+ require.NoError(t, err)
+
+ assert.Equal(t, "testdata/sys/fs/cgroup/cpuacct/machine.slice", manager.mountPoint)
+ assert.NotNil(t, manager.pathFilter)
+ assert.NotNil(t, manager.procFilter)
+
+ // Check error for unknown resource manager
+ _, err = NewCgroupManager("unknown")
+ assert.Error(t, err)
+}
+
+func TestParseCgroupSubSysIds(t *testing.T) {
+ _, err := CEEMSExporterApp.Parse(
+ []string{
+ "--path.procfs", "testdata/proc",
+ },
+ )
+ require.NoError(t, err)
+
+ controllers, err := parseCgroupSubSysIds()
+ require.NoError(t, err)
+
+ expectedControllers := []cgroupController{
+ {
+ id: 5,
+ idx: 0,
+ name: "cpuset",
+ active: true,
+ },
+ {
+ id: 6,
+ idx: 1,
+ name: "cpu",
+ active: true,
+ },
+ {
+ id: 6,
+ idx: 2,
+ name: "cpuacct",
+ active: true,
+ },
+ {
+ id: 12,
+ idx: 3,
+ name: "blkio",
+ active: true,
+ },
+ {
+ id: 7,
+ idx: 4,
+ name: "memory",
+ active: true,
+ },
+ {
+ id: 11,
+ idx: 5,
+ name: "devices",
+ active: true,
+ },
+ {
+ id: 2,
+ idx: 6,
+ name: "freezer",
+ active: true,
+ },
+ {
+ id: 4,
+ idx: 7,
+ name: "net_cls",
+ active: true,
+ },
+ {
+ id: 3,
+ idx: 8,
+ name: "perf_event",
+ active: true,
+ },
+ {
+ id: 4,
+ idx: 9,
+ name: "net_prio",
+ active: true,
+ },
+ {
+ id: 8,
+ idx: 10,
+ name: "hugetlb",
+ active: true,
+ },
+ {
+ id: 9,
+ idx: 11,
+ name: "pids",
+ active: true,
+ },
+ {
+ id: 10,
+ idx: 12,
+ name: "rdma",
+ active: true,
+ },
+ }
+
+ assert.ElementsMatch(t, expectedControllers, controllers)
+}
diff --git a/pkg/collector/ebpf.go b/pkg/collector/ebpf.go
index 1f40d341..316537ce 100644
--- a/pkg/collector/ebpf.go
+++ b/pkg/collector/ebpf.go
@@ -34,16 +34,21 @@ const (
ebpfCollectorSubsystem = "ebpf"
)
-// CLI options.
+// Custom errors.
var (
- collectNetMetrics = CEEMSExporterApp.Flag(
- "collector.ebpf.network-metrics",
- "Enables collection of network metrics by epf (default: enabled)",
- ).Default("true").Bool()
- collectVFSMetrics = CEEMSExporterApp.Flag(
- "collector.ebpf.vfs-metrics",
- "Enables collection of VFS metrics by epf (default: enabled)",
- ).Default("true").Bool()
+ errMapNotFound = errors.New("map not found")
+)
+
+// Network enum maps.
+var (
+ protoMap = map[int]string{
+ unix.IPPROTO_TCP: "tcp",
+ unix.IPPROTO_UDP: "udp",
+ }
+ familyMap = map[int]string{
+ unix.AF_INET: "ipv4",
+ unix.AF_INET6: "ipv6",
+ }
)
// bpfConfig is a container for the config that is passed to bpf progs.
@@ -60,8 +65,9 @@ type bpfNetEvent struct {
// bpfNetEventKey is key struct for storing network events in the bpf maps.
type bpfNetEventKey struct {
- Cid uint32
- Dev [16]uint8
+ Cid uint32
+ Proto uint16
+ Fam uint16
}
// bpfVfsInodeEvent is value struct for storing VFS inode related
@@ -85,13 +91,33 @@ type bpfVfsEventKey struct {
Mnt [64]uint8
}
+// promVfsEventKey is bpfVfsEventKey translated to Prometheus labels.
+type promVfsEventKey struct {
+ UUID string
+ Mount string
+}
+
+// promNetEventKey is bpfNetEventKey translated to Prometheus labels.
+type promNetEventKey struct {
+ UUID string
+ Proto string
+ Family string
+}
+
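+// ebpfOpts contains the runtime options for the generic ebpf sub-collector.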
+type ebpfOpts struct {
+ vfsStatsEnabled bool
+ netStatsEnabled bool
+ vfsMountPoints []string
+}
+
type ebpfCollector struct {
logger log.Logger
hostname string
- cgroupFS cgroupFS
- inodesMap map[uint64]string
- inodesRevMap map[string]uint64
- activeCgroups []uint64
+ opts ebpfOpts
+ cgroupManager *cgroupManager
+ cgroupIDUUIDCache map[uint64]string
+ cgroupPathIDCache map[string]uint64
+ activeCgroupIDs []uint64
netColl *ebpf.Collection
vfsColl *ebpf.Collection
links map[string]link.Link
@@ -111,14 +137,12 @@ type ebpfCollector struct {
netIngressBytes *prometheus.Desc
netEgressPackets *prometheus.Desc
netEgressBytes *prometheus.Desc
-}
-
-func init() {
- RegisterCollector(ebpfCollectorSubsystem, defaultDisabled, NewEbpfCollector)
+ netRetransPackets *prometheus.Desc
+ netRetransBytes *prometheus.Desc
}
// NewEbpfCollector returns a new instance of ebpf collector.
-func NewEbpfCollector(logger log.Logger) (Collector, error) {
+func NewEbpfCollector(logger log.Logger, cgManager *cgroupManager, opts ebpfOpts) (*ebpfCollector, error) {
var netColl, vfsColl *ebpf.Collection
var configMap *ebpf.Map
@@ -127,34 +151,21 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
var err error
- // Atleast one of network or VFS events must be enabled
- if !*collectNetMetrics && !*collectVFSMetrics {
- level.Error(logger).Log("msg", "Enable atleast one of --collector.ebpf.network-metrics or --collector.ebpf.vfs-metrics")
-
- return nil, errors.New("invalid CLI options for ebpf collector")
- }
-
- // Get cgroups based on the enabled collector
- var cgroupFS cgroupFS
- if *collectorState[slurmCollectorSubsystem] {
- cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
- }
-
- // If no cgroupFS set return
- if cgroupFS.root == "" {
- level.Error(logger).Log("msg", "ebpf collector needs slurm collector. Enable it with --collector.slurm")
-
- return nil, ErrInvalidCgroupFS
- }
-
// Remove resource limits for kernels <5.11.
if err := rlimit.RemoveMemlock(); err != nil {
return nil, fmt.Errorf("error removing memlock: %w", err)
}
// Load network programs
- if *collectNetMetrics {
- netColl, err = loadObject("bpf/objs/bpf_network.o")
+ if opts.netStatsEnabled {
+ objFile, err := bpfNetObjs()
+ if err != nil {
+ level.Error(logger).Log("msg", "Failed to get current kernel version", "err", err)
+
+ return nil, err
+ }
+
+ netColl, err = loadObject("bpf/objs/" + objFile)
if err != nil {
level.Error(logger).Log("msg", "Unable to load network bpf objects", "err", err)
@@ -170,7 +181,7 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
}
// Load VFS programs
- if *collectVFSMetrics {
+ if opts.vfsStatsEnabled {
objFile, err := bpfVFSObjs()
if err != nil {
level.Error(logger).Log("msg", "Failed to get current kernel version", "err", err)
@@ -197,7 +208,7 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
// Update config map
var config bpfConfig
- if cgroupFS.mode == cgroups.Unified {
+ if cgManager.mode == cgroups.Unified {
config = bpfConfig{
CgrpSubsysIdx: uint64(0), // Irrelevant for cgroups v2
CgrpFsMagic: uint64(unix.CGROUP2_SUPER_MAGIC),
@@ -212,7 +223,7 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
}
for _, cgroupController := range cgroupControllers {
- if cgroupController.name == strings.TrimSpace(cgroupFS.subsystem) {
+ if cgroupController.name == strings.TrimSpace(cgManager.activeController) {
cgrpSubSysIdx = cgroupController.idx
}
}
@@ -250,6 +261,8 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
if links[kernFuncName], err = link.Kprobe(kernFuncName, prog, nil); err != nil {
level.Error(logger).Log("msg", "Failed to open kprobe", "func", kernFuncName, "err", err)
}
+
+ level.Debug(logger).Log("msg", "kprobe linked", "prog", name, "func", kernFuncName)
}
}
@@ -266,6 +279,8 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
if links[kernFuncName], err = link.Kretprobe(kernFuncName, prog, nil); err != nil {
level.Error(logger).Log("msg", "Failed to open kretprobe", "func", kernFuncName, "err", err)
}
+
+ level.Debug(logger).Log("msg", "kretprobe linked", "prog", name, "func", kernFuncName)
}
}
@@ -278,6 +293,8 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
}); err != nil {
level.Error(logger).Log("msg", "Failed to open fentry", "func", kernFuncName, "err", err)
}
+
+ level.Debug(logger).Log("msg", "fentry linked", "prog", name, "func", kernFuncName)
}
// fexit/* programs
@@ -289,18 +306,21 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
}); err != nil {
level.Error(logger).Log("msg", "Failed to open fexit", "func", kernFuncName, "err", err)
}
+
+ level.Debug(logger).Log("msg", "fexit linked", "prog", name, "func", kernFuncName)
}
}
return &ebpfCollector{
- logger: logger,
- hostname: hostname,
- cgroupFS: cgroupFS,
- inodesMap: make(map[uint64]string),
- inodesRevMap: make(map[string]uint64),
- netColl: netColl,
- vfsColl: vfsColl,
- links: links,
+ logger: logger,
+ hostname: hostname,
+ cgroupManager: cgManager,
+ opts: opts,
+ cgroupIDUUIDCache: make(map[uint64]string),
+ cgroupPathIDCache: make(map[string]uint64),
+ netColl: netColl,
+ vfsColl: vfsColl,
+ links: links,
vfsWriteBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "write_bytes_total"),
"Total number of bytes written from a cgroup in bytes",
@@ -376,25 +396,37 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
netIngressPackets: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "ingress_packets_total"),
"Total number of ingress packets from a cgroup",
- []string{"manager", "hostname", "uuid", "dev"},
+ []string{"manager", "hostname", "uuid", "proto", "family"},
nil,
),
netIngressBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "ingress_bytes_total"),
"Total number of ingress bytes from a cgroup",
- []string{"manager", "hostname", "uuid", "dev"},
+ []string{"manager", "hostname", "uuid", "proto", "family"},
nil,
),
netEgressPackets: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "egress_packets_total"),
"Total number of egress packets from a cgroup",
- []string{"manager", "hostname", "uuid", "dev"},
+ []string{"manager", "hostname", "uuid", "proto", "family"},
nil,
),
netEgressBytes: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "egress_bytes_total"),
"Total number of egress bytes from a cgroup",
- []string{"manager", "hostname", "uuid", "dev"},
+ []string{"manager", "hostname", "uuid", "proto", "family"},
+ nil,
+ ),
+ netRetransPackets: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "retrans_packets_total"),
+ "Total number of retransmission packets from a cgroup",
+ []string{"manager", "hostname", "uuid", "proto", "family"},
+ nil,
+ ),
+ netRetransBytes: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, ebpfCollectorSubsystem, "retrans_bytes_total"),
+ "Total number of retransmission bytes from a cgroup",
+ []string{"manager", "hostname", "uuid", "proto", "family"},
nil,
),
}, nil
@@ -403,13 +435,13 @@ func NewEbpfCollector(logger log.Logger) (Collector, error) {
// Update implements Collector and update job metrics.
func (c *ebpfCollector) Update(ch chan<- prometheus.Metric) error {
// Fetch all active cgroups
- if err := c.getActiveCgroups(); err != nil {
+ if err := c.discoverCgroups(); err != nil {
return err
}
// Start wait group
wg := sync.WaitGroup{}
- wg.Add(7)
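+	// One goroutine per metric group: 5 VFS (write, read, open, create, unlink) and 3 network (ingress, egress, retransmissions)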
+ wg.Add(8)
// Update different metrics in go routines
go func() {
@@ -468,6 +500,14 @@ func (c *ebpfCollector) Update(ch chan<- prometheus.Metric) error {
}
}()
+ go func() {
+ defer wg.Done()
+
+ if err := c.updateNetRetrans(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update network retransmission stats", "err", err)
+ }
+ }()
+
// Wait for all go routines
wg.Wait()
@@ -476,6 +516,8 @@ func (c *ebpfCollector) Update(ch chan<- prometheus.Metric) error {
// Stop releases system resources used by the collector.
func (c *ebpfCollector) Stop(_ context.Context) error {
+ level.Debug(c.logger).Log("msg", "Stopping", "collector", ebpfCollectorSubsystem)
+
// Close all probes
for name, link := range c.links {
if err := link.Close(); err != nil {
@@ -496,30 +538,81 @@ func (c *ebpfCollector) Stop(_ context.Context) error {
return nil
}
-// updateVFSWrite updates VFS write metrics.
-func (c *ebpfCollector) updateVFSWrite(ch chan<- prometheus.Metric) error {
- if c.vfsColl == nil {
- return nil
+// containsMount returns true if any of the configured mount points is a substring of the
+// mount path returned by the bpf map.
+// If no mount points are configured, it returns true to allow all mount points.
+func (c *ebpfCollector) containsMount(mount string) bool {
+ if len(c.opts.vfsMountPoints) <= 0 {
+ return true
+ }
+
+	// Check if any of the configured mount points is a substring
+	// of the actual mount point
+ for _, m := range c.opts.vfsMountPoints {
+ if strings.Contains(mount, m) {
+ return true
+ }
}
+ return false
+}
+
+// aggVFSRWStats aggregates the VFS read/write metrics based on UUID.
+func (c *ebpfCollector) aggVFSRWStats(mapName string) (map[promVfsEventKey]bpfVfsRwEvent, error) {
var key bpfVfsEventKey
var value bpfVfsRwEvent
- if m, ok := c.vfsColl.Maps["write_accumulator"]; ok {
- defer m.Close()
+ aggMetric := make(map[promVfsEventKey]bpfVfsRwEvent)
+ if m, ok := c.vfsColl.Maps[mapName]; ok {
entries := m.Iterate()
for entries.Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
+ if slices.Contains(c.activeCgroupIDs, uint64(key.Cid)) {
mount := unix.ByteSliceToString(key.Mnt[:])
- ch <- prometheus.MustNewConstMetric(c.vfsWriteRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsWriteBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsWriteErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
+ if !c.containsMount(mount) {
+ continue
+ }
+
+ promKey := promVfsEventKey{
+ UUID: c.cgroupIDUUIDCache[uint64(key.Cid)],
+ Mount: mount,
+ }
+ if v, ok := aggMetric[promKey]; ok {
+ aggMetric[promKey] = bpfVfsRwEvent{
+ Calls: v.Calls + value.Calls,
+ Bytes: v.Bytes + value.Bytes,
+ Errors: v.Errors + value.Errors,
+ }
+ } else {
+ aggMetric[promKey] = value
+ }
}
}
+ } else {
+ return nil, errMapNotFound
+ }
+
+ return aggMetric, nil
+}
+
+// updateVFSWrite updates VFS write metrics.
+func (c *ebpfCollector) updateVFSWrite(ch chan<- prometheus.Metric) error {
+ if c.vfsColl == nil {
+ return nil
+ }
+
+ // Aggregate metrics
+ aggMetric, err := c.aggVFSRWStats("write_accumulator")
+ if err != nil {
+ return err
+ }
+
+ // Update metrics to the channel
+ for key, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupManager.manager, c.hostname, key.UUID, key.Mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupManager.manager, c.hostname, key.UUID, key.Mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsWriteErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupManager.manager, c.hostname, key.UUID, key.Mount)
}
return nil
@@ -531,27 +624,50 @@ func (c *ebpfCollector) updateVFSRead(ch chan<- prometheus.Metric) error {
return nil
}
- var key bpfVfsEventKey
+ // Aggregate metrics
+ aggMetric, err := c.aggVFSRWStats("read_accumulator")
+ if err != nil {
+ return err
+ }
- var value bpfVfsRwEvent
+ // Update metrics to the channel
+ for key, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.vfsReadRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupManager.manager, c.hostname, key.UUID, key.Mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsReadBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupManager.manager, c.hostname, key.UUID, key.Mount)
+ ch <- prometheus.MustNewConstMetric(c.vfsReadErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupManager.manager, c.hostname, key.UUID, key.Mount)
+ }
- if m, ok := c.vfsColl.Maps["read_accumulator"]; ok {
- defer m.Close()
+ return nil
+}
+
+// aggVFSInodeStats aggregates the VFS inode metrics based on UUID.
+func (c *ebpfCollector) aggVFSInodeStats(mapName string) (map[string]bpfVfsInodeEvent, error) {
+ var key uint32
+
+ var value bpfVfsInodeEvent
+ aggMetric := make(map[string]bpfVfsInodeEvent)
+
+ if m, ok := c.vfsColl.Maps[mapName]; ok {
entries := m.Iterate()
for entries.Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- mount := unix.ByteSliceToString(key.Mnt[:])
- ch <- prometheus.MustNewConstMetric(c.vfsReadRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsReadBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, mount)
- ch <- prometheus.MustNewConstMetric(c.vfsReadErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid, mount)
+ if slices.Contains(c.activeCgroupIDs, uint64(key)) {
+ uuid := c.cgroupIDUUIDCache[uint64(key)]
+ if v, ok := aggMetric[uuid]; ok {
+ aggMetric[uuid] = bpfVfsInodeEvent{
+ Calls: v.Calls + value.Calls,
+ Errors: v.Errors + value.Errors,
+ }
+ } else {
+ aggMetric[uuid] = value
+ }
}
}
+ } else {
+ return nil, errMapNotFound
}
- return nil
+ return aggMetric, nil
}
// updateVFSOpen updates VFS open stats.
@@ -560,22 +676,16 @@ func (c *ebpfCollector) updateVFSOpen(ch chan<- prometheus.Metric) error {
return nil
}
- var key uint32
-
- var value bpfVfsInodeEvent
-
- if m, ok := c.vfsColl.Maps["open_accumulator"]; ok {
- defer m.Close()
+ // Aggregate metrics
+ aggMetric, err := c.aggVFSInodeStats("open_accumulator")
+ if err != nil {
+ return err
+ }
- entries := m.Iterate()
- for entries.Next(&key, &value) {
- cgroupID := uint64(key)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
- ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
- }
- }
+ // Update metrics to the channel
+ for uuid, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupManager.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupManager.manager, c.hostname, uuid)
}
return nil
@@ -587,22 +697,16 @@ func (c *ebpfCollector) updateVFSCreate(ch chan<- prometheus.Metric) error {
return nil
}
- var key uint32
-
- var value bpfVfsInodeEvent
-
- if m, ok := c.vfsColl.Maps["create_accumulator"]; ok {
- defer m.Close()
+ // Aggregate metrics
+ aggMetric, err := c.aggVFSInodeStats("create_accumulator")
+ if err != nil {
+ return err
+ }
- entries := m.Iterate()
- for entries.Next(&key, &value) {
- cgroupID := uint64(key)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
- ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
- }
- }
+ // Update metrics to the channel
+ for uuid, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.vfsCreateRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupManager.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsCreateErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupManager.manager, c.hostname, uuid)
}
return nil
@@ -614,25 +718,53 @@ func (c *ebpfCollector) updateVFSUnlink(ch chan<- prometheus.Metric) error {
return nil
}
- var key uint32
+ // Aggregate metrics
+ aggMetric, err := c.aggVFSInodeStats("unlink_accumulator")
+ if err != nil {
+ return err
+ }
- var value bpfVfsInodeEvent
+ // Update metrics to the channel
+ for uuid, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.vfsUnlinkRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupManager.manager, c.hostname, uuid)
+ ch <- prometheus.MustNewConstMetric(c.vfsUnlinkErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupManager.manager, c.hostname, uuid)
+ }
+
+ return nil
+}
+
+// aggNetStats aggregates the network metrics based on UUID.
+func (c *ebpfCollector) aggNetStats(mapName string) (map[promNetEventKey]bpfNetEvent, error) {
+ var key bpfNetEventKey
+
+ var value bpfNetEvent
- if m, ok := c.vfsColl.Maps["unlink_accumulator"]; ok {
- defer m.Close()
+ aggMetric := make(map[promNetEventKey]bpfNetEvent)
+ if m, ok := c.netColl.Maps[mapName]; ok {
entries := m.Iterate()
for entries.Next(&key, &value) {
- cgroupID := uint64(key)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- ch <- prometheus.MustNewConstMetric(c.vfsOpenRequests, prometheus.CounterValue, float64(value.Calls), c.cgroupFS.manager, c.hostname, uuid)
- ch <- prometheus.MustNewConstMetric(c.vfsOpenErrors, prometheus.CounterValue, float64(value.Errors), c.cgroupFS.manager, c.hostname, uuid)
+ if slices.Contains(c.activeCgroupIDs, uint64(key.Cid)) {
+ promKey := promNetEventKey{
+ UUID: c.cgroupIDUUIDCache[uint64(key.Cid)],
+ Proto: protoMap[int(key.Proto)],
+ Family: familyMap[int(key.Fam)],
+ }
+ if v, ok := aggMetric[promKey]; ok {
+ aggMetric[promKey] = bpfNetEvent{
+ Packets: v.Packets + value.Packets,
+ Bytes: v.Bytes + value.Bytes,
+ }
+ } else {
+ aggMetric[promKey] = value
+ }
}
}
+ } else {
+ return nil, errMapNotFound
}
- return nil
+ return aggMetric, nil
}
// updateNetIngress updates network ingress stats.
@@ -641,23 +773,16 @@ func (c *ebpfCollector) updateNetIngress(ch chan<- prometheus.Metric) error {
return nil
}
- var key bpfNetEventKey
-
- var value bpfNetEvent
-
- if m, ok := c.netColl.Maps["ingress_accumulator"]; ok {
- defer m.Close()
+ // Aggregate metrics
+ aggMetric, err := c.aggNetStats("ingress_accumulator")
+ if err != nil {
+ return err
+ }
- entries := m.Iterate()
- for entries.Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- device := unix.ByteSliceToString(key.Dev[:])
- ch <- prometheus.MustNewConstMetric(c.netIngressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
- ch <- prometheus.MustNewConstMetric(c.netIngressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
- }
- }
+ // Update metrics to the channel
+ for key, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.netIngressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupManager.manager, c.hostname, key.UUID, key.Proto, key.Family)
+ ch <- prometheus.MustNewConstMetric(c.netIngressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupManager.manager, c.hostname, key.UUID, key.Proto, key.Family)
}
return nil
@@ -669,48 +794,64 @@ func (c *ebpfCollector) updateNetEgress(ch chan<- prometheus.Metric) error {
return nil
}
- var key bpfNetEventKey
+ // Aggregate metrics
+ aggMetric, err := c.aggNetStats("egress_accumulator")
+ if err != nil {
+ return err
+ }
- var value bpfNetEvent
+ // Update metrics to the channel
+ for key, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.netEgressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupManager.manager, c.hostname, key.UUID, key.Proto, key.Family)
+ ch <- prometheus.MustNewConstMetric(c.netEgressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupManager.manager, c.hostname, key.UUID, key.Proto, key.Family)
+ }
- if m, ok := c.netColl.Maps["egress_accumulator"]; ok {
- defer m.Close()
+ return nil
+}
- entries := m.Iterate()
- for entries.Next(&key, &value) {
- cgroupID := uint64(key.Cid)
- if slices.Contains(c.activeCgroups, cgroupID) {
- uuid := c.inodesMap[cgroupID]
- device := unix.ByteSliceToString(key.Dev[:])
- ch <- prometheus.MustNewConstMetric(c.netEgressPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupFS.manager, c.hostname, uuid, device)
- ch <- prometheus.MustNewConstMetric(c.netEgressBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupFS.manager, c.hostname, uuid, device)
- }
- }
+// updateNetRetrans updates network retransmission stats.
+func (c *ebpfCollector) updateNetRetrans(ch chan<- prometheus.Metric) error {
+ if c.netColl == nil {
+ return nil
+ }
+
+ // Aggregate metrics
+ aggMetric, err := c.aggNetStats("retrans_accumulator")
+ if err != nil {
+ return err
+ }
+
+ // Update metrics to the channel
+ for key, value := range aggMetric {
+ ch <- prometheus.MustNewConstMetric(c.netRetransPackets, prometheus.CounterValue, float64(value.Packets), c.cgroupManager.manager, c.hostname, key.UUID, key.Proto, key.Family)
+ ch <- prometheus.MustNewConstMetric(c.netRetransBytes, prometheus.CounterValue, float64(value.Bytes), c.cgroupManager.manager, c.hostname, key.UUID, key.Proto, key.Family)
}
return nil
}
-func (c *ebpfCollector) getActiveCgroups() error {
- // Get currently active jobs and set them in activeJobs state variable
- var activeUUIDs []string
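+// discoverCgroups walks the cgroup mount point, refreshes the cgroup ID/UUID caches
+// and records the IDs of the currently active cgroups.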
+func (c *ebpfCollector) discoverCgroups() error {
+	// Get currently active UUIDs and cgroup paths so that stale entries can be evicted from the caches
+ var activeCgroupUUIDs []string
+
+ var activeCgroupPaths []string
// Reset activeCgroups from last scrape
- c.activeCgroups = make([]uint64, 0)
+ c.activeCgroupIDs = make([]uint64, 0)
// Walk through all cgroups and get cgroup paths
- if err := filepath.WalkDir(c.cgroupFS.mount, func(p string, info fs.DirEntry, err error) error {
+ if err := filepath.WalkDir(c.cgroupManager.mountPoint, func(p string, info fs.DirEntry, err error) error {
if err != nil {
return err
}
// Ignore irrelevant cgroup paths
- if !info.IsDir() || c.cgroupFS.pathFilter(p) {
+ if !info.IsDir() {
return nil
}
// Get cgroup ID
- cgroupIDMatches := c.cgroupFS.idRegex.FindStringSubmatch(p)
+ cgroupIDMatches := c.cgroupManager.idRegex.FindStringSubmatch(p)
if len(cgroupIDMatches) <= 1 {
return nil
}
@@ -722,37 +863,42 @@ func (c *ebpfCollector) getActiveCgroups() error {
return nil
}
- // Check if we already passed through this cgroup
- if slices.Contains(activeUUIDs, uuid) {
- return nil
- }
-
- // Get inode of the cgroup
- if _, ok := c.inodesRevMap[uuid]; !ok {
+ // Get inode of the cgroup path if not already present in the cache
+ if _, ok := c.cgroupPathIDCache[p]; !ok {
if inode, err := inode(p); err == nil {
- c.inodesRevMap[uuid] = inode
- c.inodesMap[inode] = p
+ c.cgroupPathIDCache[p] = inode
+ c.cgroupIDUUIDCache[inode] = uuid
}
}
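+
+	// Make sure the inode to UUID mapping is present even when the path to inode entry was cached in an earlier scrape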
+ if _, ok := c.cgroupIDUUIDCache[c.cgroupPathIDCache[p]]; !ok {
+ c.cgroupIDUUIDCache[c.cgroupPathIDCache[p]] = uuid
+ }
- activeUUIDs = append(activeUUIDs, uuid)
- c.activeCgroups = append(c.activeCgroups, c.inodesRevMap[uuid])
+ // Populate activeCgroupUUIDs, activeCgroupIDs and activeCgroupPaths
+ activeCgroupPaths = append(activeCgroupPaths, p)
+ activeCgroupUUIDs = append(activeCgroupUUIDs, uuid)
+ c.activeCgroupIDs = append(c.activeCgroupIDs, c.cgroupPathIDCache[p])
level.Debug(c.logger).Log("msg", "cgroup path", "path", p)
return nil
}); err != nil {
level.Error(c.logger).
- Log("msg", "Error walking cgroup subsystem", "path", c.cgroupFS.mount, "err", err)
+ Log("msg", "Error walking cgroup subsystem", "path", c.cgroupManager.mountPoint, "err", err)
return err
}
- // Remove expired uuids from inodeMap and inodeRevMap
- for uuid, inode := range c.inodesRevMap {
- if !slices.Contains(activeUUIDs, uuid) {
- delete(c.inodesRevMap, uuid)
- delete(c.inodesMap, inode)
+ // Evict older entries from caches
+ for cid, uuid := range c.cgroupIDUUIDCache {
+ if !slices.Contains(activeCgroupUUIDs, uuid) {
+ delete(c.cgroupIDUUIDCache, cid)
+ }
+ }
+
+ for path := range c.cgroupPathIDCache {
+ if !slices.Contains(activeCgroupPaths, path) {
+ delete(c.cgroupPathIDCache, path)
}
}
@@ -777,6 +923,24 @@ func bpfVFSObjs() (string, error) {
}
}
+// bpfNetObjs returns the network bpf objects based on current kernel version.
+func bpfNetObjs() (string, error) {
+ // Get current kernel version
+ currentKernelVer, err := KernelVersion()
+ if err != nil {
+ return "", err
+ }
+
+ // Return appropriate bpf object file based on kernel version
+ if currentKernelVer > KernelStringToNumeric("6.4") {
+ return "bpf_network.o", nil
+ } else if currentKernelVer >= KernelStringToNumeric("5.19") && currentKernelVer <= KernelStringToNumeric("6.4") {
+ return "bpf_network_v64.o", nil
+ } else {
+ return "bpf_network_v519.o", nil
+ }
+}
+
// loadObject loads a BPF ELF file and returns a Collection.
func loadObject(path string) (*ebpf.Collection, error) {
// Read ELF file
diff --git a/pkg/collector/ebpf_test.go b/pkg/collector/ebpf_test.go
index bfa681a1..33f0fef8 100644
--- a/pkg/collector/ebpf_test.go
+++ b/pkg/collector/ebpf_test.go
@@ -2,9 +2,12 @@ package collector
import (
"context"
+ "os"
"os/user"
+ "slices"
"testing"
+ "github.com/containerd/cgroups/v3"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
@@ -158,13 +161,22 @@ func TestNewEbpfCollector(t *testing.T) {
_, err := CEEMSExporterApp.Parse(
[]string{
"--path.cgroupfs", "testdata/sys/fs/cgroup",
- "--collector.slurm",
- "--collector.slurm.force-cgroups-version", "v2",
+ "--collector.cgroups.force-version", "v2",
},
)
require.NoError(t, err)
- collector, err := NewEbpfCollector(log.NewNopLogger())
+ // cgroup manager
+ cgManager, err := NewCgroupManager("slurm")
+ require.NoError(t, err)
+
+ // ebpf opts
+ opts := ebpfOpts{
+ vfsStatsEnabled: true,
+ netStatsEnabled: true,
+ }
+
+ collector, err := NewEbpfCollector(log.NewNopLogger(), cgManager, opts)
require.NoError(t, err)
// Setup background goroutine to capture metrics.
@@ -193,25 +205,42 @@ func TestActiveCgroupsV2(t *testing.T) {
)
require.NoError(t, err)
+ // cgroup manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Unified,
+ mountPoint: "testdata/sys/fs/cgroup/system.slice/slurmstepd.scope",
+ idRegex: slurmCgroupPathRegex,
+ }
+
+ // ebpf opts
+ opts := ebpfOpts{
+ vfsStatsEnabled: true,
+ netStatsEnabled: true,
+ }
+
c := ebpfCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
- logger: log.NewNopLogger(),
- inodesMap: make(map[uint64]string),
- inodesRevMap: make(map[string]uint64),
+ logger: log.NewNopLogger(),
+ cgroupManager: cgManager,
+
+ opts: opts,
+ cgroupIDUUIDCache: make(map[uint64]string),
+ cgroupPathIDCache: make(map[string]uint64),
}
// Get active cgroups
- err = c.getActiveCgroups()
+ err = c.discoverCgroups()
require.NoError(t, err)
- assert.Len(t, c.activeCgroups, 3)
- assert.Len(t, c.inodesMap, 3)
- assert.Len(t, c.inodesRevMap, 3)
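+	// All nested cgroup directories whose path matches the job regex are tracked, so the caches hold more entries than the number of jobs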
+ assert.Len(t, c.activeCgroupIDs, 39)
+ assert.Len(t, c.cgroupIDUUIDCache, 39)
+ assert.Len(t, c.cgroupPathIDCache, 39)
// Get cgroup IDs
var uuids []string
- for uuid := range c.inodesRevMap {
- uuids = append(uuids, uuid)
+ for _, uuid := range c.cgroupIDUUIDCache {
+ if !slices.Contains(uuids, uuid) {
+ uuids = append(uuids, uuid)
+ }
}
assert.ElementsMatch(t, []string{"1009248", "1009249", "1009250"}, uuids)
@@ -225,26 +254,123 @@ func TestActiveCgroupsV1(t *testing.T) {
)
require.NoError(t, err)
+ // cgroup manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Legacy,
+ mountPoint: "testdata/sys/fs/cgroup/cpuacct/slurm",
+ idRegex: slurmCgroupPathRegex,
+ }
+
+ // ebpf opts
+ opts := ebpfOpts{
+ vfsStatsEnabled: true,
+ netStatsEnabled: true,
+ }
+
c := ebpfCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "cpuacct", "v1"),
- logger: log.NewNopLogger(),
- inodesMap: make(map[uint64]string),
- inodesRevMap: make(map[string]uint64),
+ logger: log.NewNopLogger(),
+ cgroupManager: cgManager,
+
+ opts: opts,
+ cgroupIDUUIDCache: make(map[uint64]string),
+ cgroupPathIDCache: make(map[string]uint64),
}
// Get active cgroups
- err = c.getActiveCgroups()
+ err = c.discoverCgroups()
require.NoError(t, err)
- assert.Len(t, c.activeCgroups, 3)
- assert.Len(t, c.inodesMap, 3)
- assert.Len(t, c.inodesRevMap, 3)
+ assert.Len(t, c.activeCgroupIDs, 6)
+ assert.Len(t, c.cgroupIDUUIDCache, 6)
+ assert.Len(t, c.cgroupPathIDCache, 6)
// Get cgroup IDs
var uuids []string
- for uuid := range c.inodesRevMap {
- uuids = append(uuids, uuid)
+ for _, uuid := range c.cgroupIDUUIDCache {
+ if !slices.Contains(uuids, uuid) {
+ uuids = append(uuids, uuid)
+ }
}
assert.ElementsMatch(t, []string{"1009248", "1009249", "1009250"}, uuids)
}
+
+func TestVFSBPFObjects(t *testing.T) {
+ tests := []struct {
+ name string
+ procfs string
+ version string
+ obj string
+ }{
+ {
+ name: "kernel >= 6.2",
+ procfs: t.TempDir(),
+ version: "Ubuntu 6.5.0-35.35~22.04.1-generic 6.5.13",
+ obj: "bpf_vfs.o",
+ },
+ {
+ name: "kernel > 5.11 and kernel < 6.2",
+ procfs: t.TempDir(),
+ version: "Ubuntu 5.19.0-35.35~22.04.1-generic 5.19.13",
+ obj: "bpf_vfs_v62.o",
+ },
+ {
+ name: "kernel < 5.11",
+ procfs: t.TempDir(),
+ version: "Ubuntu 5.6.0-35.35~22.04.1-generic 5.6.13",
+ obj: "bpf_vfs_v511.o",
+ },
+ }
+
+ for _, test := range tests {
+ err := os.WriteFile(test.procfs+"/version_signature", []byte(test.version), 0o600)
+ require.NoError(t, err)
+
+ *procfsPath = test.procfs
+
+ obj, err := bpfVFSObjs()
+ require.NoError(t, err)
+
+ assert.Equal(t, test.obj, obj, test.name)
+ }
+}
+
+func TestNetBPFObjects(t *testing.T) {
+ tests := []struct {
+ name string
+ procfs string
+ version string
+ obj string
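+	// Expected controllers correspond to the entries in testdata/proc/cgroups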
+ }{
+ {
+ name: "kernel >= 6.5",
+ procfs: t.TempDir(),
+ version: "Ubuntu 6.9.0-35.35~22.04.1-generic 6.5.13",
+ obj: "bpf_network.o",
+ },
+ {
+ name: "kernel > 5.19 and kernel < 6.5",
+ procfs: t.TempDir(),
+ version: "Ubuntu 5.27.0-35.35~22.04.1-generic 5.19.13",
+ obj: "bpf_network_v64.o",
+ },
+ {
+ name: "kernel < 5.19",
+ procfs: t.TempDir(),
+ version: "Ubuntu 5.6.0-35.35~22.04.1-generic 5.6.13",
+ obj: "bpf_network_v519.o",
+ },
+ }
+
+ for _, test := range tests {
+ err := os.WriteFile(test.procfs+"/version_signature", []byte(test.version), 0o600)
+ require.NoError(t, err)
+
+ *procfsPath = test.procfs
+
+ obj, err := bpfNetObjs()
+ require.NoError(t, err)
+
+ assert.Equal(t, test.obj, obj, test.name)
+ }
+}
diff --git a/pkg/collector/helper.go b/pkg/collector/helper.go
index b6bb4824..8352e4d0 100644
--- a/pkg/collector/helper.go
+++ b/pkg/collector/helper.go
@@ -168,12 +168,9 @@ func GetNvidiaGPUDevices(nvidiaSmiPath string, logger log.Logger) (map[int]Devic
nvidiaSmiOutput, err := osexec.Execute(nvidiaSmiCmd, args, nil, logger)
if err != nil {
- level.Error(logger).
- Log("msg", "nvidia-smi command to get list of devices failed", "err", err)
-
return nil, err
}
- // Get all devices
+
return parseNvidiaSmiOutput(string(nvidiaSmiOutput), logger), nil
}
@@ -223,14 +220,13 @@ func GetAMDGPUDevices(rocmSmiPath string, logger log.Logger) (map[int]Device, er
if rocmSmiPath != "" {
if _, err := os.Stat(rocmSmiPath); err != nil {
- level.Error(logger).Log("msg", "Failed to open rocm-smi executable", "path", rocmSmiPath, "err", err)
-
return nil, err
}
rocmSmiCmd = rocmSmiPath
} else {
rocmSmiCmd = "rocm-smi"
+
if _, err := exec.LookPath(rocmSmiCmd); err != nil {
return nil, err
}
@@ -241,12 +237,9 @@ func GetAMDGPUDevices(rocmSmiPath string, logger log.Logger) (map[int]Device, er
rocmSmiOutput, err := osexec.Execute(rocmSmiCmd, args, nil, logger)
if err != nil {
- level.Error(logger).
- Log("msg", "rocm-smi command to get list of devices failed", "err", err)
-
return nil, err
}
- // Get all devices
+
return parseAmdSmioutput(string(rocmSmiOutput), logger), nil
}
diff --git a/pkg/collector/perf.go b/pkg/collector/perf.go
index efff1489..9a82b276 100644
--- a/pkg/collector/perf.go
+++ b/pkg/collector/perf.go
@@ -55,104 +55,58 @@ var (
}
)
+// Lock to protect concurrent updates to cgroupIDProcMap.
+var (
+ mapLock = sync.RWMutex{}
+)
+
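+// perfOpts contains options for the perf collector: which profiler groups are enabled,
+// which events to collect and which processes to target.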
+type perfOpts struct {
+ perfHwProfilersEnabled bool
+ perfSwProfilersEnabled bool
+ perfCacheProfilersEnabled bool
+ perfHwProfilers []string
+ perfSwProfilers []string
+ perfCacheProfilers []string
+ targetEnvVars []string
+}
+
// perfCollector is a Collector that uses the perf subsystem to collect
// metrics. It uses perf_event_open an ioctls for profiling. Due to the fact
// that the perf subsystem is highly dependent on kernel configuration and
// settings not all profiler values may be exposed on the target system at any
// given time.
type perfCollector struct {
- fs procfs.FS
-
- envVar string
-
- perfHwProfilersEnabled bool
- perfSwProfilersEnabled bool
- perfCacheProfilersEnabled bool
-
- perfHwProfilers sync.Map
- perfSwProfilers sync.Map
- perfCacheProfilers sync.Map
-
+ logger log.Logger
+ hostname string
+ cgroupManager *cgroupManager
+ fs procfs.FS
+ opts perfOpts
+ perfHwProfilers map[int]*perf.HardwareProfiler
+ perfSwProfilers map[int]*perf.SoftwareProfiler
+ perfCacheProfilers map[int]*perf.CacheProfiler
perfHwProfilerTypes perf.HardwareProfilerType
perfSwProfilerTypes perf.SoftwareProfilerType
perfCacheProfilerTypes perf.CacheProfilerType
-
- cgroupFS cgroupFS
-
- desc map[string]*prometheus.Desc
-
- hostname string
- manager string
-
- logger log.Logger
+ desc map[string]*prometheus.Desc
}
-func init() {
- RegisterCollector(perfCollectorSubsystem, defaultDisabled, NewPerfCollector)
-}
-
-// CLI options.
-var (
- perfProfilersEnvVars = CEEMSExporterApp.Flag(
- "collector.perf.env-var",
- "Processes having this environment variable set will be profiled. If empty, all the relevant processes will be profiled.",
- ).String()
- perfHwProfilersFlag = CEEMSExporterApp.Flag(
- "collector.perf.enable-hardware-profilers",
- "Enables perf hardware profilers (default: enabled)",
- ).Default("true").Bool()
- perfHwProfilers = CEEMSExporterApp.Flag(
- "collector.perf.hardware-profilers",
- "perf hardware profilers to collect",
- ).Strings()
- perfSwProfilersFlag = CEEMSExporterApp.Flag(
- "collector.perf.enable-software-profilers",
- "Enables perf software profilers (default: enabled)",
- ).Default("true").Bool()
- perfSwProfilers = CEEMSExporterApp.Flag(
- "collector.perf.software-profilers",
- "perf software profilers to collect",
- ).Strings()
- perfCacheProfilersFlag = CEEMSExporterApp.Flag(
- "collector.perf.enable-cache-profilers",
- "Enables perf cache profilers (default: disabled)",
- ).Default("false").Bool()
- perfCacheProfilers = CEEMSExporterApp.Flag(
- "collector.perf.cache-profilers",
- "perf cache profilers to collect",
- ).Strings()
-)
-
// NewPerfCollector returns a new perf based collector, it creates a profiler
// per compute unit.
-func NewPerfCollector(logger log.Logger) (Collector, error) {
- // Get cgroup file system
- var cgroupFS cgroupFS
- if *collectorState[slurmCollectorSubsystem] {
- cgroupFS = slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
- }
-
- // If no cgroupFS set return
- if cgroupFS.root == "" {
- level.Error(logger).Log("msg", "ebpf collector needs slurm collector. Enable it with --collector.slurm")
-
- return nil, ErrInvalidCgroupFS
- }
-
+func NewPerfCollector(logger log.Logger, cgManager *cgroupManager, opts perfOpts) (*perfCollector, error) {
collector := &perfCollector{
- logger: logger,
- hostname: hostname,
- cgroupFS: cgroupFS,
- envVar: *perfProfilersEnvVars,
- perfHwProfilersEnabled: *perfHwProfilersFlag,
- perfSwProfilersEnabled: *perfSwProfilersFlag,
- perfCacheProfilersEnabled: *perfCacheProfilersFlag,
+ logger: logger,
+ hostname: hostname,
+ cgroupManager: cgManager,
+ opts: opts,
+ perfHwProfilers: make(map[int]*perf.HardwareProfiler),
+ perfSwProfilers: make(map[int]*perf.SoftwareProfiler),
+ perfCacheProfilers: make(map[int]*perf.CacheProfiler),
}
// Configure perf profilers
collector.perfHwProfilerTypes = perf.AllHardwareProfilers
- if collector.perfHwProfilersEnabled && len(*perfHwProfilers) > 0 {
- for _, hf := range *perfHwProfilers {
+ if collector.opts.perfHwProfilersEnabled && len(collector.opts.perfHwProfilers) > 0 {
+ for _, hf := range collector.opts.perfHwProfilers {
if v, ok := perfHardwareProfilerMap[hf]; ok {
collector.perfHwProfilerTypes |= v
}
@@ -160,8 +114,8 @@ func NewPerfCollector(logger log.Logger) (Collector, error) {
}
collector.perfSwProfilerTypes = perf.AllSoftwareProfilers
- if collector.perfSwProfilersEnabled && len(*perfSwProfilers) > 0 {
- for _, sf := range *perfSwProfilers {
+ if collector.opts.perfSwProfilersEnabled && len(collector.opts.perfSwProfilers) > 0 {
+ for _, sf := range collector.opts.perfSwProfilers {
if v, ok := perfSoftwareProfilerMap[sf]; ok {
collector.perfSwProfilerTypes |= v
}
@@ -169,8 +123,8 @@ func NewPerfCollector(logger log.Logger) (Collector, error) {
}
collector.perfCacheProfilerTypes = perf.AllCacheProfilers
- if collector.perfCacheProfilersEnabled && len(*perfCacheProfilers) > 0 {
- for _, cf := range *perfCacheProfilers {
+ if collector.opts.perfCacheProfilersEnabled && len(collector.opts.perfCacheProfilers) > 0 {
+ for _, cf := range collector.opts.perfCacheProfilers {
if v, ok := perfCacheProfilerMap[cf]; ok {
collector.perfCacheProfilerTypes |= v
}
@@ -488,7 +442,7 @@ func (c *perfCollector) Stop(_ context.Context) error {
// updateHardwareCounters collects hardware counters for the given cgroup.
func (c *perfCollector) updateHardwareCounters(cgroupID string, procs []procfs.Proc, ch chan<- prometheus.Metric) error {
- if !c.perfHwProfilersEnabled {
+ if !c.opts.perfHwProfilersEnabled {
return nil
}
@@ -501,14 +455,12 @@ func (c *perfCollector) updateHardwareCounters(cgroupID string, procs []procfs.P
for _, proc := range procs {
pid = proc.PID
- if profiler, ok := c.perfHwProfilers.Load(pid); ok {
+ if hwProfiler, ok := c.perfHwProfilers[pid]; ok {
hwProfile := &perf.HardwareProfile{}
- if hwProfiler, ok := profiler.(*perf.HardwareProfiler); ok {
- if err := (*hwProfiler).Profile(hwProfile); err != nil {
- errs = errors.Join(errs, fmt.Errorf("%w: %d", err, pid))
+ if err := (*hwProfiler).Profile(hwProfile); err != nil {
+ errs = errors.Join(errs, fmt.Errorf("%w: %d", err, pid))
- continue
- }
+ continue
}
if hwProfile.CPUCycles != nil {
@@ -546,7 +498,7 @@ func (c *perfCollector) updateHardwareCounters(cgroupID string, procs []procfs.P
ch <- prometheus.MustNewConstMetric(
c.desc[counter],
prometheus.CounterValue, value,
- c.manager, c.hostname, cgroupID,
+ c.cgroupManager.manager, c.hostname, cgroupID,
)
}
}
@@ -556,7 +508,7 @@ func (c *perfCollector) updateHardwareCounters(cgroupID string, procs []procfs.P
// updateSoftwareCounters collects software counters for the given cgroup.
func (c *perfCollector) updateSoftwareCounters(cgroupID string, procs []procfs.Proc, ch chan<- prometheus.Metric) error {
- if !c.perfSwProfilersEnabled {
+ if !c.opts.perfSwProfilersEnabled {
return nil
}
@@ -569,14 +521,12 @@ func (c *perfCollector) updateSoftwareCounters(cgroupID string, procs []procfs.P
for _, proc := range procs {
pid = proc.PID
- if profiler, ok := c.perfSwProfilers.Load(pid); ok {
+ if swProfiler, ok := c.perfSwProfilers[pid]; ok {
swProfile := &perf.SoftwareProfile{}
- if swProfiler, ok := profiler.(*perf.SoftwareProfiler); ok {
- if err := (*swProfiler).Profile(swProfile); err != nil {
- errs = errors.Join(errs, fmt.Errorf("%w: %d", err, pid))
+ if err := (*swProfiler).Profile(swProfile); err != nil {
+ errs = errors.Join(errs, fmt.Errorf("%w: %d", err, pid))
- continue
- }
+ continue
}
if swProfile.PageFaults != nil {
@@ -606,7 +556,7 @@ func (c *perfCollector) updateSoftwareCounters(cgroupID string, procs []procfs.P
ch <- prometheus.MustNewConstMetric(
c.desc[counter],
prometheus.CounterValue, value,
- c.manager, c.hostname, cgroupID,
+ c.cgroupManager.manager, c.hostname, cgroupID,
)
}
}
@@ -616,7 +566,7 @@ func (c *perfCollector) updateSoftwareCounters(cgroupID string, procs []procfs.P
// updateCacheCounters collects cache counters for the given cgroup.
func (c *perfCollector) updateCacheCounters(cgroupID string, procs []procfs.Proc, ch chan<- prometheus.Metric) error {
- if !c.perfCacheProfilersEnabled {
+ if !c.opts.perfCacheProfilersEnabled {
return nil
}
@@ -629,14 +579,12 @@ func (c *perfCollector) updateCacheCounters(cgroupID string, procs []procfs.Proc
for _, proc := range procs {
pid = proc.PID
- if profiler, ok := c.perfCacheProfilers.Load(pid); ok {
+ if cacheProfiler, ok := c.perfCacheProfilers[pid]; ok {
cacheProfile := &perf.CacheProfile{}
- if cacheProfiler, ok := profiler.(*perf.CacheProfiler); ok {
- if err := (*cacheProfiler).Profile(cacheProfile); err != nil {
- errs = errors.Join(errs, fmt.Errorf("%w: %d", err, pid))
+ if err := (*cacheProfiler).Profile(cacheProfile); err != nil {
+ errs = errors.Join(errs, fmt.Errorf("%w: %d", err, pid))
- continue
- }
+ continue
}
if cacheProfile.L1DataReadHit != nil {
@@ -694,7 +642,7 @@ func (c *perfCollector) updateCacheCounters(cgroupID string, procs []procfs.Proc
ch <- prometheus.MustNewConstMetric(
c.desc[counter],
prometheus.CounterValue, value,
- c.manager, c.hostname, cgroupID,
+ c.cgroupManager.manager, c.hostname, cgroupID,
)
}
}
@@ -702,8 +650,11 @@ func (c *perfCollector) updateCacheCounters(cgroupID string, procs []procfs.Proc
return errs
}
-// discoverProcess discovers all active processes by walking through procsfs and returns
-// a map of cgroup ID to procs.
+// discoverProcess returns a map of cgroup ID to procs by looking at each process
+// in procfs. Walking through the cgroup fs is not an option here, as cgroups v1
+// will not have all the PIDs of a cgroup when the PID controller is not enabled.
+// The current implementation works for both cgroups v1 and v2.
func (c *perfCollector) discoverProcess() (map[string][]procfs.Proc, error) {
allProcs, err := c.fs.AllProcs()
if err != nil {
@@ -714,70 +665,86 @@ func (c *perfCollector) discoverProcess() (map[string][]procfs.Proc, error) {
cgroupIDProcMap := make(map[string][]procfs.Proc)
- var cgroupID string
+ wg := sync.WaitGroup{}
+ wg.Add(allProcs.Len())
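+
+	// Inspect each process in its own goroutine; matching procs are appended to cgroupIDProcMap under mapLock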
for _, proc := range allProcs {
- // if envVar is not empty check if this env vars is present for the process
- // We dont check for the value of env var. Presence of env var is enough to
- // trigger the profiling of that process
- if c.envVar != "" {
- environ, err := proc.Environ()
- if err != nil {
- continue
- }
+ go func(p procfs.Proc) {
+ defer wg.Done()
- for _, env := range environ {
- if strings.HasPrefix(env, c.envVar) {
- goto check_process
+			// If targetEnvVars is not empty, check if any of these env vars is present for the process.
+			// We don't check the value of the env var; its presence is enough to
+			// trigger profiling of that process.
+ if len(c.opts.targetEnvVars) > 0 {
+ environ, err := p.Environ()
+ if err != nil {
+ return
}
- }
-
- // If env var is not found on process, ignore it
- continue
- }
- check_process:
+ for _, env := range environ {
+ for _, targetEnvVar := range c.opts.targetEnvVars {
+ if strings.HasPrefix(env, targetEnvVar) {
+ goto check_process
+ }
+ }
+ }
- // Ignore processes where command line matches the regex
- if c.cgroupFS.procFilter != nil {
- procCmdLine, err := proc.CmdLine()
- if err != nil || len(procCmdLine) == 0 {
- continue
+			// If none of the target env vars is found, skip this process
+ return
}
- // Ignore process if matches found
- if c.cgroupFS.procFilter(strings.Join(procCmdLine, " ")) {
- continue
- }
- }
+ check_process:
- // Get cgroup ID from regex
- if c.cgroupFS.idRegex != nil {
- cgroups, err := proc.Cgroups()
- if err != nil || len(cgroups) == 0 {
- continue
+ // Ignore processes where command line matches the regex
+ if c.cgroupManager.procFilter != nil {
+ procCmdLine, err := p.CmdLine()
+ if err != nil || len(procCmdLine) == 0 {
+ return
+ }
+
+ // Ignore process if matches found
+ if c.cgroupManager.procFilter(strings.Join(procCmdLine, " ")) {
+ return
+ }
}
- for _, cgroup := range cgroups {
- cgroupIDMatches := c.cgroupFS.idRegex.FindStringSubmatch(cgroup.Path)
- if len(cgroupIDMatches) <= 1 {
- continue
+ // Get cgroup ID from regex
+ var cgroupID string
+
+ if c.cgroupManager.idRegex != nil {
+ cgroups, err := p.Cgroups()
+ if err != nil || len(cgroups) == 0 {
+ return
}
- cgroupID = cgroupIDMatches[1]
+ for _, cgroup := range cgroups {
+ cgroupIDMatches := c.cgroupManager.idRegex.FindStringSubmatch(cgroup.Path)
+ if len(cgroupIDMatches) <= 1 {
+ continue
+ }
+
+ cgroupID = cgroupIDMatches[1]
- break
+ break
+ }
}
- }
- // If no cgroupID found, ignore
- if cgroupID == "" {
- continue
- }
+ // If no cgroupID found, ignore
+ if cgroupID == "" {
+ return
+ }
- cgroupIDProcMap[cgroupID] = append(cgroupIDProcMap[cgroupID], proc)
+ mapLock.Lock()
+ cgroupIDProcMap[cgroupID] = append(cgroupIDProcMap[cgroupID], p)
+ mapLock.Unlock()
+ }(proc)
}
+ // Wait for all go routines
+ wg.Wait()
+
+ level.Debug(c.logger).Log("msg", "Discovered cgroups and procs for profiling", "map", cgroupIDProcMap)
+
return cgroupIDProcMap, nil
}
@@ -796,32 +763,32 @@ func (c *perfCollector) newProfilers(cgroupIDProcMap map[string][]procfs.Proc) [
cmdLine = []string{err.Error()}
}
- if c.perfHwProfilersEnabled {
- if _, ok := c.perfHwProfilers.Load(pid); !ok {
- if profiler, err := c.newHwProfiler(pid); err != nil {
+ if c.opts.perfHwProfilersEnabled {
+ if _, ok := c.perfHwProfilers[pid]; !ok {
+ if hwProfiler, err := c.newHwProfiler(pid); err != nil {
level.Error(c.logger).Log("msg", "failed to start hardware profiler", "pid", pid, "cmd", strings.Join(cmdLine, " "), "err", err)
} else {
- c.perfHwProfilers.Store(pid, profiler)
+ c.perfHwProfilers[pid] = hwProfiler
}
}
}
- if c.perfSwProfilersEnabled {
- if _, ok := c.perfSwProfilers.Load(pid); !ok {
- if profiler, err := c.newSwProfiler(pid); err != nil {
+ if c.opts.perfSwProfilersEnabled {
+ if _, ok := c.perfSwProfilers[pid]; !ok {
+ if swProfiler, err := c.newSwProfiler(pid); err != nil {
level.Error(c.logger).Log("msg", "failed to start software profiler", "pid", pid, "cmd", strings.Join(cmdLine, " "), "err", err)
} else {
- c.perfSwProfilers.Store(pid, profiler)
+ c.perfSwProfilers[pid] = swProfiler
}
}
}
- if c.perfCacheProfilersEnabled {
- if _, ok := c.perfCacheProfilers.Load(pid); !ok {
- if profiler, err := c.newCacheProfiler(pid); err != nil {
+ if c.opts.perfCacheProfilersEnabled {
+ if _, ok := c.perfCacheProfilers[pid]; !ok {
+ if cacheProfiler, err := c.newCacheProfiler(pid); err != nil {
level.Error(c.logger).Log("msg", "failed to start cache profiler", "pid", pid, "cmd", strings.Join(cmdLine, " "), "err", err)
} else {
- c.perfCacheProfilers.Store(pid, profiler)
+ c.perfCacheProfilers[pid] = cacheProfiler
}
}
}
@@ -887,61 +854,40 @@ func (c *perfCollector) newCacheProfiler(pid int) (*perf.CacheProfiler, error) {
// closeProfilers stops and closes profilers of PIDs that do not exist anymore.
func (c *perfCollector) closeProfilers(activePIDs []int) {
- if c.perfHwProfilersEnabled {
- c.perfHwProfilers.Range(func(pid, profiler interface{}) bool {
- if pidInt, ok := pid.(int); ok {
- if !slices.Contains(activePIDs, pidInt) {
- if hwProfiler, ok := profiler.(*perf.HardwareProfiler); ok {
- if err := c.closeHwProfiler(hwProfiler); err != nil {
- level.Error(c.logger).Log("msg", "failed to shutdown hardware profiler", "err", err)
- } else {
- c.perfHwProfilers.Delete(pidInt)
- level.Debug(c.logger).Log("msg", "Removed process from hardware profilers map", "pid", pid)
- }
- }
+ if c.opts.perfHwProfilersEnabled {
+ for pid, hwProfiler := range c.perfHwProfilers {
+ if !slices.Contains(activePIDs, pid) {
+ if err := c.closeHwProfiler(hwProfiler); err != nil {
+ level.Error(c.logger).Log("msg", "failed to shutdown hardware profiler", "err", err)
+ } else {
+ delete(c.perfHwProfilers, pid)
}
}
-
- return true
- })
+ }
}
- if c.perfSwProfilersEnabled {
- c.perfSwProfilers.Range(func(pid, profiler interface{}) bool {
- if pidInt, ok := pid.(int); ok {
- if !slices.Contains(activePIDs, pidInt) {
- if swProfiler, ok := profiler.(*perf.SoftwareProfiler); ok {
- if err := c.closeSwProfiler(swProfiler); err != nil {
- level.Error(c.logger).Log("msg", "failed to shutdown software profiler", "err", err)
- } else {
- c.perfSwProfilers.Delete(pidInt)
- level.Debug(c.logger).Log("msg", "Removed process from software profilers map", "pid", pid)
- }
- }
+ if c.opts.perfSwProfilersEnabled {
+ for pid, swProfiler := range c.perfSwProfilers {
+ if !slices.Contains(activePIDs, pid) {
+ if err := c.closeSwProfiler(swProfiler); err != nil {
+ level.Error(c.logger).Log("msg", "failed to shutdown software profiler", "err", err)
+ } else {
+ delete(c.perfSwProfilers, pid)
}
}
-
- return true
- })
+ }
}
- if c.perfCacheProfilersEnabled {
- c.perfCacheProfilers.Range(func(pid, profiler interface{}) bool {
- if pidInt, ok := pid.(int); ok {
- if !slices.Contains(activePIDs, pidInt) {
- if cacheProfiler, ok := profiler.(*perf.CacheProfiler); ok {
- if err := c.closeCacheProfiler(cacheProfiler); err != nil {
- level.Error(c.logger).Log("msg", "failed to shutdown cache profiler", "err", err)
- } else {
- c.perfCacheProfilers.Delete(pidInt)
- level.Debug(c.logger).Log("msg", "Removed process from cache profilers map", "pid", pid)
- }
- }
+ if c.opts.perfCacheProfilersEnabled {
+ for pid, cacheProfiler := range c.perfCacheProfilers {
+ if !slices.Contains(activePIDs, pid) {
+ if err := c.closeCacheProfiler(cacheProfiler); err != nil {
+ level.Error(c.logger).Log("msg", "failed to shutdown cache profiler", "err", err)
+ } else {
+ delete(c.perfCacheProfilers, pid)
}
}
-
- return true
- })
+ }
}
}
diff --git a/pkg/collector/perf_test.go b/pkg/collector/perf_test.go
index f7e615e5..1b996ad3 100644
--- a/pkg/collector/perf_test.go
+++ b/pkg/collector/perf_test.go
@@ -8,7 +8,9 @@ import (
"os"
"testing"
+ "github.com/containerd/cgroups/v3"
"github.com/go-kit/log"
+ "github.com/hodgesds/perf-utils"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
"github.com/stretchr/testify/assert"
@@ -17,11 +19,28 @@ import (
func TestPerfCollector(t *testing.T) {
_, err := CEEMSExporterApp.Parse([]string{
- "--path.procfs", "testdata/proc", "--collector.slurm",
+ "--path.procfs", "testdata/proc",
})
require.NoError(t, err)
- collector, err := NewPerfCollector(log.NewNopLogger())
+ // cgroup manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Unified,
+ mountPoint: "testdata/sys/fs/cgroup/system.slice/slurmstepd.scope",
+ idRegex: slurmCgroupPathRegex,
+ procFilter: func(p string) bool {
+ return slurmIgnoreProcsRegex.MatchString(p)
+ },
+ }
+
+ // perf opts
+ opts := perfOpts{
+ perfHwProfilersEnabled: true,
+ perfSwProfilersEnabled: true,
+ perfCacheProfilersEnabled: true,
+ }
+
+ collector, err := NewPerfCollector(log.NewNopLogger(), cgManager, opts)
require.NoError(t, err)
// Setup background goroutine to capture metrics.
@@ -42,40 +61,30 @@ func TestPerfCollector(t *testing.T) {
require.NoError(t, err)
}
-func TestPerfCollectorWithSlurm(t *testing.T) {
- _, err := CEEMSExporterApp.Parse(
- []string{"--path.procfs", "testdata/proc", "--collector.slurm"},
- )
- require.NoError(t, err)
-
- collector, err := NewPerfCollector(log.NewNopLogger())
- require.NoError(t, err)
-
- // Setup background goroutine to capture metrics.
- metrics := make(chan prometheus.Metric)
- defer close(metrics)
-
- go func() {
- i := 0
- for range metrics {
- i++
- }
- }()
-
- err = collector.Update(metrics)
- require.NoError(t, err)
-}
-
func TestDiscoverProcess(t *testing.T) {
var err error
- collector := perfCollector{
- logger: log.NewNopLogger(),
- envVar: "ENABLE_PROFILING",
- cgroupFS: slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, ""),
+ // cgroup manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Unified,
+ idRegex: slurmCgroupPathRegex,
+ procFilter: func(p string) bool {
+ return slurmIgnoreProcsRegex.MatchString(p)
+ },
+ }
+
+ // perf opts
+ opts := perfOpts{
perfHwProfilersEnabled: true,
perfSwProfilersEnabled: true,
perfCacheProfilersEnabled: true,
+ targetEnvVars: []string{"ENABLE_PROFILING"},
+ }
+
+ collector := perfCollector{
+ logger: log.NewNopLogger(),
+ cgroupManager: cgManager,
+ opts: opts,
}
collector.fs, err = procfs.NewFS("testdata/proc")
@@ -120,14 +129,31 @@ func TestNewProfilers(t *testing.T) {
var ok bool
- collector := perfCollector{
- logger: log.NewNopLogger(),
- cgroupFS: slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, ""),
+ // cgroup manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Legacy,
+ idRegex: slurmCgroupPathRegex,
+ procFilter: func(p string) bool {
+ return slurmIgnoreProcsRegex.MatchString(p)
+ },
+ }
+
+ // perf opts
+ opts := perfOpts{
perfHwProfilersEnabled: true,
perfSwProfilersEnabled: true,
perfCacheProfilersEnabled: true,
}
+ collector := perfCollector{
+ logger: log.NewNopLogger(),
+ cgroupManager: cgManager,
+ opts: opts,
+ perfHwProfilers: make(map[int]*perf.HardwareProfiler),
+ perfSwProfilers: make(map[int]*perf.SoftwareProfiler),
+ perfCacheProfilers: make(map[int]*perf.CacheProfiler),
+ }
+
collector.fs, err = procfs.NewFS("testdata/proc")
require.NoError(t, err)
@@ -169,12 +195,12 @@ func TestNewProfilers(t *testing.T) {
collector.closeProfilers([]int{})
// check the map should not contain the proc
- _, ok = collector.perfHwProfilers.Load(os.Getpid())
+ _, ok = collector.perfHwProfilers[os.Getpid()]
assert.False(t, ok)
- _, ok = collector.perfSwProfilers.Load(os.Getpid())
+ _, ok = collector.perfSwProfilers[os.Getpid()]
assert.False(t, ok)
- _, ok = collector.perfCacheProfilers.Load(os.Getpid())
+ _, ok = collector.perfCacheProfilers[os.Getpid()]
assert.False(t, ok)
}
diff --git a/pkg/collector/slurm.go b/pkg/collector/slurm.go
index d92a0665..8791a4bd 100644
--- a/pkg/collector/slurm.go
+++ b/pkg/collector/slurm.go
@@ -5,20 +5,14 @@ package collector
import (
"context"
- "errors"
"fmt"
"io/fs"
- "math"
"os"
"path/filepath"
"slices"
- "strconv"
"strings"
"sync"
- "github.com/containerd/cgroups/v3"
- "github.com/containerd/cgroups/v3/cgroup1"
- "github.com/containerd/cgroups/v3/cgroup2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -27,42 +21,83 @@ import (
const (
slurmCollectorSubsystem = "slurm"
- genericSubsystem = "compute"
)
+// CLI opts.
var (
- cgroupsV1Subsystem = CEEMSExporterApp.Flag(
- "collector.slurm.cgroups-v1-subsystem",
- "Active cgroup subsystem for cgroups v1.",
- ).Default("cpuacct").String()
- collectSwapMemoryStatsDepre = CEEMSExporterApp.Flag(
+ // Perf opts.
+ slurmPerfHwProfilersFlag = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-hardware-events",
+ "Enables collection of perf hardware events (default: disabled)",
+ ).Default("false").Bool()
+ slurmPerfHwProfilers = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-hardware-profilers",
+ "perf hardware profilers to collect",
+ ).Strings()
+ slurmPerfSwProfilersFlag = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-software-events",
+ "Enables collection of perf software events (default: disabled)",
+ ).Default("false").Bool()
+ slurmPerfSwProfilers = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-software-profilers",
+ "perf software profilers to collect",
+ ).Strings()
+ slurmPerfCacheProfilersFlag = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-cache-events",
+ "Enables collection of perf cache events (default: disabled)",
+ ).Default("false").Bool()
+ slurmPerfCacheProfilers = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-cache-profilers",
+ "perf cache profilers to collect",
+ ).Strings()
+ slurmPerfProfilersEnvVars = CEEMSExporterApp.Flag(
+ "collector.slurm.perf-env-var",
+ "Processes having any of these environment variables set will be profiled. If empty, all processes will be profiled.",
+ ).Strings()
+
+ // ebpf opts.
+ slurmIOMetricsFlag = CEEMSExporterApp.Flag(
+ "collector.slurm.io-metrics",
+ "Enables collection of IO metrics using ebpf (default: disabled)",
+ ).Default("false").Bool()
+ slurmNetMetricsFlag = CEEMSExporterApp.Flag(
+ "collector.slurm.network-metrics",
+ "Enables collection of network metrics using ebpf (default: disabled)",
+ ).Default("false").Bool()
+ slurmFSMountPoints = CEEMSExporterApp.Flag(
+ "collector.slurm.fs-mount-point",
+ "File system mount points to monitor for IO stats. If empty all mount points are monitored. It is strongly advised to choose appropriate mount points to reduce cardinality.",
+ ).Strings()
+
+ // cgroup opts.
+ slurmCollectSwapMemoryStatsDepre = CEEMSExporterApp.Flag(
"collector.slurm.swap.memory.metrics",
"Enables collection of swap memory metrics (default: disabled)",
).Default("false").Hidden().Bool()
- collectSwapMemoryStats = CEEMSExporterApp.Flag(
+ slurmCollectSwapMemoryStats = CEEMSExporterApp.Flag(
"collector.slurm.swap-memory-metrics",
"Enables collection of swap memory metrics (default: disabled)",
).Default("false").Bool()
- collectPSIStatsDepre = CEEMSExporterApp.Flag(
+ slurmCollectPSIStatsDepre = CEEMSExporterApp.Flag(
"collector.slurm.psi.metrics",
"Enables collection of PSI metrics (default: disabled)",
).Default("false").Hidden().Bool()
- collectPSIStats = CEEMSExporterApp.Flag(
+ slurmCollectPSIStats = CEEMSExporterApp.Flag(
"collector.slurm.psi-metrics",
"Enables collection of PSI metrics (default: disabled)",
).Default("false").Bool()
+
+ // Generic.
+ slurmGPUStatPath = CEEMSExporterApp.Flag(
+ "collector.slurm.gpu-job-map-path",
+ "Path to file that maps GPU ordinals to job IDs.",
+ ).Default("/run/gpujobmap").String()
+
+ // Used for e2e tests.
gpuType = CEEMSExporterApp.Flag(
"collector.slurm.gpu-type",
"GPU device type. Currently only nvidia and amd devices are supported.",
).Hidden().Enum("nvidia", "amd")
- gpuStatPath = CEEMSExporterApp.Flag(
- "collector.slurm.gpu-job-map-path",
- "Path to file that maps GPU ordinals to job IDs.",
- ).Default("/run/gpujobmap").Hidden().String()
- forceCgroupsVersion = CEEMSExporterApp.Flag(
- "collector.slurm.force-cgroups-version",
- "Set cgroups version manually. Used only for testing.",
- ).Hidden().Enum("v1", "v2")
nvidiaSmiPath = CEEMSExporterApp.Flag(
"collector.slurm.nvidia-smi-path",
"Absolute path to nvidia-smi binary. Use only for testing.",
@@ -73,62 +108,34 @@ var (
).Hidden().Default("").String()
)
-// jobProps contains cachable SLURM job properties.
-type jobProps struct {
+// props contains SLURM job properties.
+type props struct {
uuid string // This is SLURM's job ID
gpuOrdinals []string // GPU ordinals bound to job
}
-// CgroupMetric contains metrics returned by cgroup.
-type CgroupMetric struct {
- path string
- cpuUser float64
- cpuSystem float64
- cpuTotal float64
- cpus int
- cpuPressure float64
- memoryRSS float64
- memoryCache float64
- memoryUsed float64
- memoryTotal float64
- memoryFailCount float64
- memswUsed float64
- memswTotal float64
- memswFailCount float64
- memoryPressure float64
- rdmaHCAHandles map[string]float64
- rdmaHCAObjects map[string]float64
- jobuuid string
- jobgpuordinals []string
- err bool
+// emptyGPUOrdinals returns true if gpuOrdinals is empty.
+func (p *props) emptyGPUOrdinals() bool {
+ return len(p.gpuOrdinals) == 0
+}
+
+type slurmMetrics struct {
+ cgMetrics []cgMetric
+ jobProps []props
}
type slurmCollector struct {
- cgroupFS cgroupFS
- hostname string
- gpuDevs map[int]Device
- hostMemTotal float64
- procFS procfs.FS
- numJobs *prometheus.Desc
- jobCPUUser *prometheus.Desc
- jobCPUSystem *prometheus.Desc
- jobCPUs *prometheus.Desc
- jobCPUPressure *prometheus.Desc
- jobMemoryRSS *prometheus.Desc
- jobMemoryCache *prometheus.Desc
- jobMemoryUsed *prometheus.Desc
- jobMemoryTotal *prometheus.Desc
- jobMemoryFailCount *prometheus.Desc
- jobMemswUsed *prometheus.Desc
- jobMemswTotal *prometheus.Desc
- jobMemswFailCount *prometheus.Desc
- jobMemoryPressure *prometheus.Desc
- jobRDMAHCAHandles *prometheus.Desc
- jobRDMAHCAObjects *prometheus.Desc
- jobGpuFlag *prometheus.Desc
- collectError *prometheus.Desc
- jobsCache map[string]jobProps
- logger log.Logger
+ logger log.Logger
+ cgroupManager *cgroupManager
+ cgroupCollector *cgroupCollector
+ perfCollector *perfCollector
+ ebpfCollector *ebpfCollector
+ hostname string
+ gpuDevs map[int]Device
+ procFS procfs.FS
+ jobGpuFlag *prometheus.Desc
+ collectError *prometheus.Desc
+ jobPropsCache map[string]props
}
func init() {
@@ -138,27 +145,85 @@ func init() {
// NewSlurmCollector returns a new Collector exposing a summary of cgroups.
func NewSlurmCollector(logger log.Logger) (Collector, error) {
// Log deprecation notices
- if *collectPSIStatsDepre {
+ if *slurmCollectPSIStatsDepre {
level.Warn(logger).
Log("msg", "flag --collector.slurm.psi.metrics has been deprecated. Use --collector.slurm.psi-metrics instead")
}
- if *collectSwapMemoryStatsDepre {
+ if *slurmCollectSwapMemoryStatsDepre {
level.Warn(logger).
Log("msg", "flag --collector.slurm.swap.memory.metrics has been deprecated. Use --collector.slurm.swap-memory-metrics instead")
}
// Get SLURM's cgroup details
- cgroupFS := slurmCgroupFS(*cgroupfsPath, *cgroupsV1Subsystem, *forceCgroupsVersion)
- level.Info(logger).Log("cgroup", cgroupFS.mode, "mount", cgroupFS.mount)
+ cgroupManager, err := NewCgroupManager("slurm")
+ if err != nil {
+ level.Info(logger).Log("msg", "Failed to create cgroup manager", "err", err)
+
+ return nil, err
+ }
+
+ level.Info(logger).Log("cgroup", cgroupManager)
+
+ // Set cgroup options
+ opts := cgroupOpts{
+ collectSwapMemStats: *slurmCollectSwapMemoryStatsDepre || *slurmCollectSwapMemoryStats,
+ collectPSIStats: *slurmCollectPSIStatsDepre || *slurmCollectPSIStats,
+ }
+
+ // Start new instance of cgroupCollector
+ cgCollector, err := NewCgroupCollector(logger, cgroupManager, opts)
+ if err != nil {
+ level.Info(logger).Log("msg", "Failed to create cgroup collector", "err", err)
+
+ return nil, err
+ }
+
+ // Start new instance of perfCollector
+ var perfCollector *perfCollector
+
+ if perfCollectorEnabled() {
+ perfOpts := perfOpts{
+ perfHwProfilersEnabled: *slurmPerfHwProfilersFlag,
+ perfSwProfilersEnabled: *slurmPerfSwProfilersFlag,
+ perfCacheProfilersEnabled: *slurmPerfCacheProfilersFlag,
+ perfHwProfilers: *slurmPerfHwProfilers,
+ perfSwProfilers: *slurmPerfSwProfilers,
+ perfCacheProfilers: *slurmPerfCacheProfilers,
+ targetEnvVars: *slurmPerfProfilersEnvVars,
+ }
+
+ perfCollector, err = NewPerfCollector(logger, cgroupManager, perfOpts)
+ if err != nil {
+ level.Info(logger).Log("msg", "Failed to create perf collector", "err", err)
+
+ return nil, err
+ }
+ }
+
+ // Start new instance of ebpfCollector
+ var ebpfCollector *ebpfCollector
+
+ if ebpfCollectorEnabled() {
+ ebpfOpts := ebpfOpts{
+ vfsStatsEnabled: *slurmIOMetricsFlag,
+ netStatsEnabled: *slurmNetMetricsFlag,
+ vfsMountPoints: *slurmFSMountPoints,
+ }
+
+ ebpfCollector, err = NewEbpfCollector(logger, cgroupManager, ebpfOpts)
+ if err != nil {
+ level.Info(logger).Log("msg", "Failed to create ebpf collector", "err", err)
+
+ return nil, err
+ }
+ }
// Attempt to get GPU devices
var gpuTypes []string
var gpuDevs map[int]Device
- var err error
-
if *gpuType != "" {
gpuTypes = []string{*gpuType}
} else {
@@ -182,129 +247,15 @@ func NewSlurmCollector(logger log.Logger) (Collector, error) {
return nil, err
}
- // Get total memory of host
- var memTotal float64
-
- file, err := os.Open(procFilePath("meminfo"))
- if err == nil {
- if memInfo, err := parseMemInfo(file); err == nil {
- memTotal = memInfo["MemTotal_bytes"]
- }
- } else {
- level.Error(logger).Log("msg", "Failed to get total memory of the host", "err", err)
- }
-
- defer file.Close()
-
return &slurmCollector{
- cgroupFS: cgroupFS,
- hostname: hostname,
- gpuDevs: gpuDevs,
- hostMemTotal: memTotal,
- procFS: procFS,
- jobsCache: make(map[string]jobProps),
- numJobs: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "units"),
- "Total number of jobs",
- []string{"manager", "hostname"},
- nil,
- ),
- jobCPUUser: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpu_user_seconds_total"),
- "Total job CPU user seconds",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobCPUSystem: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpu_system_seconds_total"),
- "Total job CPU system seconds",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- // cpuTotal: prometheus.NewDesc(
- // prometheus.BuildFQName(Namespace, genericSubsystem, "job_cpu_total_seconds"),
- // "Total job CPU total seconds",
- // []string{"manager", "hostname", "uuid"},
- // nil,
- // ),
- jobCPUs: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpus"),
- "Total number of job CPUs",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobCPUPressure: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_cpu_psi_seconds"),
- "Total CPU PSI in seconds",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemoryRSS: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_rss_bytes"),
- "Memory RSS used in bytes",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemoryCache: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_cache_bytes"),
- "Memory cache used in bytes",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemoryUsed: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_used_bytes"),
- "Memory used in bytes",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemoryTotal: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_total_bytes"),
- "Memory total in bytes",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemoryFailCount: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_fail_count"),
- "Memory fail count",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemswUsed: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memsw_used_bytes"),
- "Swap used in bytes",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemswTotal: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memsw_total_bytes"),
- "Swap total in bytes",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemswFailCount: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memsw_fail_count"),
- "Swap fail count",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobMemoryPressure: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_memory_psi_seconds"),
- "Total memory PSI in seconds",
- []string{"manager", "hostname", "uuid"},
- nil,
- ),
- jobRDMAHCAHandles: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_rdma_hca_handles"),
- "Current number of RDMA HCA handles",
- []string{"manager", "hostname", "uuid", "device"},
- nil,
- ),
- jobRDMAHCAObjects: prometheus.NewDesc(
- prometheus.BuildFQName(Namespace, genericSubsystem, "unit_rdma_hca_objects"),
- "Current number of RDMA HCA objects",
- []string{"manager", "hostname", "uuid", "device"},
- nil,
- ),
+ cgroupManager: cgroupManager,
+ cgroupCollector: cgCollector,
+ perfCollector: perfCollector,
+ ebpfCollector: ebpfCollector,
+ hostname: hostname,
+ gpuDevs: gpuDevs,
+ procFS: procFS,
+ jobPropsCache: make(map[string]props),
jobGpuFlag: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, genericSubsystem, "unit_gpu_index_flag"),
"Indicates running job on GPU, 1=job running",
@@ -330,110 +281,133 @@ func NewSlurmCollector(logger log.Logger) (Collector, error) {
// Update implements Collector and update job metrics.
func (c *slurmCollector) Update(ch chan<- prometheus.Metric) error {
- // Send job level metrics
- metrics, err := c.getJobsMetrics()
+ // Discover all active cgroups
+ metrics, err := c.discoverCgroups()
if err != nil {
return err
}
- // First send num jobs on the current host
- ch <- prometheus.MustNewConstMetric(c.numJobs, prometheus.GaugeValue, float64(len(metrics)), c.cgroupFS.manager, c.hostname)
+ // Start a wait group
+ wg := sync.WaitGroup{}
+ wg.Add(1)
- // Send metrics of each cgroup
- for _, m := range metrics {
- if m.err {
- ch <- prometheus.MustNewConstMetric(c.collectError, prometheus.GaugeValue, 1, c.manager, c.hostname, m.jobuuid)
- }
+ go func() {
+ defer wg.Done()
- // CPU stats
- ch <- prometheus.MustNewConstMetric(c.jobCPUUser, prometheus.CounterValue, m.cpuUser, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobCPUSystem, prometheus.CounterValue, m.cpuSystem, c.cgroupFS.manager, c.hostname, m.jobuuid)
- // ch <- prometheus.MustNewConstMetric(c.cpuTotal, prometheus.GaugeValue, m.cpuTotal, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobCPUs, prometheus.GaugeValue, float64(m.cpus), c.cgroupFS.manager, c.hostname, m.jobuuid)
-
- // Memory stats
- ch <- prometheus.MustNewConstMetric(c.jobMemoryRSS, prometheus.GaugeValue, m.memoryRSS, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryCache, prometheus.GaugeValue, m.memoryCache, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryUsed, prometheus.GaugeValue, m.memoryUsed, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryTotal, prometheus.GaugeValue, m.memoryTotal, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryFailCount, prometheus.GaugeValue, m.memoryFailCount, c.cgroupFS.manager, c.hostname, m.jobuuid)
-
- // PSI stats. Push them only if they are available
- if *collectSwapMemoryStatsDepre || *collectSwapMemoryStats {
- ch <- prometheus.MustNewConstMetric(c.jobMemswUsed, prometheus.GaugeValue, m.memswUsed, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemswTotal, prometheus.GaugeValue, m.memswTotal, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemswFailCount, prometheus.GaugeValue, m.memswFailCount, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ // Update cgroup metrics
+ if err := c.cgroupCollector.Update(ch, metrics.cgMetrics); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update cgroup stats", "err", err)
}
- if *collectPSIStatsDepre || *collectPSIStats {
- ch <- prometheus.MustNewConstMetric(c.jobCPUPressure, prometheus.GaugeValue, m.cpuPressure, c.cgroupFS.manager, c.hostname, m.jobuuid)
- ch <- prometheus.MustNewConstMetric(c.jobMemoryPressure, prometheus.GaugeValue, m.memoryPressure, c.cgroupFS.manager, c.hostname, m.jobuuid)
+ // Update slurm job GPU ordinals
+ if len(c.gpuDevs) > 0 {
+ c.updateGPUOrdinals(ch, metrics.jobProps)
}
+ }()
- // RDMA stats
- for device, handles := range m.rdmaHCAHandles {
- if handles > 0 {
- ch <- prometheus.MustNewConstMetric(c.jobRDMAHCAHandles, prometheus.GaugeValue, handles, c.cgroupFS.manager, c.hostname, m.jobuuid, device)
- }
- }
+ if perfCollectorEnabled() {
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
- for device, objects := range m.rdmaHCAHandles {
- if objects > 0 {
- ch <- prometheus.MustNewConstMetric(c.jobRDMAHCAObjects, prometheus.GaugeValue, objects, c.cgroupFS.manager, c.hostname, m.jobuuid, device)
+ // Update perf metrics
+ if err := c.perfCollector.Update(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update perf stats", "err", err)
}
- }
+ }()
+ }
- // GPU job mapping
- if len(c.gpuDevs) > 0 {
- for _, gpuOrdinal := range m.jobgpuordinals {
- var uuid string
- // Check the int index of devices where gpuOrdinal == dev.index
- for _, dev := range c.gpuDevs {
- if gpuOrdinal == dev.index {
- uuid = dev.uuid
-
- break
- }
- }
- ch <- prometheus.MustNewConstMetric(c.jobGpuFlag, prometheus.GaugeValue, float64(1), c.cgroupFS.manager, c.hostname, m.jobuuid, gpuOrdinal, fmt.Sprintf("%s-gpu-%s", c.hostname, gpuOrdinal), uuid)
+ if ebpfCollectorEnabled() {
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+
+ // Update ebpf metrics
+ if err := c.ebpfCollector.Update(ch); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to update IO and/or network stats", "err", err)
}
- }
+ }()
}
+ // Wait for all go routines
+ wg.Wait()
+
return nil
}
// Stop releases system resources used by the collector.
-func (c *slurmCollector) Stop(_ context.Context) error {
+func (c *slurmCollector) Stop(ctx context.Context) error {
level.Debug(c.logger).Log("msg", "Stopping", "collector", slurmCollectorSubsystem)
+ // Stop all sub collectors
+ // Stop cgroupCollector
+ if err := c.cgroupCollector.Stop(ctx); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to stop cgroup collector", "err", err)
+ }
+
+ // Stop perfCollector
+ if perfCollectorEnabled() {
+ if err := c.perfCollector.Stop(ctx); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to stop perf collector", "err", err)
+ }
+ }
+
+ // Stop ebpfCollector
+ if ebpfCollectorEnabled() {
+ if err := c.ebpfCollector.Stop(ctx); err != nil {
+ level.Error(c.logger).Log("msg", "Failed to stop ebpf collector", "err", err)
+ }
+ }
+
return nil
}
-// Get current Jobs metrics from cgroups.
-func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
+// updateGPUOrdinals updates the metrics channel with GPU ordinals for SLURM job.
+func (c *slurmCollector) updateGPUOrdinals(ch chan<- prometheus.Metric, jobProps []props) {
+ // Update slurm job properties
+ for _, p := range jobProps {
+ // GPU job mapping
+ for _, gpuOrdinal := range p.gpuOrdinals {
+ var gpuuuid string
+ // Check the int index of devices where gpuOrdinal == dev.index
+ for _, dev := range c.gpuDevs {
+ if gpuOrdinal == dev.index {
+ gpuuuid = dev.uuid
+
+ break
+ }
+ }
+ ch <- prometheus.MustNewConstMetric(c.jobGpuFlag, prometheus.GaugeValue, float64(1), c.cgroupManager.manager, c.hostname, p.uuid, gpuOrdinal, fmt.Sprintf("%s-gpu-%s", c.hostname, gpuOrdinal), gpuuuid)
+ }
+ }
+}
+
+// discoverCgroups finds active cgroup paths and returns initialised metric structs.
+func (c *slurmCollector) discoverCgroups() (slurmMetrics, error) {
// Get currently active jobs and set them in activeJobs state variable
var activeJobUUIDs []string
- var metrics []CgroupMetric
+ var jobProps []props
- var gpuOrdinals []string
+ var cgMetrics []cgMetric
- level.Debug(c.logger).Log("msg", "Loading cgroup", "path", c.cgroupFS.mount)
+ var gpuOrdinals []string
// Walk through all cgroups and get cgroup paths
- if err := filepath.WalkDir(c.cgroupFS.mount, func(p string, info fs.DirEntry, err error) error {
+ if err := filepath.WalkDir(c.cgroupManager.mountPoint, func(p string, info fs.DirEntry, err error) error {
if err != nil {
return err
}
// Ignore step jobs
- if !info.IsDir() || c.cgroupFS.pathFilter(p) {
+ if !info.IsDir() || c.cgroupManager.pathFilter(p) {
return nil
}
// Get relative path of cgroup
- rel, err := filepath.Rel(c.cgroupFS.root, p)
+ rel, err := filepath.Rel(c.cgroupManager.root, p)
if err != nil {
level.Error(c.logger).Log("msg", "Failed to resolve relative path for cgroup", "path", p, "err", err)
@@ -441,7 +415,7 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
}
// Get cgroup ID which is job ID
- cgroupIDMatches := c.cgroupFS.idRegex.FindStringSubmatch(p)
+ cgroupIDMatches := c.cgroupManager.idRegex.FindStringSubmatch(p)
if len(cgroupIDMatches) <= 1 {
return nil
}
@@ -460,134 +434,36 @@ func (c *slurmCollector) getJobsMetrics() ([]CgroupMetric, error) {
// Get GPU ordinals of the job
if len(c.gpuDevs) > 0 {
- if props, ok := c.jobsCache[jobuuid]; !ok || (ok && !c.containsGPUOrdinals(props)) {
+ if jProps, ok := c.jobPropsCache[jobuuid]; !ok || (ok && jProps.emptyGPUOrdinals()) {
gpuOrdinals = c.gpuOrdinals(jobuuid)
- c.jobsCache[jobuuid] = jobProps{uuid: jobuuid, gpuOrdinals: gpuOrdinals}
+ c.jobPropsCache[jobuuid] = props{uuid: jobuuid, gpuOrdinals: gpuOrdinals}
+ jobProps = append(jobProps, c.jobPropsCache[jobuuid])
} else {
- gpuOrdinals = c.jobsCache[jobuuid].gpuOrdinals
+ jobProps = append(jobProps, jProps)
}
}
activeJobUUIDs = append(activeJobUUIDs, jobuuid)
- metrics = append(metrics, CgroupMetric{jobuuid: jobuuid, path: "/" + rel, jobgpuordinals: gpuOrdinals})
+ cgMetrics = append(cgMetrics, cgMetric{uuid: jobuuid, path: "/" + rel})
level.Debug(c.logger).Log("msg", "cgroup path", "path", p)
return nil
}); err != nil {
level.Error(c.logger).
- Log("msg", "Error walking cgroup subsystem", "path", c.cgroupFS.mount, "err", err)
+ Log("msg", "Error walking cgroup subsystem", "path", c.cgroupManager.mountPoint, "err", err)
- return nil, err
+ return slurmMetrics{}, err
}
- // Remove expired jobs from jobsCache
- for uuid := range c.jobsCache {
+ // Remove expired jobs from jobPropsCache
+ for uuid := range c.jobPropsCache {
if !slices.Contains(activeJobUUIDs, uuid) {
- delete(c.jobsCache, uuid)
- }
- }
-
- // Start wait group for go routines
- wg := &sync.WaitGroup{}
- wg.Add(len(metrics))
-
- // No need for any lock primitives here as we read/write
- // a different element of slice in each go routine
- for i := range len(metrics) {
- go func(idx int) {
- defer wg.Done()
-
- c.getMetrics(&metrics[idx])
- }(i)
- }
-
- // Wait for all go routines
- wg.Wait()
-
- return metrics, nil
-}
-
-// getMetrics fetches metrics of a given SLURM cgroups path.
-func (c *slurmCollector) getMetrics(metric *CgroupMetric) {
- if c.cgroupFS.mode == cgroups.Unified {
- c.getCgroupsV2Metrics(metric)
- } else {
- c.getCgroupsV1Metrics(metric)
- }
-}
-
-// parseCPUSet parses cpuset.cpus file to return a list of CPUs in the cgroup.
-func (c *slurmCollector) parseCPUSet(cpuset string) ([]string, error) {
- var cpus []string
-
- var start, end int
-
- var err error
-
- if cpuset == "" {
- return nil, errors.New("empty cpuset file")
- }
-
- ranges := strings.Split(cpuset, ",")
- for _, r := range ranges {
- boundaries := strings.Split(r, "-")
- if len(boundaries) == 1 {
- start, err = strconv.Atoi(boundaries[0])
- if err != nil {
- return nil, err
- }
-
- end = start
- } else if len(boundaries) == 2 {
- start, err = strconv.Atoi(boundaries[0])
- if err != nil {
- return nil, err
- }
-
- end, err = strconv.Atoi(boundaries[1])
- if err != nil {
- return nil, err
- }
- }
-
- for e := start; e <= end; e++ {
- cpu := strconv.Itoa(e)
- cpus = append(cpus, cpu)
+ delete(c.jobPropsCache, uuid)
}
}
- return cpus, nil
-}
-
-// getCPUs returns list of CPUs in the cgroup.
-func (c *slurmCollector) getCPUs(path string) ([]string, error) {
- var cpusPath string
- if c.cgroupFS.mode == cgroups.Unified {
- cpusPath = fmt.Sprintf("%s%s/cpuset.cpus.effective", *cgroupfsPath, path)
- } else {
- cpusPath = fmt.Sprintf("%s/cpuset%s/cpuset.cpus", *cgroupfsPath, path)
- }
-
- if !fileExists(cpusPath) {
- return nil, fmt.Errorf("cpuset file %s not found", cpusPath)
- }
-
- cpusData, err := os.ReadFile(cpusPath)
- if err != nil {
- level.Error(c.logger).Log("msg", "Error reading cpuset", "cpuset", cpusPath, "err", err)
-
- return nil, err
- }
-
- cpus, err := c.parseCPUSet(strings.TrimSuffix(string(cpusData), "\n"))
- if err != nil {
- level.Error(c.logger).Log("msg", "Error parsing cpuset", "cpuset", cpusPath, "err", err)
-
- return nil, err
- }
-
- return cpus, nil
+ return slurmMetrics{cgMetrics: cgMetrics, jobProps: jobProps}, nil
}
// gpuOrdinalsFromProlog returns GPU ordinals of jobs from prolog generated run time files by SLURM.
@@ -609,7 +485,7 @@ func (c *slurmCollector) gpuOrdinalsFromProlog(uuid string) []string {
// typically 2/4/8 GPUs per node.
for i := range c.gpuDevs {
dev := c.gpuDevs[i]
- gpuJobMapInfo := fmt.Sprintf("%s/%s", *gpuStatPath, dev.index)
+ gpuJobMapInfo := fmt.Sprintf("%s/%s", *slurmGPUStatPath, dev.index)
// NOTE: Look for file name with UUID as it will be more appropriate with
// MIG instances.
@@ -706,15 +582,9 @@ func (c *slurmCollector) gpuOrdinalsFromEnviron(uuid string) []string {
// Wait for all go routines to finish
wg.Wait()
- // Set jobProps fields
return gpuOrdinals
}
-// containsGPUOrdinals returns true if jobProps has gpuOrdinals populated.
-func (c *slurmCollector) containsGPUOrdinals(p jobProps) bool {
- return len(c.gpuDevs) > 0 && len(p.gpuOrdinals) == 0
-}
-
// gpuOrdinals returns GPU ordinals bound to current job.
func (c *slurmCollector) gpuOrdinals(uuid string) []string {
var gpuOrdinals []string
@@ -741,198 +611,12 @@ func (c *slurmCollector) gpuOrdinals(uuid string) []string {
return gpuOrdinals
}
-// Get metrics from cgroups v1.
-func (c *slurmCollector) getCgroupsV1Metrics(metric *CgroupMetric) {
- path := metric.path
- // metric := CgroupMetric{path: path, jobuuid: job.uuid}
-
- level.Debug(c.logger).Log("msg", "Loading cgroup v1", "path", path)
-
- ctrl, err := cgroup1.Load(cgroup1.StaticPath(path), cgroup1.WithHierarchy(subsystem))
- if err != nil {
- metric.err = true
-
- level.Error(c.logger).Log("msg", "Failed to load cgroups", "path", path, "err", err)
-
- return
- }
-
- // Load cgroup stats
- stats, err := ctrl.Stat(cgroup1.IgnoreNotExist)
- if err != nil {
- metric.err = true
-
- level.Error(c.logger).Log("msg", "Failed to stat cgroups", "path", path, "err", err)
-
- return
- }
-
- if stats == nil {
- metric.err = true
-
- level.Error(c.logger).Log("msg", "Cgroup stats are nil", "path", path)
-
- return
- }
-
- // Get CPU stats
- if stats.GetCPU() != nil {
- if stats.GetCPU().GetUsage() != nil {
- metric.cpuUser = float64(stats.GetCPU().GetUsage().GetUser()) / 1000000000.0
- metric.cpuSystem = float64(stats.GetCPU().GetUsage().GetKernel()) / 1000000000.0
- metric.cpuTotal = float64(stats.GetCPU().GetUsage().GetTotal()) / 1000000000.0
- }
- }
-
- if cpus, err := c.getCPUs(path); err == nil {
- metric.cpus = len(cpus)
- }
-
- // Get memory stats
- if stats.GetMemory() != nil {
- metric.memoryRSS = float64(stats.GetMemory().GetTotalRSS())
- metric.memoryCache = float64(stats.GetMemory().GetTotalCache())
-
- if stats.GetMemory().GetUsage() != nil {
- metric.memoryUsed = float64(stats.GetMemory().GetUsage().GetUsage())
- // If memory usage limit is set as "max", cgroups lib will set it to
- // math.MaxUint64. Here we replace it with total system memory
- if stats.GetMemory().GetUsage().GetLimit() == math.MaxUint64 && c.hostMemTotal != 0 {
- metric.memoryTotal = c.hostMemTotal
- } else {
- metric.memoryTotal = float64(stats.GetMemory().GetUsage().GetLimit())
- }
-
- metric.memoryFailCount = float64(stats.GetMemory().GetUsage().GetFailcnt())
- }
-
- if stats.GetMemory().GetSwap() != nil {
- metric.memswUsed = float64(stats.GetMemory().GetSwap().GetUsage())
- // If memory usage limit is set as "max", cgroups lib will set it to
- // math.MaxUint64. Here we replace it with total system memory
- if stats.GetMemory().GetSwap().GetLimit() == math.MaxUint64 && c.hostMemTotal != 0 {
- metric.memswTotal = c.hostMemTotal
- } else {
- metric.memswTotal = float64(stats.GetMemory().GetSwap().GetLimit())
- }
-
- metric.memswFailCount = float64(stats.GetMemory().GetSwap().GetFailcnt())
- }
- }
-
- // Get RDMA metrics if available
- if stats.GetRdma() != nil {
- metric.rdmaHCAHandles = make(map[string]float64)
- metric.rdmaHCAObjects = make(map[string]float64)
-
- for _, device := range stats.GetRdma().GetCurrent() {
- metric.rdmaHCAHandles[device.GetDevice()] = float64(device.GetHcaHandles())
- metric.rdmaHCAObjects[device.GetDevice()] = float64(device.GetHcaObjects())
- }
- }
+// perfCollectorEnabled returns true if any of the perf profilers are enabled.
+func perfCollectorEnabled() bool {
+ return *slurmPerfHwProfilersFlag || *slurmPerfSwProfilersFlag || *slurmPerfCacheProfilersFlag
}
-// Get Job metrics from cgroups v2.
-func (c *slurmCollector) getCgroupsV2Metrics(metric *CgroupMetric) {
- path := metric.path
- // metric := CgroupMetric{path: path, jobuuid: job.uuid}
-
- level.Debug(c.logger).Log("msg", "Loading cgroup v2", "path", path)
-
- // Load cgroups
- ctrl, err := cgroup2.Load(path, cgroup2.WithMountpoint(*cgroupfsPath))
- if err != nil {
- metric.err = true
-
- level.Error(c.logger).Log("msg", "Failed to load cgroups", "path", path, "err", err)
-
- return
- }
-
- // Get stats from cgroup
- stats, err := ctrl.Stat()
- if err != nil {
- metric.err = true
-
- level.Error(c.logger).Log("msg", "Failed to stat cgroups", "path", path, "err", err)
-
- return
- }
-
- if stats == nil {
- metric.err = true
-
- level.Error(c.logger).Log("msg", "Cgroup stats are nil", "path", path)
-
- return
- }
-
- // Get CPU stats
- if stats.GetCPU() != nil {
- metric.cpuUser = float64(stats.GetCPU().GetUserUsec()) / 1000000.0
- metric.cpuSystem = float64(stats.GetCPU().GetSystemUsec()) / 1000000.0
- metric.cpuTotal = float64(stats.GetCPU().GetUsageUsec()) / 1000000.0
-
- if stats.GetCPU().GetPSI() != nil {
- metric.cpuPressure = float64(stats.GetCPU().GetPSI().GetFull().GetTotal()) / 1000000.0
- }
- }
-
- if cpus, err := c.getCPUs(path); err == nil {
- metric.cpus = len(cpus)
- }
-
- // Get memory stats
- // cgroups2 does not expose swap memory events. So we dont set memswFailCount
- if stats.GetMemory() != nil {
- metric.memoryUsed = float64(stats.GetMemory().GetUsage())
- // If memory usage limit is set as "max", cgroups lib will set it to
- // math.MaxUint64. Here we replace it with total system memory
- if stats.GetMemory().GetUsageLimit() == math.MaxUint64 && c.hostMemTotal > 0 {
- metric.memoryTotal = c.hostMemTotal
- } else {
- metric.memoryTotal = float64(stats.GetMemory().GetUsageLimit())
- }
-
- metric.memoryCache = float64(stats.GetMemory().GetFile()) // This is page cache
- metric.memoryRSS = float64(stats.GetMemory().GetAnon())
- metric.memswUsed = float64(stats.GetMemory().GetSwapUsage())
- // If memory usage limit is set as "max", cgroups lib will set it to
- // math.MaxUint64. Here we replace it with total system memory
- if stats.GetMemory().GetSwapLimit() == math.MaxUint64 && c.hostMemTotal > 0 {
- metric.memswTotal = c.hostMemTotal
- } else {
- metric.memswTotal = float64(stats.GetMemory().GetSwapLimit())
- }
-
- if stats.GetMemory().GetPSI() != nil {
- metric.memoryPressure = float64(stats.GetMemory().GetPSI().GetFull().GetTotal()) / 1000000.0
- }
- }
- // Get memory events
- if stats.GetMemoryEvents() != nil {
- metric.memoryFailCount = float64(stats.GetMemoryEvents().GetOom())
- }
-
- // Get RDMA stats
- if stats.GetRdma() != nil {
- metric.rdmaHCAHandles = make(map[string]float64)
- metric.rdmaHCAObjects = make(map[string]float64)
-
- for _, device := range stats.GetRdma().GetCurrent() {
- metric.rdmaHCAHandles[device.GetDevice()] = float64(device.GetHcaHandles())
- metric.rdmaHCAObjects[device.GetDevice()] = float64(device.GetHcaObjects())
- }
- }
-}
-
-// subsystem returns cgroups v1 subsystems.
-func subsystem() ([]cgroup1.Subsystem, error) {
- s := []cgroup1.Subsystem{
- cgroup1.NewCpuacct(*cgroupfsPath),
- cgroup1.NewMemory(*cgroupfsPath),
- cgroup1.NewRdma(*cgroupfsPath),
- }
-
- return s, nil
+// ebpfCollectorEnabled returns true if any of the ebpf stats are enabled.
+func ebpfCollectorEnabled() bool {
+ return *slurmIOMetricsFlag || *slurmNetMetricsFlag
}
diff --git a/pkg/collector/slurm_test.go b/pkg/collector/slurm_test.go
index 0c8208be..02cc9914 100644
--- a/pkg/collector/slurm_test.go
+++ b/pkg/collector/slurm_test.go
@@ -8,8 +8,10 @@ import (
"fmt"
"os"
"strconv"
+ "strings"
"testing"
+ "github.com/containerd/cgroups/v3"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
@@ -17,8 +19,6 @@ import (
"github.com/stretchr/testify/require"
)
-var expectedSlurmMetrics CgroupMetric
-
func mockGPUDevices() map[int]Device {
devs := make(map[int]Device, 4)
@@ -37,8 +37,9 @@ func TestNewSlurmCollector(t *testing.T) {
"--collector.slurm.gpu-job-map-path", "testdata/gpujobmap",
"--collector.slurm.swap-memory-metrics",
"--collector.slurm.psi-metrics",
+ "--collector.slurm.perf-hardware-events",
"--collector.slurm.nvidia-smi-path", "testdata/nvidia-smi",
- "--collector.slurm.force-cgroups-version", "v2",
+ "--collector.cgroups.force-version", "v2",
},
)
require.NoError(t, err)
@@ -64,174 +65,58 @@ func TestNewSlurmCollector(t *testing.T) {
require.NoError(t, err)
}
-func TestCgroupsV2SlurmJobMetrics(t *testing.T) {
+func TestSlurmJobPropsWithProlog(t *testing.T) {
_, err := CEEMSExporterApp.Parse(
[]string{
"--path.cgroupfs", "testdata/sys/fs/cgroup",
"--collector.slurm.gpu-job-map-path", "testdata/gpujobmap",
+ "--collector.cgroups.force-version", "v2",
},
)
require.NoError(t, err)
- c := slurmCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
- gpuDevs: mockGPUDevices(),
- hostMemTotal: float64(123456),
- logger: log.NewNopLogger(),
- jobsCache: make(map[string]jobProps),
- }
-
- expectedSlurmMetrics = CgroupMetric{
- path: "/system.slice/slurmstepd.scope/job_1009249",
- cpuUser: 60375.292848,
- cpuSystem: 115.777502,
- cpuTotal: 60491.070351,
- cpus: 2,
- cpuPressure: 0,
- memoryRSS: 4.098592768e+09,
- memoryCache: 0,
- memoryUsed: 4.111491072e+09,
- memoryTotal: 4.294967296e+09,
- memoryFailCount: 0,
- memswUsed: 0,
- memswTotal: 123456,
- memswFailCount: 0,
- memoryPressure: 0,
- rdmaHCAHandles: map[string]float64{"hfi1_0": 479, "hfi1_1": 1479, "hfi1_2": 2479},
- rdmaHCAObjects: map[string]float64{"hfi1_0": 340, "hfi1_1": 1340, "hfi1_2": 2340},
- jobuuid: "1009249",
- jobgpuordinals: []string{"0"},
- err: false,
- }
-
- metrics, err := c.getJobsMetrics()
- require.NoError(t, err)
-
- var gotMetric CgroupMetric
-
- for _, metric := range metrics {
- if metric.jobuuid == expectedSlurmMetrics.jobuuid {
- gotMetric = metric
- }
- }
-
- assert.Equal(t, expectedSlurmMetrics, gotMetric)
-}
-
-func TestCgroupsV2SlurmJobMetricsWithProcFs(t *testing.T) {
- _, err := CEEMSExporterApp.Parse(
- []string{
- "--path.cgroupfs", "testdata/sys/fs/cgroup",
- "--path.procfs", "testdata/proc",
+ // cgroup Manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Unified,
+ mountPoint: "testdata/sys/fs/cgroup/system.slice/slurmstepd.scope",
+ idRegex: slurmCgroupPathRegex,
+ pathFilter: func(p string) bool {
+ return strings.Contains(p, "/step_")
},
- )
- require.NoError(t, err)
-
- procFS, err := procfs.NewFS(*procfsPath)
- require.NoError(t, err)
-
- c := slurmCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
- gpuDevs: mockGPUDevices(),
- hostMemTotal: float64(123456),
- logger: log.NewNopLogger(),
- jobsCache: make(map[string]jobProps),
- procFS: procFS,
- }
-
- expectedSlurmMetrics = CgroupMetric{
- path: "/system.slice/slurmstepd.scope/job_1009248",
- cpuUser: 60375.292848,
- cpuSystem: 115.777502,
- cpuTotal: 60491.070351,
- cpus: 2,
- cpuPressure: 0,
- memoryRSS: 4.098592768e+09,
- memoryCache: 0,
- memoryUsed: 4.111491072e+09,
- memoryTotal: 4.294967296e+09,
- memoryFailCount: 0,
- memswUsed: 0,
- memswTotal: 123456,
- memswFailCount: 0,
- memoryPressure: 0,
- rdmaHCAHandles: make(map[string]float64),
- rdmaHCAObjects: make(map[string]float64),
- jobuuid: "1009248",
- jobgpuordinals: []string{"2", "3"},
- err: false,
- }
-
- metrics, err := c.getJobsMetrics()
- require.NoError(t, err)
-
- var gotMetric CgroupMetric
-
- for _, metric := range metrics {
- if metric.jobuuid == expectedSlurmMetrics.jobuuid {
- gotMetric = metric
- }
}
- assert.Equal(t, expectedSlurmMetrics, gotMetric)
-}
-
-func TestCgroupsV2SlurmJobMetricsNoJobProps(t *testing.T) {
- _, err := CEEMSExporterApp.Parse(
- []string{
- "--path.cgroupfs", "testdata/sys/fs/cgroup",
- },
- )
- require.NoError(t, err)
-
c := slurmCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "", "v2"),
- gpuDevs: mockGPUDevices(),
- logger: log.NewNopLogger(),
- jobsCache: make(map[string]jobProps),
+ gpuDevs: mockGPUDevices(),
+ logger: log.NewNopLogger(),
+ cgroupManager: cgManager,
+ jobPropsCache: make(map[string]props),
}
- expectedSlurmMetrics = CgroupMetric{
- path: "/system.slice/slurmstepd.scope/job_1009248",
- cpuUser: 60375.292848,
- cpuSystem: 115.777502,
- cpuTotal: 60491.070351,
- cpus: 2,
- cpuPressure: 0,
- memoryRSS: 4.098592768e+09,
- memoryCache: 0,
- memoryUsed: 4.111491072e+09,
- memoryTotal: 4.294967296e+09,
- memoryFailCount: 0,
- memswUsed: 0,
- memswTotal: 1.8446744073709552e+19,
- memswFailCount: 0,
- memoryPressure: 0,
- rdmaHCAHandles: make(map[string]float64),
- rdmaHCAObjects: make(map[string]float64),
- jobuuid: "1009248",
- err: false,
+ expectedProps := props{
+ gpuOrdinals: []string{"0"},
+ uuid: "1009249",
}
- metrics, err := c.getJobsMetrics()
+ metrics, err := c.discoverCgroups()
require.NoError(t, err)
- var gotMetric CgroupMetric
+ var gotProps props
- for _, metric := range metrics {
- if metric.jobuuid == expectedSlurmMetrics.jobuuid {
- gotMetric = metric
+ for _, props := range metrics.jobProps {
+ if props.uuid == expectedProps.uuid {
+ gotProps = props
}
}
- assert.Equal(t, expectedSlurmMetrics, gotMetric)
+ assert.Equal(t, expectedProps, gotProps)
}
-func TestCgroupsV1SlurmJobMetrics(t *testing.T) {
+func TestSlurmJobPropsWithProcsFS(t *testing.T) {
_, err := CEEMSExporterApp.Parse(
[]string{
"--path.cgroupfs", "testdata/sys/fs/cgroup",
"--path.procfs", "testdata/proc",
+ "--collector.cgroups.force-version", "v1",
},
)
require.NoError(t, err)
@@ -239,49 +124,41 @@ func TestCgroupsV1SlurmJobMetrics(t *testing.T) {
procFS, err := procfs.NewFS(*procfsPath)
require.NoError(t, err)
+ // cgroup Manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Legacy,
+ mountPoint: "testdata/sys/fs/cgroup/cpuacct/slurm",
+ idRegex: slurmCgroupPathRegex,
+ pathFilter: func(p string) bool {
+ return strings.Contains(p, "/step_")
+ },
+ }
+
c := slurmCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "cpuacct", "v1"),
- logger: log.NewNopLogger(),
- gpuDevs: mockGPUDevices(),
- jobsCache: make(map[string]jobProps),
- procFS: procFS,
+ cgroupManager: cgManager,
+ gpuDevs: mockGPUDevices(),
+ logger: log.NewNopLogger(),
+ jobPropsCache: make(map[string]props),
+ procFS: procFS,
}
- expectedSlurmMetrics = CgroupMetric{
- path: "/slurm/uid_1000/job_1009248",
- cpuUser: 0.39,
- cpuSystem: 0.45,
- cpuTotal: 1.012410966,
- cpus: 0,
- cpuPressure: 0,
- memoryRSS: 1.0407936e+07,
- memoryCache: 2.1086208e+07,
- memoryUsed: 4.0194048e+07,
- memoryTotal: 2.01362030592e+11,
- memoryFailCount: 0,
- memswUsed: 4.032512e+07,
- memswTotal: 9.223372036854772e+18,
- memswFailCount: 0,
- memoryPressure: 0,
- rdmaHCAHandles: map[string]float64{"hfi1_0": 479, "hfi1_1": 1479, "hfi1_2": 2479},
- rdmaHCAObjects: map[string]float64{"hfi1_0": 340, "hfi1_1": 1340, "hfi1_2": 2340},
- jobuuid: "1009248",
- jobgpuordinals: []string{"2", "3"},
- err: false,
+ expectedProps := props{
+ uuid: "1009248",
+ gpuOrdinals: []string{"2", "3"},
}
- metrics, err := c.getJobsMetrics()
+ metrics, err := c.discoverCgroups()
require.NoError(t, err)
- var gotMetric CgroupMetric
+ var gotProps props
- for _, metric := range metrics {
- if metric.jobuuid == expectedSlurmMetrics.jobuuid {
- gotMetric = metric
+ for _, props := range metrics.jobProps {
+ if props.uuid == expectedProps.uuid {
+ gotProps = props
}
}
- assert.Equal(t, expectedSlurmMetrics, gotMetric)
+ assert.Equal(t, expectedProps, gotProps)
}
func TestJobPropsCaching(t *testing.T) {
@@ -303,12 +180,23 @@ func TestJobPropsCaching(t *testing.T) {
)
require.NoError(t, err)
+ // cgroup Manager
+ cgManager := &cgroupManager{
+ mode: cgroups.Legacy,
+ root: cgroupsPath,
+ idRegex: slurmCgroupPathRegex,
+ mountPoint: cgroupsPath + "/cpuacct/slurm",
+ pathFilter: func(p string) bool {
+ return false
+ },
+ }
+
mockGPUDevs := mockGPUDevices()
c := slurmCollector{
- cgroupFS: slurmCgroupFS(*cgroupfsPath, "cpuacct", "v1"),
- logger: log.NewNopLogger(),
- gpuDevs: mockGPUDevs,
- jobsCache: make(map[string]jobProps),
+ cgroupManager: cgManager,
+ logger: log.NewNopLogger(),
+ gpuDevs: mockGPUDevs,
+ jobPropsCache: make(map[string]props),
}
// Add cgroups
@@ -325,16 +213,16 @@ func TestJobPropsCaching(t *testing.T) {
require.NoError(t, err)
}
- // Now call get metrics which should populate jobsCache
- _, err = c.getJobsMetrics()
+ // Now call get metrics which should populate jobPropsCache
+ _, err = c.discoverCgroups()
require.NoError(t, err)
- // Check if jobsCache has 20 jobs and GPU ordinals are correct
- assert.Len(t, c.jobsCache, 20)
+ // Check if jobPropsCache has 20 jobs and GPU ordinals are correct
+ assert.Len(t, c.jobPropsCache, 20)
for igpu := range mockGPUDevs {
gpuIDString := strconv.FormatInt(int64(igpu), 10)
- assert.Equal(t, []string{gpuIDString}, c.jobsCache[gpuIDString].gpuOrdinals)
+ assert.Equal(t, []string{gpuIDString}, c.jobPropsCache[gpuIDString].gpuOrdinals)
}
// Remove first 10 jobs and add new 10 more jobs
@@ -352,14 +240,14 @@ func TestJobPropsCaching(t *testing.T) {
require.NoError(t, err)
}
- // Now call again get metrics which should populate jobsCache
- _, err = c.getJobsMetrics()
+ // Now call again get metrics which should populate jobPropsCache
+ _, err = c.discoverCgroups()
require.NoError(t, err)
- // Check if jobsCache has only 15 jobs and GPU ordinals are empty
- assert.Len(t, c.jobsCache, 15)
+ // Check if jobPropsCache has only 15 jobs and GPU ordinals are empty
+ assert.Len(t, c.jobPropsCache, 15)
- for _, p := range c.jobsCache {
+ for _, p := range c.jobPropsCache {
assert.Empty(t, p.gpuOrdinals)
}
}
From 85bb33aee0fee7462acf5eb1974cbff4f204b2d8 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Sun, 15 Sep 2024 18:29:08 +0200
Subject: [PATCH 14/18] ci: Install clang in CI workflows
* This keeps the repo Makefile simpler for everyday development
Signed-off-by: Mahendra Paipuri
---
.github/workflows/step_tests-e2e.yml | 3 +++
.github/workflows/step_tests-unit.yml | 3 +++
pkg/collector/testdata/proc.ttar | 18 ++++++++++++++++++
scripts/e2e-test.sh | 16 ++++++++--------
scripts/install_clang.sh | 3 +++
5 files changed, 35 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/step_tests-e2e.yml b/.github/workflows/step_tests-e2e.yml
index e281fbbd..a4c463e6 100644
--- a/.github/workflows/step_tests-e2e.yml
+++ b/.github/workflows/step_tests-e2e.yml
@@ -17,6 +17,9 @@ jobs:
with:
go-version: 1.22.x
+ - name: Setup clang 18
+ run: ./scripts/install_clang.sh
+
- name: Run e2e tests for Go packages
run: make test-e2e
diff --git a/.github/workflows/step_tests-unit.yml b/.github/workflows/step_tests-unit.yml
index 2c1a7972..5e98fd10 100644
--- a/.github/workflows/step_tests-unit.yml
+++ b/.github/workflows/step_tests-unit.yml
@@ -17,6 +17,9 @@ jobs:
with:
go-version: 1.22.x
+ - name: Setup clang 18
+ run: ./scripts/install_clang.sh
+
- name: Run checkmetrics and checkrules
run: make checkmetrics checkrules
diff --git a/pkg/collector/testdata/proc.ttar b/pkg/collector/testdata/proc.ttar
index ff1d2ccc..60f69947 100644
--- a/pkg/collector/testdata/proc.ttar
+++ b/pkg/collector/testdata/proc.ttar
@@ -9233,6 +9233,24 @@ Node 0, zone DMA32 759 572 791 475 194 45 12 0
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: proc/cgroups
+Lines: 14
+#subsys_name hierarchy num_cgroups enabled
+cpuset 5 3 1
+cpu 6 3 1
+cpuacct 6 3 1
+blkio 12 1 1
+memory 7 123 1
+devices 11 51 1
+freezer 2 3 1
+net_cls 4 3 1
+perf_event 3 1 1
+net_prio 4 3 1
+hugetlb 8 1 1
+pids 9 56 1
+rdma 10 1 1
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: proc/cmdline
Lines: 1
BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7
diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh
index 90ffa223..b3967710 100755
--- a/scripts/e2e-test.sh
+++ b/scripts/e2e-test.sh
@@ -304,10 +304,10 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v1" \
--collector.slurm \
--collector.slurm.gpu-type="nvidia" \
--collector.slurm.nvidia-smi-path="pkg/collector/testdata/nvidia-smi" \
- --collector.slurm.force-cgroups-version="v1" \
--collector.slurm.gpu-job-map-path="pkg/collector/testdata/gpujobmap" \
--collector.ipmi.dcmi.cmd="pkg/collector/testdata/ipmi/freeipmi/ipmi-dcmi" \
--collector.empty-hostname-label \
@@ -321,11 +321,11 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v1" \
+ --collector.cgroup.active-subsystem="memory" \
--collector.slurm \
- --collector.slurm.cgroups-v1-subsystem="memory" \
--collector.slurm.gpu-type="nvidia" \
--collector.slurm.nvidia-smi-path="pkg/collector/testdata/nvidia-smi" \
- --collector.slurm.force-cgroups-version="v1" \
--collector.slurm.gpu-job-map-path="pkg/collector/testdata/gpujobmap" \
--collector.ipmi.dcmi.cmd="pkg/collector/testdata/ipmi/freeipmi/ipmi-dcmi" \
--collector.empty-hostname-label \
@@ -339,10 +339,10 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v2" \
--collector.slurm \
--collector.slurm.gpu-type="nvidia" \
--collector.slurm.nvidia-smi-path="pkg/collector/testdata/nvidia-smi" \
- --collector.slurm.force-cgroups-version="v2" \
--collector.slurm.gpu-job-map-path="pkg/collector/testdata/gpujobmap" \
--collector.empty-hostname-label \
--web.listen-address "127.0.0.1:${port}" \
@@ -355,10 +355,10 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v2" \
--collector.slurm \
--collector.slurm.gpu-type="amd" \
--collector.slurm.rocm-smi-path="pkg/collector/testdata/rocm-smi" \
- --collector.slurm.force-cgroups-version="v2" \
--collector.slurm.gpu-job-map-path="pkg/collector/testdata/gpujobmap" \
--collector.empty-hostname-label \
--web.listen-address "127.0.0.1:${port}" \
@@ -371,8 +371,8 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v2" \
--collector.slurm \
- --collector.slurm.force-cgroups-version="v2" \
--collector.empty-hostname-label \
--web.listen-address "127.0.0.1:${port}" \
--web.disable-exporter-metrics \
@@ -384,10 +384,10 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v2" \
--collector.slurm \
--collector.slurm.gpu-type="nvidia" \
--collector.slurm.nvidia-smi-path="pkg/collector/testdata/nvidia-smi" \
- --collector.slurm.force-cgroups-version="v2" \
--collector.ipmi.dcmi.cmd="pkg/collector/testdata/ipmi/ipmiutils/ipmiutil" \
--collector.empty-hostname-label \
--web.listen-address "127.0.0.1:${port}" \
@@ -400,10 +400,10 @@ then
--path.sysfs="pkg/collector/testdata/sys" \
--path.cgroupfs="pkg/collector/testdata/sys/fs/cgroup" \
--path.procfs="pkg/collector/testdata/proc" \
+ --collector.cgroups.force-version="v2" \
--collector.slurm \
--collector.slurm.gpu-type="amd" \
--collector.slurm.rocm-smi-path="pkg/collector/testdata/rocm-smi" \
- --collector.slurm.force-cgroups-version="v2" \
--collector.slurm.gpu-job-map-path="pkg/collector/testdata/gpujobmap" \
--collector.slurm.swap.memory.metrics \
--collector.slurm.psi.metrics \
diff --git a/scripts/install_clang.sh b/scripts/install_clang.sh
index c1c10869..d13c1aa8 100755
--- a/scripts/install_clang.sh
+++ b/scripts/install_clang.sh
@@ -1,6 +1,9 @@
#!/bin/bash
set -exo pipefail
+# This script only works for Ubuntu derivatives and it is meant to be
+# used in CI to install clang in golang builder containers.
+
# Check if clang exists. If it exists, we need to ensure that it
# is at least of version >= 18
if [ -x "$(command -v clang)" ]; then
From 704133aded3af213eb8eecefedc9f708adacf133 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Sun, 15 Sep 2024 18:43:32 +0200
Subject: [PATCH 15/18] ci: Install clang in CI workflows
* Add an empty bpf target for CGO_BUILD apps
Signed-off-by: Mahendra Paipuri
---
.circleci/config.yml | 2 ++
.github/workflows/codeql.yml | 2 ++
Makefile.common | 4 ++--
3 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index fadf00d2..02a5b646 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -19,6 +19,7 @@ jobs:
steps:
- prometheus/setup_environment
- run: go mod download
+ - run: GOARCH=1 make clang
- run: make
- run: CGO_BUILD=1 make
test-arm:
@@ -26,6 +27,7 @@ jobs:
steps:
- checkout
- run: uname -a
+ - run: GOARCH=1 make clang
- run: make
- run: CGO_BUILD=1 make
build:
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index e0faabad..eb5396e2 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -54,6 +54,8 @@ jobs:
shell: bash
if: ${{ matrix.language == 'go' }}
run: |
+ echo 'Installing clang 18'
+ GOARCH=1 make clang
echo 'Building pure go binaries'
make build
echo 'Building cgo binaries'
diff --git a/Makefile.common b/Makefile.common
index 86830e4f..735eb81c 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -253,10 +253,10 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
-# Build bpf assets only when CGO_BUILD=0
-ifeq ($(CGO_BUILD), 0)
# Build bpf assets
.PHONY: bpf
+# Build bpf assets only when CGO_BUILD=0
+ifeq ($(CGO_BUILD), 0)
bpf: clang bpfclean
@echo ">> building bpf assets using clang"
$(MAKE) -C ./pkg/collector/bpf
From e54f71c13a99870ec7efa430cc7373567b5cc344 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Tue, 17 Sep 2024 17:37:30 +0200
Subject: [PATCH 16/18] chore: Increase max entries for maps to get more
predictable eviction behaviour
* Keep CPU specific LRU cache for better performance
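For reference, the BTF-style map definition this change relies on looks roughly like
the sketch below (map, key and value names are illustrative rather than the exporter's
actual maps; it assumes vmlinux.h and the standard libbpf helper macros):

    /* Sketch: larger LRU hash map using per-CPU LRU lists */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_LRU_HASH);
        __uint(max_entries, 4096);              /* MAX_MAP_ENTRIES */
        __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
        __type(key, __u32);
        __type(value, __u64);
    } example_accumulator SEC(".maps");

With BPF_F_NO_COMMON_LRU each CPU maintains its own LRU list, which avoids cross-CPU
contention when updating the accumulators at the cost of a less globally accurate
eviction order.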
Signed-off-by: Mahendra Paipuri
---
pkg/collector/bpf/include/compiler.h | 12 +++++++++++-
pkg/collector/bpf/network/bpf_network.h | 3 +++
pkg/collector/bpf/vfs/bpf_vfs.h | 5 +++++
3 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/pkg/collector/bpf/include/compiler.h b/pkg/collector/bpf/include/compiler.h
index 89bb1968..43711056 100644
--- a/pkg/collector/bpf/include/compiler.h
+++ b/pkg/collector/bpf/include/compiler.h
@@ -1,7 +1,17 @@
/* SPDX-License-Identifier: (GPL-3.0-only) */
+/**
+ * It seems that LRU hash maps with fewer max entries show unexpected
+ * eviction behaviour.
+ * Ref: https://stackoverflow.com/questions/75882443/elements-incorrectly-evicted-from-ebpf-lru-hash-map
+ *
+ * We noticed this in rudimentary tests as well: values were being
+ * evicted even before the map was full. So we use bigger maps to
+ * ensure that we get more LRU-like behaviour in production.
+ *
+ */
#ifndef MAX_MAP_ENTRIES
-#define MAX_MAP_ENTRIES 256
+#define MAX_MAP_ENTRIES 4096
#endif
#define FUNC_INLINE static inline __attribute__((always_inline))
diff --git a/pkg/collector/bpf/network/bpf_network.h b/pkg/collector/bpf/network/bpf_network.h
index 00df7a6f..1389bc1f 100644
--- a/pkg/collector/bpf/network/bpf_network.h
+++ b/pkg/collector/bpf/network/bpf_network.h
@@ -36,6 +36,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct net_event); /* Key is the net_event struct */
__type(value, struct net_stats);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} ingress_accumulator SEC(".maps");
/* Map to track ingress events */
@@ -44,6 +45,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct net_event); /* Key is the net_event struct */
__type(value, struct net_stats);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} egress_accumulator SEC(".maps");
/* Map to track retransmission events */
@@ -52,6 +54,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct net_event); /* Key is the net_event struct */
__type(value, struct net_stats);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} retrans_accumulator SEC(".maps");
/**
diff --git a/pkg/collector/bpf/vfs/bpf_vfs.h b/pkg/collector/bpf/vfs/bpf_vfs.h
index 2917d694..b86fad1a 100644
--- a/pkg/collector/bpf/vfs/bpf_vfs.h
+++ b/pkg/collector/bpf/vfs/bpf_vfs.h
@@ -46,6 +46,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct vfs_event_key); /* Key is the vfs_event_key struct */
__type(value, struct vfs_rw_event);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} write_accumulator SEC(".maps");
/* Map to track vfs_read events */
@@ -54,6 +55,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct vfs_event_key); /* Key is the vfs_event_key struct */
__type(value, struct vfs_rw_event);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} read_accumulator SEC(".maps");
/* Map to track vfs_open events */
@@ -62,6 +64,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, __u32); /* Key is the vfs_event_key struct */
__type(value, struct vfs_inode_event);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} open_accumulator SEC(".maps");
/* Map to track vfs_create events */
@@ -70,6 +73,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, __u32); /* Key is the vfs_event_key struct */
__type(value, struct vfs_inode_event);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} create_accumulator SEC(".maps");
/* Map to track vfs_unlink events */
@@ -78,6 +82,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, __u32); /* Key is the vfs_event_key struct */
__type(value, struct vfs_inode_event);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU); /* per-CPU LRU lists */
} unlink_accumulator SEC(".maps");
/**
From d0a85743de40f8338e273b7af54f22982aac3e68 Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Tue, 17 Sep 2024 17:37:49 +0200
Subject: [PATCH 17/18] docs: Update docs with ebpf and perf collectors
Signed-off-by: Mahendra Paipuri
---
README.md | 27 +-
pkg/collector/slurm.go | 4 +-
website/cspell.json | 4 +-
website/docs/00-introduction.md | 32 +-
website/docs/01-philisophy.md | 110 +-
website/docs/components/ceems-exporter.md | 252 +-
website/docs/configuration/ceems-exporter.md | 56 +-
website/docs/configuration/systemd.md | 44 +-
website/package.json | 4 +
website/yarn.lock | 2394 ++++++------------
10 files changed, 1171 insertions(+), 1756 deletions(-)
diff --git a/README.md b/README.md
index 9256c675..c397c7ea 100644
--- a/README.md
+++ b/README.md
@@ -14,30 +14,35 @@
-Compute Energy & Emissions Monitoring Stack (CEEMS) (pronounced as *kiːms*) contains
-a Prometheus exporter to export metrics of compute instance units and a REST API
+Compute Energy & Emissions Monitoring Stack (CEEMS) (pronounced as *kiːms*) contains
+a Prometheus exporter to export metrics of compute instance units and a REST API
server that serves the metadata and aggregated metrics of each
compute unit. Optionally, it includes a TSDB load balancer that supports basic access
control on TSDB so that one user cannot access metrics of another user.
"Compute Unit" in the current context has a wider scope. It can be a batch job in HPC,
-a VM in cloud, a pod in k8s, _etc_. The main objective of the repository is to quantify
+a VM in cloud, a pod in k8s, *etc*. The main objective of the repository is to quantify
the energy consumed and estimate emissions by each "compute unit". The repository itself
does not provide any frontend apps to show dashboards and it is meant to use along
with Grafana and Prometheus to show statistics to users.
+Although CEEMS was born out of a need to monitor the energy and carbon footprint of compute
+workloads, it supports monitoring performance metrics as well. In addition, it leverages the
+[eBPF](https://ebpf.io/what-is-ebpf/) framework to monitor IO and network metrics
+in a resource-manager-agnostic way.
+
## Install CEEMS
-> [!WARNING]
-> DO NOT USE pre-release versions as the API has changed quite a lot between the
+> [!WARNING]
+> DO NOT USE pre-release versions as the API has changed quite a lot between the
pre-release and stable versions.
-Installation instructions of CEEMS components can be found in
+Installation instructions of CEEMS components can be found in
[docs](https://mahendrapaipuri.github.io/ceems/docs/category/installation).
## Visualizing metrics with Grafana
-CEEMS is meant to be used with Grafana for visualization and below are some of the
+CEEMS is meant to be used with Grafana for visualization and below are some of the
screenshots few possible metrics.
### Time series compute unit CPU metrics
@@ -46,7 +51,7 @@ screenshots few possible metrics.
-### Time series compute unit GPU metrics
+### Time series compute unit GPU metrics
@@ -71,9 +76,9 @@ screenshots few possible metrics.
## Contributing
-We welcome contributions to this project, we hope to see this project grow and become
-a useful tool for people who are interested in the energy and carbon footprint of their
+We welcome contributions to this project, we hope to see this project grow and become
+a useful tool for people who are interested in the energy and carbon footprint of their
workloads.
-Please feel free to open issues and/or discussions for any potential ideas of
+Please feel free to open issues and/or discussions for any potential ideas of
improvement.
diff --git a/pkg/collector/slurm.go b/pkg/collector/slurm.go
index 8791a4bd..ae9b2769 100644
--- a/pkg/collector/slurm.go
+++ b/pkg/collector/slurm.go
@@ -43,8 +43,8 @@ var (
"perf software profilers to collect",
).Strings()
slurmPerfCacheProfilersFlag = CEEMSExporterApp.Flag(
- "collector.slurm.perf-cache-events",
- "Enables collection of perf cache events (default: disabled)",
+ "collector.slurm.perf--hardware-cache-events",
+ "Enables collection of perf harware cache events (default: disabled)",
).Default("false").Bool()
slurmPerfCacheProfilers = CEEMSExporterApp.Flag(
"collector.slurm.perf-cache-profilers",
diff --git a/website/cspell.json b/website/cspell.json
index 65741ef9..5ad31c3c 100644
--- a/website/cspell.json
+++ b/website/cspell.json
@@ -44,7 +44,9 @@
"NOPASSWD",
"syscalls",
"BGFS",
- "kiːms"
+ "kiːms",
+ "recvmsg",
+ "udpv"
],
// flagWords - list of words to be always considered incorrect
// This is useful for offensive words and common spelling errors.
diff --git a/website/docs/00-introduction.md b/website/docs/00-introduction.md
index ac5f28e1..7d8e91b6 100644
--- a/website/docs/00-introduction.md
+++ b/website/docs/00-introduction.md
@@ -18,34 +18,34 @@ slug: /
:::warning[WARNING]
-CEEMS is in early development phase, thus subject to breaking changes with no guarantee
+CEEMS is in an early development phase and thus subject to breaking changes, with no guarantee
of backward compatibility.
:::
-CEEMS provide a set of components that enable operators to monitor the consumption of
-resources of the compute units of different resource managers like SLURM, Openstack and
-Kubernetes.
+CEEMS provides a set of components that enable operators to monitor the resource
+consumption of compute units of different resource managers like SLURM, Openstack and
+Kubernetes.
-- CEEMS Prometheus exporter is capable of exporting compute unit metrics including energy
-consumption from different resource managers.
+- CEEMS Prometheus exporter is capable of exporting compute unit metrics, including energy
+consumption, performance, IO and network metrics, from different resource managers in a
+unified manner.
-- CEEMS API server can store the aggregate metrics and metadata of each compute unit
-originating from different resource manager.
+- CEEMS API server can store the aggregate metrics and metadata of each compute unit
+originating from different resource managers.
-- CEEMS load balancer provides basic access control on TSDB so that compute unit metrics
+- CEEMS load balancer provides basic access control on TSDB so that compute unit metrics
from different projects/tenants/namespaces are isolated.
-"Compute Unit" in the current context has a wider scope. It can be a batch job in HPC,
-a VM in cloud, a pod in k8s, _etc_. The main objective of the stack is to quantify
-the energy consumed and estimate emissions by each "compute unit". The repository itself
-does not provide any frontend apps to show dashboards and it is meant to use along
-with Grafana and Prometheus to show statistics to users.
+"Compute Unit" in the current context has a wider scope. It can be a batch job in HPC,
+a VM in cloud, a pod in k8s, _etc_. The main objective of the stack is to quantify
+the energy consumed and estimate emissions by each "compute unit". The repository itself
+does not provide any frontend apps to show dashboards and it is meant to use along
+with Grafana and Prometheus to show statistics to users.
:::important[Note]
-Currently, only SLURM is supported as a resource manager. In future support for Openstack
+Currently, only SLURM is supported as a resource manager. In the future, support for Openstack
and Kubernetes will be added.
:::
-
diff --git a/website/docs/01-philisophy.md b/website/docs/01-philisophy.md
index b905744e..00c45279 100644
--- a/website/docs/01-philisophy.md
+++ b/website/docs/01-philisophy.md
@@ -1,61 +1,97 @@
# Philosophy
-## CPU, memory and IO metrics
+## CPU, memory, IO and network metrics
-The idea we are leveraging here is that every resource manager has to resort to cgroups
-on Linux to manage CPU, memory and IO resources. Each resource manager does it
-differently but the take away here is that the accounting information is readily
-available in the cgroups. By walking through the cgroups file system, we can gather the
-metrics that map them to a particular compute unit as resource manager tends to create
+The idea we are leveraging here is that every resource manager has to resort to cgroups
+on Linux to manage CPU, memory and IO resources. Each resource manager does it
+differently but the takeaway here is that the accounting information is readily
+available in the cgroups. By walking through the cgroups file system, we can gather
+metrics and map them to a particular compute unit, as each resource manager tends to create
cgroups for each compute unit with some sort of identifier attached to it.
-This is a distributed approach where exporter will run on each compute node. Whenever
-Prometheus make a scrape request, the exporter will walk through cgroup file system and
-exposes the data to Prometheus. As reading cgroups file system is relatively cheap,
-there is a very little overhead running this daemon service. On average the exporter
-takes less than 20 MB of memory.
+Although cgroups already provide us with rich information about metrics of individual
+compute units, some important metrics are still unavailable at the cgroup level, notably
+IO and network metrics. It is worth noting that controllers do exist in cgroups for
+network and IO but they do not cover all the real-world cases. For instance, the IO
+controller can be used only with block devices, whereas a lot of real-world applications
+rely on network file systems. Node-level network and IO metrics can be gathered from
+`/sys` and `/proc` file systems relatively easily; however, the challenge here is to
+monitor those metrics at the cgroup level.
+
+CEEMS uses the [eBPF](https://ebpf.io/what-is-ebpf/) framework to monitor network and IO
+metrics. CEEMS loads bpf programs that trace several kernel functions that are in the
+data path for both IO and network and exposes those metrics for each cgroup. More
+importantly, this is done in a way that is agnostic to the resource manager and the
+underlying file system. Similarly, network metrics for TCP and UDP protocols for both
+IPv4 and IPv6 can be gathered by using carefully crafted bpf programs and attaching
+them to the relevant kernel functions.
+
+This is a distributed approach where a daemon exporter runs on each compute node. Whenever
+Prometheus makes a scrape request, the exporter walks through the cgroup file system and
+the bpf program maps and exposes the data to Prometheus. As reading the cgroup file system
+is relatively cheap, there is very little overhead in running this daemon service.
+Similarly, BPF programs are extremely fast and efficient as they run in kernel space. On
+average, the exporter takes less than 20 MB of memory.
## Energy consumption
In an age where green computing is becoming more and more important, it is essential to
-expose the energy consumed by the compute units to the users to make them more aware.
-Most of energy measurement tools are based on
-[RAPL](https://www.kernel.org/doc/html/next/power/powercap/powercap.html) which reports
-energy consumption from CPU and memory. It does not report consumption from other
-peripherals like PCIe, network, disk, _etc_.
-
-To address this, the current exporter will expose IPMI power statistics in addition to
-RAPL metrics. IPMI measurements are generally made at the node level which includes
-consumption by _most_ of the components. However, the reported energy usage is vendor
-dependent and it is desirable to validate with them before reading too much into the
-numbers. In any case, this is the only complete metric we can get our hands on without
-needing to install any additional hardware like Wattmeters.
+expose the energy consumed by the compute units to the users to make them more aware.
+Most energy measurement tools are based on
+[RAPL](https://www.kernel.org/doc/html/next/power/powercap/powercap.html) which reports
+energy consumption from CPU and memory. It does not report consumption from other
+peripherals like PCIe, network, disk, _etc_.
+
+To address this, the current exporter will expose IPMI power statistics in addition to
+RAPL metrics. IPMI measurements are generally made at the node level, which includes
+consumption by _most_ of the components. However, the reported energy usage is vendor
+dependent and it is desirable to validate it with the vendor before reading too much into the
+numbers. In any case, this is the only complete metric we can get our hands on without
+needing to install any additional hardware like Wattmeters.
This node level power consumption can be split into consumption of individual compute units
-by using relative CPU times used by the compute unit. Although, this is not an exact
+by using the relative CPU time used by the compute unit. Although this is not an exact
estimation of power consumed by the compute unit, it stays a very good approximation.
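+
+As an illustration, a simplified form of this approximation (not necessarily the exact
+expression used by the exporter) can be written as:
+
+$$
+P_{\mathrm{unit}} \approx P_{\mathrm{node}} \times \frac{t_{\mathrm{CPU,unit}}}{\sum_{i} t_{\mathrm{CPU},i}}
+$$
+
+where the sum runs over all compute units on the node during the same time window.
+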
## Emissions
-The exporter is capable of exporting emission factors from different data sources
-which can be used to estimate equivalent CO2 emissions. Currently, for
-France, a _real_ time emission factor will be used that is based on
-[RTE eCO2 mix data](https://www.rte-france.com/en/eco2mix/co2-emissions). Besides,
-retrieving emission factors from [Electricity Maps](https://app.electricitymaps.com/map)
-is also supported provided that API token is provided. Electricity Maps provide
-emission factor data for most of the countries. A static emission factor from historic
-data is also provided from [OWID data](https://github.com/owid/co2-data). Finally, a
+The exporter is capable of exporting emission factors from different data sources
+which can be used to estimate equivalent CO2 emissions. Currently, for
+France, a _real time_ emission factor will be used that is based on
+[RTE eCO2 mix data](https://www.rte-france.com/en/eco2mix/co2-emissions). Besides,
+retrieving emission factors from [Electricity Maps](https://app.electricitymaps.com/map)
+is also supported, provided that an API token is supplied. Electricity Maps provides
+emission factor data for most countries. A static emission factor based on historical
+data is also provided from [OWID data](https://github.com/owid/co2-data). Finally, a
constant global average emission factor can also be used.
-Emissions collector is capable of exporting emission factors from different sources
+The emissions collector is capable of exporting emission factors from different sources
and users can choose the factor that suits their needs.
## GPU metrics
-Currently, only nVIDIA and AMD GPUs are supported. This exporter leverages
+Currently, only nVIDIA and AMD GPUs are supported. This exporter leverages
[DCGM exporter](https://github.com/NVIDIA/dcgm-exporter/tree/main) for nVIDIA GPUs and
[AMD SMI exporter](https://github.com/amd/amd_smi_exporter) for AMD GPUs to get GPU metrics of
-each compute unit. DCGM/AMD SMI exporters exposes the GPU metrics of each GPU and the
-current exporter takes care of the GPU index to compute unit mapping. These two metrics
-can be used together using PromQL to show the metrics of GPU metrics of a given compute
+each compute unit. The DCGM/AMD SMI exporters expose the metrics of each GPU and the
+current exporter takes care of the GPU index to compute unit mapping. These two metrics
+can be used together using PromQL to show the GPU metrics of a given compute
unit.
+
+## Performance metrics
+
+Presenting energy and emission metrics is only one side of the story. This will
+help end users to quickly and cheaply identify their workloads that are consuming
+a lot of energy. However, to make those workloads more efficient, we need more
+information about the application _per se_. To address this, CEEMS exposes performance
+related metrics fetched from Linux's [perf](https://perf.wiki.kernel.org/index.php/Main_Page)
+subsystem. These metrics help end users to quickly identify the obvious issues with
+their applications and thereby improve them, eventually making them more
+energy efficient.
+
+Currently, CEEMS provides performance metrics for the CPU. It is possible to gather
+performance metrics for nVIDIA GPUs as well, as long as operators install and enable
+nVIDIA DCGM libraries. More details can be found in [DCGM](https://docs.nvidia.com/datacenter/dcgm/latest/user-guide/feature-overview.html#profiling-metrics)
+docs.
diff --git a/website/docs/components/ceems-exporter.md b/website/docs/components/ceems-exporter.md
index 7767622b..deb77379 100644
--- a/website/docs/components/ceems-exporter.md
+++ b/website/docs/components/ceems-exporter.md
@@ -6,11 +6,11 @@ sidebar_position: 1
## Background
-`ceems_exporter` is the Prometheus exporter that exposes individual compute unit
+`ceems_exporter` is the Prometheus exporter that exposes individual compute unit
metrics, RAPL energy, IPMI power consumption, emission factor and GPU to compute unit
mapping.
-Currently, the exporter supports only SLURM resource manager.
+Currently, the exporter supports only SLURM resource manager.
`ceems_exporter` provides following collectors:
- Slurm collector: Exports SLURM job metrics like CPU, memory and GPU indices to job ID maps
@@ -20,31 +20,190 @@ Currently, the exporter supports only SLURM resource manager.
- CPU collector: Exports CPU time in different modes (at node level)
- Meminfo collector: Exports memory related statistics (at node level)
-## Slurm collector
+In addition to the collectors stated above, there are common "sub-collectors" that
+can be reused with different collectors. These sub-collectors provide auxiliary
+metrics like IO, networking, performance, _etc_. Currently available sub-collectors are:
+
+- Perf sub-collector: Exports hardware, software and cache performance metrics
+- eBPF sub-collector: Exports IO and network related metrics
+
+These sub-collectors are not meant to work alone; they are supposed to be enabled
+from within a main collector.
+
+## Sub-collectors
+
+### Perf sub-collector
+
+The perf sub-collector exports performance related metrics fetched from Linux's
+[perf](https://perf.wiki.kernel.org/index.php/Main_Page) sub-system. Currently,
+it supports hardware, software and hardware cache events. More advanced details
+on perf events can be found in
+[Brendan Gregg's blog](https://www.brendangregg.com/perf.html#Events). The currently
+supported events are listed as follows, and a minimal sketch of how such counters
+can be opened is shown after the event lists:
+
+#### Hardware events
+
+- Total cycles
+- Retired instructions
+- Cache accesses. Usually this indicates Last Level Cache accesses but this may vary depending on your CPU
+- Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERF_COUNT_HW_CACHE_REFERENCES event to calculate cache miss rates
+- Retired branch instructions
+- Mis-predicted branch instructions
+
+#### Software events
+
+- Number of page faults
+- Number of context switches
+- Number of CPU migrations.
+- Number of minor page faults. These did not require disk I/O to handle.
+- Number of major page faults. These required disk I/O to handle.
+
+#### Hardware cache events
+
+- Number of L1 data cache read hits
+- Number of L1 data cache read misses
+- Number of L1 data cache write hits
+- Number of L1 instruction cache read misses
+- Number of instruction TLB read hits
+- Number of instruction TLB read misses
+- Number of last level cache read hits
+- Number of last level cache read misses
+- Number of last level cache write hits
+- Number of last level cache write misses
+- Number of Branch Prediction Unit (BPU) read hits
+- Number of Branch Prediction Unit (BPU) read misses
+
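+The events above map to the standard perf event types and configs defined in
+`linux/perf_event.h`. The snippet below is a minimal, hypothetical sketch of how a
+hardware cycles counter can be opened for a single process via the `perf_event_open`
+system call; it is only meant to illustrate the mechanism, not how CEEMS implements it
+internally.
+
+```c
+/* Hypothetical sketch: open a hardware "CPU cycles" counter for a given PID. */
+#include <linux/perf_event.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
+                           int group_fd, unsigned long flags)
+{
+    /* glibc does not provide a wrapper, so call the syscall directly. */
+    return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
+}
+
+int open_cycles_counter(pid_t pid)
+{
+    struct perf_event_attr attr;
+
+    memset(&attr, 0, sizeof(attr));
+    attr.type = PERF_TYPE_HARDWARE;
+    attr.size = sizeof(attr);
+    attr.config = PERF_COUNT_HW_CPU_CYCLES;
+    attr.exclude_kernel = 1;
+    attr.exclude_hv = 1;
+
+    /* pid > 0 and cpu == -1: measure this process on any CPU it runs on.
+     * The returned file descriptor can be read() to obtain the counter value. */
+    return perf_event_open(&attr, pid, -1, -1, 0);
+}
+```
+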
+### eBPF sub-collector
+
+The eBPF sub-collector uses [eBPF](https://ebpf.io/what-is-ebpf/) to monitor network and
+IO statistics. A detailed description of eBPF is out of scope for the current documentation.
+This sub-collector loads various bpf programs that track several kernel functions that
+are relevant to network and IO.
+
+#### IO metrics
+
+The core concept for gathering IO metrics is based on the Linux kernel's Virtual File
+System (VFS) layer. From the [docs](https://www.kernel.org/doc/html/latest/filesystems/vfs.html),
+VFS can be defined as:
+
+> The Virtual File System (also known as the Virtual Filesystem Switch) is the software
+layer in the kernel that provides the filesystem interface to userspace programs.
+It also provides an abstraction within the kernel which allows different filesystem
+implementations to coexist.
+
+Thus, all IO activity has to go through the VFS layer. By tracing appropriate functions,
+we can monitor IO metrics. At the same time, these VFS kernel functions have the process
+context readily available and so it is possible to attribute each IO operation to a given
+cgroup. By leveraging these two ideas, it is possible to gather IO metrics for each cgroup.
+The following functions are traced in this sub-collector (a minimal sketch of this
+approach is shown after the list):
+
+- `vfs_read`
+- `vfs_write`
+- `vfs_open`
+- `vfs_create`
+- `vfs_mkdir`
+- `vfs_unlink`
+- `vfs_rmdir`
+
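+The sketch below is a simplified, hypothetical bpf program in the libbpf style that
+counts bytes written per cgroup by attaching a kprobe to `vfs_write`. It only
+illustrates the general technique of per-cgroup accounting in kernel space and is not
+the actual CEEMS bpf program (which also handles reads, errors and mount points).
+
+```c
+// SPDX-License-Identifier: GPL-2.0
+/* Hypothetical sketch: per-cgroup write-bytes accounting via a kprobe on vfs_write. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+    __uint(type, BPF_MAP_TYPE_HASH);
+    __uint(max_entries, 4096);
+    __type(key, __u64);   /* cgroup id */
+    __type(value, __u64); /* bytes written */
+} write_bytes SEC(".maps");
+
+SEC("kprobe/vfs_write")
+int BPF_KPROBE(count_vfs_write, struct file *file, const char *buf, size_t count)
+{
+    /* vfs_write runs in the context of the writing process, so the current
+     * cgroup id identifies the compute unit issuing the IO. */
+    __u64 cgid = bpf_get_current_cgroup_id();
+    __u64 *val = bpf_map_lookup_elem(&write_bytes, &cgid);
+
+    if (val) {
+        __sync_fetch_and_add(val, count);
+    } else {
+        __u64 init = count;
+        bpf_map_update_elem(&write_bytes, &cgid, &init, BPF_ANY);
+    }
+    return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
+```
+
+The exporter side then walks such maps on every scrape and converts the per-cgroup
+counters into Prometheus metrics.
+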
+All of the above kernel functions are exported and have a fairly stable API. By tracing
+them, we are able to monitor:
+
+- Number of read bytes
+- Number of write bytes
+- Number of read requests
+- Number of write requests
+- Number of read errors
+- Number of write errors
+- Number of open requests
+- Number of open errors
+- Number of create requests
+- Number of create errors
+- Number of unlink requests
+- Number of unlink errors
+
+Read and write statistics are aggregated based on mount points. Most production
+workloads use high performance network file systems which are mounted on compute nodes
+at specific mount points. Different file systems may offer different QoS and IOPS
+capabilities and hence, it is beneficial to expose the IO stats on a per-mount-point
+basis instead of aggregating statistics from different types of file systems. The CEEMS
+exporter can be configured at runtime with a list of mount points to monitor.
+
+The rest of the metrics are aggregated globally due to the complexity of retrieving the
+mount point information from kernel function arguments.
+
+:::note[NOTE]
+
+Total aggregate statistics should be very accurate for each cgroup. However, if the underlying
+file system uses async IO, the IO rate statistics might not reflect the true rate, as kernel
+functions return immediately after submitting the IO task to the driver of the underlying
+filesystem. In the case of sync IO, the kernel function blocks until the IO operation has
+finished and thus, we get accurate rate statistics.
-Slurm collector exports the job related metrics like usage of CPU, DRAM, RDMA, _etc_.
-This is done by walking through the cgroups created by SLURM daemon on compute node on
-every scrape request. As walking through the cgroups pseudo file system is _very cheap_,
+:::
+
+The IO data path is highly complex, with a lot of caching involved for several filesystem drivers.
+The statistics reported by these bpf programs are the ones "observed" by the user's workloads
+rather than those seen from the filesystem's perspective. The advantage of this approach is that
+we can use these bpf programs to monitor different types of filesystems in a unified manner
+without having to support different filesystems separately.
+
+#### Network metrics
+
+The eBPF sub-collector traces kernel functions that monitor the following types of network
+events:
+
+- TCP with IPv4 and IPv6
+- UDP with IPv4 and IPv6
+
+Most production workloads use TCP/UDP for communication and hence, only these
+two protocols are supported. This is done by tracing the following kernel functions:
+
+- `tcp_sendmsg`
+- `tcp_sendpage` for kernels < 6.5
+- `tcp_recvmsg`
+- `udp_sendmsg`
+- `udp_sendpage` for kernels < 6.5
+- `udp_recvmsg`
+- `udpv6_sendmsg`
+- `udpv6_recvmsg`
+
+The following metrics are provided by tracing the above functions. All of them are reported
+per protocol (TCP/UDP) and per IP family (IPv4/IPv6); a sketch of the aggregation key that
+such per-cgroup maps can use is shown after the list.
+
+- Number of egress bytes
+- Number of egress packets
+- Number of ingress bytes
+- Number of ingress packets
+- Number of retransmission bytes (only for TCP)
+- Number of retransmission packets (only for TCP)
+
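+As with the IO metrics, the network counters are kept in bpf maps keyed per cgroup. The
+struct below is a hypothetical sketch of what such an aggregation key could look like in
+order to report metrics per protocol and per IP family; the field names are illustrative
+and not taken from the actual CEEMS bpf programs.
+
+```c
+/* Hypothetical sketch of a per-cgroup network accounting key and value. */
+struct net_event_key {
+    __u64 cgroup_id; /* compute unit (cgroup) issuing the traffic */
+    __u16 proto;     /* IPPROTO_TCP or IPPROTO_UDP */
+    __u16 family;    /* AF_INET or AF_INET6 */
+};
+
+struct net_event_value {
+    __u64 bytes;   /* egress or ingress bytes */
+    __u64 packets; /* egress or ingress packets */
+};
+```
+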
+## Collectors
+
+### Slurm collector
+
+The Slurm collector exports job related metrics like usage of CPU, DRAM, RDMA, _etc_.
+This is done by walking through the cgroups created by the SLURM daemon on the compute node on
+every scrape request. As walking through the cgroups pseudo file system is _very cheap_,
-this will zero zero to negligible impact on the actual job.
+this will have zero to negligible impact on the actual job.
-The exporter has been heavily inspired by
-[cgroups_exporter](https://github.com/treydock/cgroup_exporter) and it supports both
-cgroups **v1** and **v2**. For jobs with GPUs, we must the GPU ordinals allocated to
-each job so that we can match GPU metrics scrapped by either
-[dcgm-exporter](https://github.com/NVIDIA/dcgm-exporter) or
-[amd-smi-exporter](https://github.com/amd/amd_smi_exporter) to jobs. Unfortunately,
-this information is not available post-mortem of the job and hence, we need to export
-the mapping related to job ID to GPU ordinals.
+The exporter has been heavily inspired by
+[cgroups_exporter](https://github.com/treydock/cgroup_exporter) and it supports both
+cgroups **v1** and **v2**. For jobs with GPUs, we must know the GPU ordinals allocated to
+each job so that we can match GPU metrics scraped by either
+[dcgm-exporter](https://github.com/NVIDIA/dcgm-exporter) or
+[amd-smi-exporter](https://github.com/amd/amd_smi_exporter) to jobs. Unfortunately,
+this information is not available post-mortem of the job and hence, we need to export
+the mapping of job IDs to GPU ordinals.
:::warning[WARNING]
-For SLURM collector to work properly, SLURM needs to be configured well to use all
-the available cgroups controllers. At least `cpu` and `memory` controllers must be
-enabled, if not cgroups will not contain any accounting information. Without `cpu`
-and `memory` accounting information, it is not possible to estimate energy consumption
+For the SLURM collector to work properly, SLURM needs to be configured to use all
+the available cgroups controllers. At least the `cpu` and `memory` controllers must be
+enabled; if not, cgroups will not contain any accounting information. Without `cpu`
+and `memory` accounting information, it is not possible to estimate the energy consumption
of the job.
-More details on how to configure SLURM to get accounting information from cgroups can
+More details on how to configure SLURM to get accounting information from cgroups can
be found in [Configuration](../configuration/resource-managers.md) section.
:::
@@ -67,17 +226,22 @@ Currently, the list of job related metrics exported by SLURM exporter are as fol
- Job to GPU ordinal mapping (when GPUs found on the compute node)
- Current number of jobs on the compute node
-More information on the metrics can be found in kernel documentation of
-[cgroups v1](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt) and
-[cgroups v2](https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git/tree/Documentation/admin-guide/cgroup-v2.rst).
+More information on the metrics can be found in the kernel documentation of
+[cgroups v1](https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt) and
+[cgroups v2](https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git/tree/Documentation/admin-guide/cgroup-v2.rst).
+
+The Slurm collector supports the [perf](./ceems-exporter.md#perf-sub-collector)
+and [eBPF](./ceems-exporter.md#ebpf-sub-collector) sub-collectors. Hence, in
+addition to the above stated metrics, all the metrics available in the sub-collectors
+can also be reported for each cgroup.
-## IPMI collector
+### IPMI collector
-The IPMI collector reports the current power usage by the node reported by
-[IPMI DCMI](https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/dcmi-v1-5-rev-spec.pdf)
-command specification. Generally IPMI DCMI is available on all types of nodes and
-manufacturers as it is needed for BMC control. There are several IPMI implementation
-available like FreeIPMI, OpenIPMI, IPMIUtil, _etc._ As IPMI DCMI specification is
+The IPMI collector reports the node's current power usage as reported by the
+[IPMI DCMI](https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/dcmi-v1-5-rev-spec.pdf)
+command specification. Generally, IPMI DCMI is available on all types of nodes and from all
+manufacturers as it is needed for BMC control. There are several IPMI implementations
+available like FreeIPMI, OpenIPMI, IPMIUtil, _etc._ As the IPMI DCMI specification is
standardized, different implementations must report the same power usage value of the node.
Currently, the metrics exposed by IPMI collector are:
@@ -90,44 +254,44 @@ Currently, the metrics exposed by IPMI collector are:
Current exporter is capable of auto detecting the IPMI implementation and using
the one that is found.
-## RAPL collector
+### RAPL collector
-RAPL collector reports the power consumption of CPU and DRAM (when available) using
-Running Average Power Limit (RAPL) framework. The exporter uses powercap to fetch the
-energy counters.
+The RAPL collector reports the power consumption of the CPU and DRAM (when available) using
+the Running Average Power Limit (RAPL) framework. The exporter uses powercap to fetch the
+energy counters.
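+
+The powercap framework exposes these counters as cumulative energy values (in microjoules)
+under `/sys/class/powercap`. The snippet below is a minimal, hypothetical sketch of reading
+one such counter; the `intel-rapl:0` domain name is an example and varies between systems,
+and this is not how the exporter is implemented internally.
+
+```c
+/* Hypothetical sketch: read the package-0 RAPL energy counter via powercap sysfs. */
+#include <stdio.h>
+
+int main(void)
+{
+    unsigned long long energy_uj = 0;
+    FILE *f = fopen("/sys/class/powercap/intel-rapl:0/energy_uj", "r");
+
+    if (f == NULL)
+        return 1;
+
+    /* The file contains a single cumulative energy value in microjoules. */
+    if (fscanf(f, "%llu", &energy_uj) == 1)
+        printf("package-0 energy: %llu uJ\n", energy_uj);
+
+    fclose(f);
+    return 0;
+}
+```
+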
List of metrics exported by RAPL collector are:
- RAPL package counters
- RAPL DRAM counters (when available)
-If the CPU architecture supports more RAPL domains otherthan CPU and DRAM, they will be
+If the CPU architecture supports more RAPL domains other than CPU and DRAM, they will be
exported as well.
-## Emissions collector
+### Emissions collector
-Emissions collector exports emissions factors from different sources. Depending on the
-source, these factors can be static or dynamic, _i.e.,_ varying in time. Currently,
+The emissions collector exports emission factors from different sources. Depending on the
+source, these factors can be static or dynamic, _i.e.,_ varying in time. Currently,
different sources supported by the exporter are:
-- [Electricity Maps](https://app.electricitymaps.com/map) which is capable of providing
+- [Electricity Maps](https://app.electricitymaps.com/map) which is capable of providing
real time emission factors for different countries.
- [RTE eCO2 Mix](https://www.rte-france.com/en/eco2mix/co2-emissions) provides real time
emission factor for **only France**.
-- [OWID](https://ourworldindata.org/co2-and-greenhouse-gas-emissions) provides a static
+- [OWID](https://ourworldindata.org/co2-and-greenhouse-gas-emissions) provides static
emission factors for different countries based on historical data.
-- A world average value that is based on the data of available data of the world countries.
+- A world average value that is based on the available data of the world's countries.
The exporter will export the emission factors of all available countries from different
sources.
-## CPU and meminfo collectors
+### CPU and meminfo collectors
Both collectors export node level metrics. CPU collector export CPU time in different
-modes by parsing `/proc/stat` file. Similarly, meminfo collector exports memory usage
-statistics by parsing `/proc/meminfo` file. These collectors are heavily inspired from
-[`node_exporter`](https://github.com/prometheus/node_exporter).
+modes by parsing `/proc/stat` file. Similarly, meminfo collector exports memory usage
+statistics by parsing `/proc/meminfo` file. These collectors are heavily inspired from
+[`node_exporter`](https://github.com/prometheus/node_exporter).
-These metrics are mainly used to estimate the proportion of CPU and memory usage by the
-individual compute units and to estimate the energy consumption of compute unit
+These metrics are mainly used to estimate the proportion of CPU and memory usage by the
+individual compute units and to estimate the energy consumption of each compute unit
based on these proportions.
diff --git a/website/docs/configuration/ceems-exporter.md b/website/docs/configuration/ceems-exporter.md
index 01ef9dbb..a6b30abc 100644
--- a/website/docs/configuration/ceems-exporter.md
+++ b/website/docs/configuration/ceems-exporter.md
@@ -95,14 +95,64 @@ ceems_exporter --collector.slum --collector.slurm.gpu-job-map-path=/run/gpujobma
With above configuration, the exporter should export GPU ordinal mapping
along with other metrics of slurm collector.
+As discussed in [Components](../components/ceems-exporter.md#slurm-collector), the Slurm
+collector supports the [perf](../components/ceems-exporter.md#perf-sub-collector) and
+[eBPF](../components/ceems-exporter.md#ebpf-sub-collector) sub-collectors. These
+sub-collectors can be enabled using the following CLI flags:
+
+:::warning[WARNING]
+
+eBPF sub-collector needs a kernel version `>= 5.8`.
+
+:::
+
+```bash
+ceems_exporter --collector.slurm --collector.slurm.perf-hardware-events --collector.slurm.perf-software-events --collector.slurm.perf-hardware-cache-events --collector.slurm.io-metrics --collector.slurm.network-metrics
+```
+
+The above command will enable hardware, software and hardware cache perf metrics along
+with IO and network metrics retrieved by the eBPF sub-collector.
+
+In production, users may not wish to profile their codes _all the time_ even though
+the overhead induced by monitoring these metrics is negligible. In order to
+tackle this use case, collection of perf metrics can be triggered by the presence of
+a configured environment variable. Operators need to choose one or more environment
+variable names and configure them with the exporter as follows:
+
+```bash
+ceems_exporter --collector.slurm --collector.slurm.perf-hardware-events --collector.slurm.perf-software-events --collector.slurm.perf-hardware-cache-events --collector.slurm.perf-env-var=CEEMS_ENABLE_PERF --collector.slurm.perf-env-var=ENABLE_PERF
+```
+
+The above example command will enable all available perf metrics and monitor the processes
+in a SLURM job _only if one of the `CEEMS_ENABLE_PERF` or `ENABLE_PERF` environment variables is set_.
+
+:::note[NOTE]
+
+As demonstrated in the example, more than one environment variable can be configured, and
+the presence of at least one of the configured environment variables is enough to trigger
+the perf metrics monitoring.
+
+:::
+
+The mere presence of the environment variable is enough to trigger the monitoring of perf metrics;
+the value of the environment variable is not checked. Thus, an environment variable like
+`CEEMS_ENABLE_PERF=false` will still trigger the perf metrics monitoring. Operators need to
+inform their end users to set one of these configured environment variables in their
+workflows to have the perf metrics monitored.
+
:::important[IMPORTANT]
-The CLI option `--collector.slurm.gpu-job-map-path`
-is hidden and cannot be seen in `ceems_exporter --help` output. However, this option
-exists in the exporter and can be used.
+This way of controlling the monitoring of metrics is only applicable to perf events, namely
+hardware, software and hardware cache events. Unfortunately, there is no easy way to use a
+similar approach for IO and network metrics which are provided by the eBPF sub-collector. This
+is due to the fact that these metrics are collected in kernel space and the ability to
+enable and disable them at runtime is more involved.
:::
+Both perf and eBPF sub-collectors need extra privileges to work, and the necessary privileges
+are discussed in the [Systemd](./systemd.md) section.
+
## IPMI collector
-Currently, collector supports FreeIPMI, OpenIMPI, IPMIUtils and Cray's [`capmc`](https://cray-hpe.github.io/docs-csm/en-10/operations/power_management/cray_advanced_platform_monitoring_and_control_capmc/)
+Currently, the collector supports FreeIPMI, OpenIPMI, IPMIUtils and Cray's [`capmc`](https://cray-hpe.github.io/docs-csm/en-10/operations/power_management/cray_advanced_platform_monitoring_and_control_capmc/)
diff --git a/website/docs/configuration/systemd.md b/website/docs/configuration/systemd.md
index 7508527f..87358e0d 100644
--- a/website/docs/configuration/systemd.md
+++ b/website/docs/configuration/systemd.md
@@ -4,49 +4,49 @@ sidebar_position: 6
# Systemd
-If CEEMS components are installed using [RPM/DEB packages](../installation/os-packages.md), a basic
-systemd unit file will be installed to start the service. However, when they are
-installed manually using [pre-compiled binaries](../installation/pre-compiled-binaries.md), it is
+If CEEMS components are installed using [RPM/DEB packages](../installation/os-packages.md), a basic
+systemd unit file will be installed to start the service. However, when they are
+installed manually using [pre-compiled binaries](../installation/pre-compiled-binaries.md), it is
necessary to install and configure `systemd` unit files to manage the service.
## Privileges
CEEMS exporter executes IPMI DCMI command which is a privileged command. Thus, a trivial
-solution is to run CEEMS exporter as `root`. Similarly, in order to which GPU has been
-assigned to which compute unit, we need to either introspect compute unit's
+solution is to run the CEEMS exporter as `root`. Similarly, in order to know which GPU has been
+assigned to which compute unit, we need to either introspect the compute unit's
environment variables (for SLURM resource manager), `libvirt` XML file for VM (for Openstack),
-_etc_. These actions need privileges as well. However, most of the other collectors of
-CEEMS exporter do not need additional privileges and hence, running the exporter
-as `root` is an overkill. We can leverage
-[Linux Capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) to
+_etc_. These actions need privileges as well. However, most of the other collectors of
+CEEMS exporter do not need additional privileges and hence, running the exporter
+as `root` is overkill. We can leverage
+[Linux Capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) to
assign just the necessary privileges to the process.
## Linux capabilities
-Linux capabilities can be assigned to either file or process. For instance, capabilities
+Linux capabilities can be assigned to either a file or a process. For instance, capabilities
on the `ceems_exporter` and `ceems_api_server` binaries can be set as follows:
-```
+```bash
sudo setcap cap_sys_ptrace,cap_dac_read_search,cap_setuid,cap_setgid+ep /full/path/to/ceems_exporter
sudo setcap cap_setuid,cap_setgid+ep /full/path/to/ceems_api_server
```
-This will assign all the capabilities that are necessary to run `ceems_exporter`
-for all the collectors. Using file based capabilities will
-expose those capabilities to anyone on the system that have execute permissions on the
-binary. Although, it does not pose a big security concern, it is better to assign
-capabilities to a process.
+This will assign all the capabilities that are necessary to run `ceems_exporter`
+for all the collectors. Using file based capabilities will
+expose those capabilities to anyone on the system who has execute permissions on the
+binary. Although it does not pose a big security concern, it is better to assign
+capabilities to a process.
-As operators tend to run the exporter within a `systemd` unit file, we can assign
-capabilities to the process rather than file using `AmbientCapabilities`
+As operators tend to run the exporter within a `systemd` unit file, we can assign
+capabilities to the process rather than the file using the `AmbientCapabilities`
directive of the `systemd`. An example is as follows:
-```
+```ini
[Service]
-ExecStart=/usr/local/bin/ceems_exporter
+ExecStart=/usr/local/bin/ceems_exporter --collector.slurm
AmbientCapabilities=CAP_SYS_PTRACE CAP_DAC_READ_SEARCH CAP_SETUID CAP_SETGID
```
-Note that it is bare minimum service file and it is only to demonstrate on how to use
-`AmbientCapabilities`. Production ready [service files examples]((https://github.com/mahendrapaipuri/ceems/tree/main/build/package))
+Note that this is a bare minimum service file and it is only meant to demonstrate how to use
+`AmbientCapabilities`. Production ready [service file examples](https://github.com/mahendrapaipuri/ceems/tree/main/build/package)
-are provided in repo.
+are provided in the repo.
diff --git a/website/package.json b/website/package.json
index 84539ee8..aad825b4 100644
--- a/website/package.json
+++ b/website/package.json
@@ -50,5 +50,9 @@
},
"engines": {
"node": ">=18.0"
+ },
+ "resolutions": {
+ "react-router/**/path-to-regexp": "1.9.0",
+ "express/**/path-to-regexp": "0.1.10"
}
}
diff --git a/website/yarn.lock b/website/yarn.lock
index 5418ec4a..e3d625d1 100644
--- a/website/yarn.lock
+++ b/website/yarn.lock
@@ -29,131 +29,131 @@
resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz#2e22e830d36f0a9cf2c0ccd3c7f6d59435b77dfa"
integrity sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==
-"@algolia/cache-browser-local-storage@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.23.3.tgz#0cc26b96085e1115dac5fcb9d826651ba57faabc"
- integrity sha512-vRHXYCpPlTDE7i6UOy2xE03zHF2C8MEFjPN2v7fRbqVpcOvAUQK81x3Kc21xyb5aSIpYCjWCZbYZuz8Glyzyyg==
- dependencies:
- "@algolia/cache-common" "4.23.3"
-
-"@algolia/cache-common@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/cache-common/-/cache-common-4.23.3.tgz#3bec79092d512a96c9bfbdeec7cff4ad36367166"
- integrity sha512-h9XcNI6lxYStaw32pHpB1TMm0RuxphF+Ik4o7tcQiodEdpKK+wKufY6QXtba7t3k8eseirEMVB83uFFF3Nu54A==
-
-"@algolia/cache-in-memory@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/cache-in-memory/-/cache-in-memory-4.23.3.tgz#3945f87cd21ffa2bec23890c85305b6b11192423"
- integrity sha512-yvpbuUXg/+0rbcagxNT7un0eo3czx2Uf0y4eiR4z4SD7SiptwYTpbuS0IHxcLHG3lq22ukx1T6Kjtk/rT+mqNg==
- dependencies:
- "@algolia/cache-common" "4.23.3"
-
-"@algolia/client-account@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/client-account/-/client-account-4.23.3.tgz#8751bbf636e6741c95e7c778488dee3ee430ac6f"
- integrity sha512-hpa6S5d7iQmretHHF40QGq6hz0anWEHGlULcTIT9tbUssWUriN9AUXIFQ8Ei4w9azD0hc1rUok9/DeQQobhQMA==
- dependencies:
- "@algolia/client-common" "4.23.3"
- "@algolia/client-search" "4.23.3"
- "@algolia/transporter" "4.23.3"
-
-"@algolia/client-analytics@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-4.23.3.tgz#f88710885278fe6fb6964384af59004a5a6f161d"
- integrity sha512-LBsEARGS9cj8VkTAVEZphjxTjMVCci+zIIiRhpFun9jGDUlS1XmhCW7CTrnaWeIuCQS/2iPyRqSy1nXPjcBLRA==
- dependencies:
- "@algolia/client-common" "4.23.3"
- "@algolia/client-search" "4.23.3"
- "@algolia/requester-common" "4.23.3"
- "@algolia/transporter" "4.23.3"
-
-"@algolia/client-common@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/client-common/-/client-common-4.23.3.tgz#891116aa0db75055a7ecc107649f7f0965774704"
- integrity sha512-l6EiPxdAlg8CYhroqS5ybfIczsGUIAC47slLPOMDeKSVXYG1n0qGiz4RjAHLw2aD0xzh2EXZ7aRguPfz7UKDKw==
- dependencies:
- "@algolia/requester-common" "4.23.3"
- "@algolia/transporter" "4.23.3"
-
-"@algolia/client-personalization@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-4.23.3.tgz#35fa8e5699b0295fbc400a8eb211dc711e5909db"
- integrity sha512-3E3yF3Ocr1tB/xOZiuC3doHQBQ2zu2MPTYZ0d4lpfWads2WTKG7ZzmGnsHmm63RflvDeLK/UVx7j2b3QuwKQ2g==
- dependencies:
- "@algolia/client-common" "4.23.3"
- "@algolia/requester-common" "4.23.3"
- "@algolia/transporter" "4.23.3"
-
-"@algolia/client-search@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-4.23.3.tgz#a3486e6af13a231ec4ab43a915a1f318787b937f"
- integrity sha512-P4VAKFHqU0wx9O+q29Q8YVuaowaZ5EM77rxfmGnkHUJggh28useXQdopokgwMeYw2XUht49WX5RcTQ40rZIabw==
- dependencies:
- "@algolia/client-common" "4.23.3"
- "@algolia/requester-common" "4.23.3"
- "@algolia/transporter" "4.23.3"
+"@algolia/cache-browser-local-storage@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz#97bc6d067a9fd932b9c922faa6b7fd6e546e1348"
+ integrity sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==
+ dependencies:
+ "@algolia/cache-common" "4.24.0"
+
+"@algolia/cache-common@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/cache-common/-/cache-common-4.24.0.tgz#81a8d3a82ceb75302abb9b150a52eba9960c9744"
+ integrity sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==
+
+"@algolia/cache-in-memory@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz#ffcf8872f3a10cb85c4f4641bdffd307933a6e44"
+ integrity sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==
+ dependencies:
+ "@algolia/cache-common" "4.24.0"
+
+"@algolia/client-account@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/client-account/-/client-account-4.24.0.tgz#eba7a921d828e7c8c40a32d4add21206c7fe12f1"
+ integrity sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==
+ dependencies:
+ "@algolia/client-common" "4.24.0"
+ "@algolia/client-search" "4.24.0"
+ "@algolia/transporter" "4.24.0"
+
+"@algolia/client-analytics@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-4.24.0.tgz#9d2576c46a9093a14e668833c505ea697a1a3e30"
+ integrity sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==
+ dependencies:
+ "@algolia/client-common" "4.24.0"
+ "@algolia/client-search" "4.24.0"
+ "@algolia/requester-common" "4.24.0"
+ "@algolia/transporter" "4.24.0"
+
+"@algolia/client-common@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/client-common/-/client-common-4.24.0.tgz#77c46eee42b9444a1d1c1583a83f7df4398a649d"
+ integrity sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==
+ dependencies:
+ "@algolia/requester-common" "4.24.0"
+ "@algolia/transporter" "4.24.0"
+
+"@algolia/client-personalization@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-4.24.0.tgz#8b47789fb1cb0f8efbea0f79295b7c5a3850f6ae"
+ integrity sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==
+ dependencies:
+ "@algolia/client-common" "4.24.0"
+ "@algolia/requester-common" "4.24.0"
+ "@algolia/transporter" "4.24.0"
+
+"@algolia/client-search@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-4.24.0.tgz#75e6c02d33ef3e0f34afd9962c085b856fc4a55f"
+ integrity sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==
+ dependencies:
+ "@algolia/client-common" "4.24.0"
+ "@algolia/requester-common" "4.24.0"
+ "@algolia/transporter" "4.24.0"
"@algolia/events@^4.0.1":
version "4.0.1"
resolved "https://registry.yarnpkg.com/@algolia/events/-/events-4.0.1.tgz#fd39e7477e7bc703d7f893b556f676c032af3950"
integrity sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==
-"@algolia/logger-common@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/logger-common/-/logger-common-4.23.3.tgz#35c6d833cbf41e853a4f36ba37c6e5864920bfe9"
- integrity sha512-y9kBtmJwiZ9ZZ+1Ek66P0M68mHQzKRxkW5kAAXYN/rdzgDN0d2COsViEFufxJ0pb45K4FRcfC7+33YB4BLrZ+g==
-
-"@algolia/logger-console@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/logger-console/-/logger-console-4.23.3.tgz#30f916781826c4db5f51fcd9a8a264a06e136985"
- integrity sha512-8xoiseoWDKuCVnWP8jHthgaeobDLolh00KJAdMe9XPrWPuf1by732jSpgy2BlsLTaT9m32pHI8CRfrOqQzHv3A==
- dependencies:
- "@algolia/logger-common" "4.23.3"
-
-"@algolia/recommend@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/recommend/-/recommend-4.23.3.tgz#53d4f194d22d9c72dc05f3f7514c5878f87c5890"
- integrity sha512-9fK4nXZF0bFkdcLBRDexsnGzVmu4TSYZqxdpgBW2tEyfuSSY54D4qSRkLmNkrrz4YFvdh2GM1gA8vSsnZPR73w==
- dependencies:
- "@algolia/cache-browser-local-storage" "4.23.3"
- "@algolia/cache-common" "4.23.3"
- "@algolia/cache-in-memory" "4.23.3"
- "@algolia/client-common" "4.23.3"
- "@algolia/client-search" "4.23.3"
- "@algolia/logger-common" "4.23.3"
- "@algolia/logger-console" "4.23.3"
- "@algolia/requester-browser-xhr" "4.23.3"
- "@algolia/requester-common" "4.23.3"
- "@algolia/requester-node-http" "4.23.3"
- "@algolia/transporter" "4.23.3"
-
-"@algolia/requester-browser-xhr@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.23.3.tgz#9e47e76f60d540acc8b27b4ebc7a80d1b41938b9"
- integrity sha512-jDWGIQ96BhXbmONAQsasIpTYWslyjkiGu0Quydjlowe+ciqySpiDUrJHERIRfELE5+wFc7hc1Q5hqjGoV7yghw==
- dependencies:
- "@algolia/requester-common" "4.23.3"
-
-"@algolia/requester-common@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/requester-common/-/requester-common-4.23.3.tgz#7dbae896e41adfaaf1d1fa5f317f83a99afb04b3"
- integrity sha512-xloIdr/bedtYEGcXCiF2muajyvRhwop4cMZo+K2qzNht0CMzlRkm8YsDdj5IaBhshqfgmBb3rTg4sL4/PpvLYw==
-
-"@algolia/requester-node-http@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-4.23.3.tgz#c9f94a5cb96a15f48cea338ab6ef16bbd0ff989f"
- integrity sha512-zgu++8Uj03IWDEJM3fuNl34s746JnZOWn1Uz5taV1dFyJhVM/kTNw9Ik7YJWiUNHJQXcaD8IXD1eCb0nq/aByA==
- dependencies:
- "@algolia/requester-common" "4.23.3"
-
-"@algolia/transporter@4.23.3":
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/@algolia/transporter/-/transporter-4.23.3.tgz#545b045b67db3850ddf0bbecbc6c84ff1f3398b7"
- integrity sha512-Wjl5gttqnf/gQKJA+dafnD0Y6Yw97yvfY8R9h0dQltX1GXTgNs1zWgvtWW0tHl1EgMdhAyw189uWiZMnL3QebQ==
- dependencies:
- "@algolia/cache-common" "4.23.3"
- "@algolia/logger-common" "4.23.3"
- "@algolia/requester-common" "4.23.3"
+"@algolia/logger-common@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/logger-common/-/logger-common-4.24.0.tgz#28d439976019ec0a46ba7a1a739ef493d4ef8123"
+ integrity sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==
+
+"@algolia/logger-console@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/logger-console/-/logger-console-4.24.0.tgz#c6ff486036cd90b81d07a95aaba04461da7e1c65"
+ integrity sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==
+ dependencies:
+ "@algolia/logger-common" "4.24.0"
+
+"@algolia/recommend@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/recommend/-/recommend-4.24.0.tgz#8a3f78aea471ee0a4836b78fd2aad4e9abcaaf34"
+ integrity sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==
+ dependencies:
+ "@algolia/cache-browser-local-storage" "4.24.0"
+ "@algolia/cache-common" "4.24.0"
+ "@algolia/cache-in-memory" "4.24.0"
+ "@algolia/client-common" "4.24.0"
+ "@algolia/client-search" "4.24.0"
+ "@algolia/logger-common" "4.24.0"
+ "@algolia/logger-console" "4.24.0"
+ "@algolia/requester-browser-xhr" "4.24.0"
+ "@algolia/requester-common" "4.24.0"
+ "@algolia/requester-node-http" "4.24.0"
+ "@algolia/transporter" "4.24.0"
+
+"@algolia/requester-browser-xhr@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz#313c5edab4ed73a052e75803855833b62dd19c16"
+ integrity sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==
+ dependencies:
+ "@algolia/requester-common" "4.24.0"
+
+"@algolia/requester-common@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/requester-common/-/requester-common-4.24.0.tgz#1c60c198031f48fcdb9e34c4057a3ea987b9a436"
+ integrity sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==
+
+"@algolia/requester-node-http@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz#4461593714031d02aa7da221c49df675212f482f"
+ integrity sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==
+ dependencies:
+ "@algolia/requester-common" "4.24.0"
+
+"@algolia/transporter@4.24.0":
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/@algolia/transporter/-/transporter-4.24.0.tgz#226bb1f8af62430374c1972b2e5c8580ab275102"
+ integrity sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==
+ dependencies:
+ "@algolia/cache-common" "4.24.0"
+ "@algolia/logger-common" "4.24.0"
+ "@algolia/requester-common" "4.24.0"
"@ampproject/remapping@^2.2.0":
version "2.3.0"
@@ -163,15 +163,7 @@
"@jridgewell/gen-mapping" "^0.3.5"
"@jridgewell/trace-mapping" "^0.3.24"
-"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.23.5", "@babel/code-frame@^7.24.2", "@babel/code-frame@^7.8.3":
- version "7.24.2"
- resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.2.tgz#718b4b19841809a58b29b68cde80bc5e1aa6d9ae"
- integrity sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ==
- dependencies:
- "@babel/highlight" "^7.24.2"
- picocolors "^1.0.0"
-
-"@babel/code-frame@^7.24.7":
+"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.24.7", "@babel/code-frame@^7.8.3":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.7.tgz#882fd9e09e8ee324e496bd040401c6f046ef4465"
integrity sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==
@@ -179,17 +171,12 @@
"@babel/highlight" "^7.24.7"
picocolors "^1.0.0"
-"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.23.5", "@babel/compat-data@^7.24.4":
- version "7.24.4"
- resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.24.4.tgz#6f102372e9094f25d908ca0d34fc74c74606059a"
- integrity sha512-vg8Gih2MLK+kOkHJp4gBEIkyaIi00jgWot2D9QOmmfLC8jINSOzmCLta6Bvz/JSBCqnegV0L80jhxkol5GWNfQ==
-
-"@babel/compat-data@^7.25.2", "@babel/compat-data@^7.25.4":
+"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.25.2", "@babel/compat-data@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.25.4.tgz#7d2a80ce229890edcf4cc259d4d696cb4dae2fcb"
integrity sha512-+LGRog6RAsCJrrrg/IO6LGmpphNe5DiK30dGjCoxxeGv49B10/3XYGxPsAwrDlMFcFEvdAUavDT8r9k/hSyQqQ==
-"@babel/core@^7.21.3":
+"@babel/core@^7.21.3", "@babel/core@^7.23.3":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.25.2.tgz#ed8eec275118d7613e77a352894cd12ded8eba77"
integrity sha512-BBt3opiCOxUr9euZ5/ro/Xv8/V7yJ5bjYMqG/C1YAo8MIKAnumZalCN+msbci3Pigy4lIQfPUpfMM27HMGaYEA==
@@ -210,38 +197,7 @@
json5 "^2.2.3"
semver "^6.3.1"
-"@babel/core@^7.23.3":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.24.5.tgz#15ab5b98e101972d171aeef92ac70d8d6718f06a"
- integrity sha512-tVQRucExLQ02Boi4vdPp49svNGcfL2GhdTCT9aldhXgCJVAI21EtRfBettiuLUwce/7r6bFdgs6JFkcdTiFttA==
- dependencies:
- "@ampproject/remapping" "^2.2.0"
- "@babel/code-frame" "^7.24.2"
- "@babel/generator" "^7.24.5"
- "@babel/helper-compilation-targets" "^7.23.6"
- "@babel/helper-module-transforms" "^7.24.5"
- "@babel/helpers" "^7.24.5"
- "@babel/parser" "^7.24.5"
- "@babel/template" "^7.24.0"
- "@babel/traverse" "^7.24.5"
- "@babel/types" "^7.24.5"
- convert-source-map "^2.0.0"
- debug "^4.1.0"
- gensync "^1.0.0-beta.2"
- json5 "^2.2.3"
- semver "^6.3.1"
-
-"@babel/generator@^7.23.3", "@babel/generator@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.24.5.tgz#e5afc068f932f05616b66713e28d0f04e99daeb3"
- integrity sha512-x32i4hEXvr+iI0NEoEfDKzlemF8AmtOP8CcrRaEcpzysWuoEb1KknpcvMsHKPONoKZiDuItklgWhB18xEhr9PA==
- dependencies:
- "@babel/types" "^7.24.5"
- "@jridgewell/gen-mapping" "^0.3.5"
- "@jridgewell/trace-mapping" "^0.3.25"
- jsesc "^2.5.1"
-
-"@babel/generator@^7.25.0", "@babel/generator@^7.25.6":
+"@babel/generator@^7.23.3", "@babel/generator@^7.25.0", "@babel/generator@^7.25.6":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.25.6.tgz#0df1ad8cb32fe4d2b01d8bf437f153d19342a87c"
integrity sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==
@@ -251,13 +207,6 @@
"@jridgewell/trace-mapping" "^0.3.25"
jsesc "^2.5.1"
-"@babel/helper-annotate-as-pure@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz#e7f06737b197d580a01edf75d97e2c8be99d3882"
- integrity sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==
- dependencies:
- "@babel/types" "^7.22.5"
-
"@babel/helper-annotate-as-pure@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz#5373c7bc8366b12a033b4be1ac13a206c6656aab"
@@ -265,13 +214,6 @@
dependencies:
"@babel/types" "^7.24.7"
-"@babel/helper-builder-binary-assignment-operator-visitor@^7.22.15":
- version "7.22.15"
- resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz#5426b109cf3ad47b91120f8328d8ab1be8b0b956"
- integrity sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==
- dependencies:
- "@babel/types" "^7.22.15"
-
"@babel/helper-builder-binary-assignment-operator-visitor@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz#37d66feb012024f2422b762b9b2a7cfe27c7fba3"
@@ -280,18 +222,7 @@
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
-"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.23.6":
- version "7.23.6"
- resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz#4d79069b16cbcf1461289eccfbbd81501ae39991"
- integrity sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==
- dependencies:
- "@babel/compat-data" "^7.23.5"
- "@babel/helper-validator-option" "^7.23.5"
- browserslist "^4.22.2"
- lru-cache "^5.1.1"
- semver "^6.3.1"
-
-"@babel/helper-compilation-targets@^7.24.7", "@babel/helper-compilation-targets@^7.24.8", "@babel/helper-compilation-targets@^7.25.2":
+"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.24.7", "@babel/helper-compilation-targets@^7.24.8", "@babel/helper-compilation-targets@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.2.tgz#e1d9410a90974a3a5a66e84ff55ef62e3c02d06c"
integrity sha512-U2U5LsSaZ7TAt3cfaymQ8WHh0pxvdHoEk6HVpaexxixjyEquMh0L0YNJNM6CTGKMXV1iksi0iZkGw4AcFkPaaw==
@@ -302,21 +233,6 @@
lru-cache "^5.1.1"
semver "^6.3.1"
-"@babel/helper-create-class-features-plugin@^7.24.1", "@babel/helper-create-class-features-plugin@^7.24.4", "@babel/helper-create-class-features-plugin@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.5.tgz#7d19da92c7e0cd8d11c09af2ce1b8e7512a6e723"
- integrity sha512-uRc4Cv8UQWnE4NXlYTIIdM7wfFkOqlFztcC/gVXDKohKoVB3OyonfelUBaJzSwpBntZ2KYGF/9S7asCHsXwW6g==
- dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-function-name" "^7.23.0"
- "@babel/helper-member-expression-to-functions" "^7.24.5"
- "@babel/helper-optimise-call-expression" "^7.22.5"
- "@babel/helper-replace-supers" "^7.24.1"
- "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5"
- "@babel/helper-split-export-declaration" "^7.24.5"
- semver "^6.3.1"
-
"@babel/helper-create-class-features-plugin@^7.24.7", "@babel/helper-create-class-features-plugin@^7.25.0", "@babel/helper-create-class-features-plugin@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.4.tgz#57eaf1af38be4224a9d9dd01ddde05b741f50e14"
@@ -330,16 +246,7 @@
"@babel/traverse" "^7.25.4"
semver "^6.3.1"
-"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.22.15", "@babel/helper-create-regexp-features-plugin@^7.22.5":
- version "7.22.15"
- resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz#5ee90093914ea09639b01c711db0d6775e558be1"
- integrity sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==
- dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- regexpu-core "^5.3.1"
- semver "^6.3.1"
-
-"@babel/helper-create-regexp-features-plugin@^7.24.7", "@babel/helper-create-regexp-features-plugin@^7.25.0", "@babel/helper-create-regexp-features-plugin@^7.25.2":
+"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.24.7", "@babel/helper-create-regexp-features-plugin@^7.25.0", "@babel/helper-create-regexp-features-plugin@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.25.2.tgz#24c75974ed74183797ffd5f134169316cd1808d9"
integrity sha512-+wqVGP+DFmqwFD3EH6TMTfUNeqDehV3E/dl+Sd54eaXqm17tEUNbEIn4sVivVowbvUpOtIGxdo3GoXyDH9N/9g==
@@ -348,7 +255,7 @@
regexpu-core "^5.3.1"
semver "^6.3.1"
-"@babel/helper-define-polyfill-provider@^0.6.1", "@babel/helper-define-polyfill-provider@^0.6.2":
+"@babel/helper-define-polyfill-provider@^0.6.2":
version "0.6.2"
resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz#18594f789c3594acb24cfdb4a7f7b7d2e8bd912d"
integrity sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==
@@ -359,33 +266,6 @@
lodash.debounce "^4.0.8"
resolve "^1.14.2"
-"@babel/helper-environment-visitor@^7.22.20":
- version "7.22.20"
- resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167"
- integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==
-
-"@babel/helper-function-name@^7.23.0":
- version "7.23.0"
- resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759"
- integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==
- dependencies:
- "@babel/template" "^7.22.15"
- "@babel/types" "^7.23.0"
-
-"@babel/helper-hoist-variables@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb"
- integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==
- dependencies:
- "@babel/types" "^7.22.5"
-
-"@babel/helper-member-expression-to-functions@^7.23.0", "@babel/helper-member-expression-to-functions@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.5.tgz#5981e131d5c7003c7d1fa1ad49e86c9b097ec475"
- integrity sha512-4owRteeihKWKamtqg4JmWSsEZU445xpFRXPEwp44HbgbxdWlUV1b4Agg4lkA806Lil5XM/e+FJyS0vj5T6vmcA==
- dependencies:
- "@babel/types" "^7.24.5"
-
"@babel/helper-member-expression-to-functions@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.8.tgz#6155e079c913357d24a4c20480db7c712a5c3fb6"
@@ -394,13 +274,6 @@
"@babel/traverse" "^7.24.8"
"@babel/types" "^7.24.8"
-"@babel/helper-module-imports@^7.22.15", "@babel/helper-module-imports@^7.24.1", "@babel/helper-module-imports@^7.24.3":
- version "7.24.3"
- resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz#6ac476e6d168c7c23ff3ba3cf4f7841d46ac8128"
- integrity sha512-viKb0F9f2s0BCS22QSF308z/+1YWKV/76mwt61NBzS5izMzDPwdq1pTrzf+Li3npBWX9KdQbkeCt1jSAM7lZqg==
- dependencies:
- "@babel/types" "^7.24.0"
-
"@babel/helper-module-imports@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz#f2f980392de5b84c3328fc71d38bd81bbb83042b"
@@ -409,17 +282,6 @@
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
-"@babel/helper-module-transforms@^7.23.3", "@babel/helper-module-transforms@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.24.5.tgz#ea6c5e33f7b262a0ae762fd5986355c45f54a545"
- integrity sha512-9GxeY8c2d2mdQUP1Dye0ks3VDyIMS98kt/llQ2nUId8IsWqTF0l1LkSX0/uP7l7MCDrzXS009Hyhe2gzTiGW8A==
- dependencies:
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-module-imports" "^7.24.3"
- "@babel/helper-simple-access" "^7.24.5"
- "@babel/helper-split-export-declaration" "^7.24.5"
- "@babel/helper-validator-identifier" "^7.24.5"
-
"@babel/helper-module-transforms@^7.24.7", "@babel/helper-module-transforms@^7.24.8", "@babel/helper-module-transforms@^7.25.0", "@babel/helper-module-transforms@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.25.2.tgz#ee713c29768100f2776edf04d4eb23b8d27a66e6"
@@ -430,13 +292,6 @@
"@babel/helper-validator-identifier" "^7.24.7"
"@babel/traverse" "^7.25.2"
-"@babel/helper-optimise-call-expression@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz#f21531a9ccbff644fdd156b4077c16ff0c3f609e"
- integrity sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==
- dependencies:
- "@babel/types" "^7.22.5"
-
"@babel/helper-optimise-call-expression@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz#8b0a0456c92f6b323d27cfd00d1d664e76692a0f"
@@ -444,25 +299,11 @@
dependencies:
"@babel/types" "^7.24.7"
-"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.24.0", "@babel/helper-plugin-utils@^7.24.5", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.5.tgz#a924607dd254a65695e5bd209b98b902b3b2f11a"
- integrity sha512-xjNLDopRzW2o6ba0gKbkZq5YWEBaK3PCyTOY1K2P/O07LGMhMqlMXPxwN4S5/RhWuCobT8z0jrlKGlYmeR1OhQ==
-
-"@babel/helper-plugin-utils@^7.24.7", "@babel/helper-plugin-utils@^7.24.8":
+"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.24.7", "@babel/helper-plugin-utils@^7.24.8", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz#94ee67e8ec0e5d44ea7baeb51e571bd26af07878"
integrity sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==
-"@babel/helper-remap-async-to-generator@^7.22.20":
- version "7.22.20"
- resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz#7b68e1cb4fa964d2996fd063723fb48eca8498e0"
- integrity sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==
- dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-wrap-function" "^7.22.20"
-
"@babel/helper-remap-async-to-generator@^7.24.7", "@babel/helper-remap-async-to-generator@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.0.tgz#d2f0fbba059a42d68e5e378feaf181ef6055365e"
@@ -472,15 +313,6 @@
"@babel/helper-wrap-function" "^7.25.0"
"@babel/traverse" "^7.25.0"
-"@babel/helper-replace-supers@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.24.1.tgz#7085bd19d4a0b7ed8f405c1ed73ccb70f323abc1"
- integrity sha512-QCR1UqC9BzG5vZl8BMicmZ28RuUBnHhAMddD8yHFHDRH9lLTZ9uUPehX8ctVPT8l0TKblJidqcgUUKGVrePleQ==
- dependencies:
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-member-expression-to-functions" "^7.23.0"
- "@babel/helper-optimise-call-expression" "^7.22.5"
-
"@babel/helper-replace-supers@^7.24.7", "@babel/helper-replace-supers@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.25.0.tgz#ff44deac1c9f619523fe2ca1fd650773792000a9"
@@ -490,13 +322,6 @@
"@babel/helper-optimise-call-expression" "^7.24.7"
"@babel/traverse" "^7.25.0"
-"@babel/helper-simple-access@^7.22.5", "@babel/helper-simple-access@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.24.5.tgz#50da5b72f58c16b07fbd992810be6049478e85ba"
- integrity sha512-uH3Hmf5q5n7n8mz7arjUlDOCbttY/DW4DYhE6FUsjKJ/oYC1kQQUvwEQWxRwUpX9qQKRXeqLwWxrqilMrf32sQ==
- dependencies:
- "@babel/types" "^7.24.5"
-
"@babel/helper-simple-access@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz#bcade8da3aec8ed16b9c4953b74e506b51b5edb3"
@@ -505,13 +330,6 @@
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
-"@babel/helper-skip-transparent-expression-wrappers@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz#007f15240b5751c537c40e77abb4e89eeaaa8847"
- integrity sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==
- dependencies:
- "@babel/types" "^7.22.5"
-
"@babel/helper-skip-transparent-expression-wrappers@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz#5f8fa83b69ed5c27adc56044f8be2b3ea96669d9"
@@ -520,52 +338,21 @@
"@babel/traverse" "^7.24.7"
"@babel/types" "^7.24.7"
-"@babel/helper-split-export-declaration@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.5.tgz#b9a67f06a46b0b339323617c8c6213b9055a78b6"
- integrity sha512-5CHncttXohrHk8GWOFCcCl4oRD9fKosWlIRgWm4ql9VYioKm52Mk2xsmoohvm7f3JoiLSM5ZgJuRaf5QZZYd3Q==
- dependencies:
- "@babel/types" "^7.24.5"
-
-"@babel/helper-string-parser@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz#f99c36d3593db9540705d0739a1f10b5e20c696e"
- integrity sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ==
-
"@babel/helper-string-parser@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz#5b3329c9a58803d5df425e5785865881a81ca48d"
integrity sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==
-"@babel/helper-validator-identifier@^7.22.20", "@babel/helper-validator-identifier@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.5.tgz#918b1a7fa23056603506370089bd990d8720db62"
- integrity sha512-3q93SSKX2TWCG30M2G2kwaKeTYgEUp5Snjuj8qm729SObL6nbtUldAi37qbxkD5gg3xnBio+f9nqpSepGZMvxA==
-
"@babel/helper-validator-identifier@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz#75b889cfaf9e35c2aaf42cf0d72c8e91719251db"
integrity sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==
-"@babel/helper-validator-option@^7.23.5":
- version "7.23.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz#907a3fbd4523426285365d1206c423c4c5520307"
- integrity sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==
-
"@babel/helper-validator-option@^7.24.7", "@babel/helper-validator-option@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.24.8.tgz#3725cdeea8b480e86d34df15304806a06975e33d"
integrity sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==
-"@babel/helper-wrap-function@^7.22.20":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.24.5.tgz#335f934c0962e2c1ed1fb9d79e06a56115067c09"
- integrity sha512-/xxzuNvgRl4/HLNKvnFwdhdgN3cpLxgLROeLDl83Yx0AJ1SGvq1ak0OszTOjDfiB8Vx03eJbeDWh9r+jCCWttw==
- dependencies:
- "@babel/helper-function-name" "^7.23.0"
- "@babel/template" "^7.24.0"
- "@babel/types" "^7.24.5"
-
"@babel/helper-wrap-function@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.25.0.tgz#dab12f0f593d6ca48c0062c28bcfb14ebe812f81"
@@ -575,15 +362,6 @@
"@babel/traverse" "^7.25.0"
"@babel/types" "^7.25.0"
-"@babel/helpers@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.24.5.tgz#fedeb87eeafa62b621160402181ad8585a22a40a"
- integrity sha512-CiQmBMMpMQHwM5m01YnrM6imUG1ebgYJ+fAIW4FZe6m4qHTPaRHti+R8cggAwkdz4oXhtO4/K9JWlh+8hIfR2Q==
- dependencies:
- "@babel/template" "^7.24.0"
- "@babel/traverse" "^7.24.5"
- "@babel/types" "^7.24.5"
-
"@babel/helpers@^7.25.0":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.25.6.tgz#57ee60141829ba2e102f30711ffe3afab357cc60"
@@ -592,16 +370,6 @@
"@babel/template" "^7.25.0"
"@babel/types" "^7.25.6"
-"@babel/highlight@^7.24.2":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.5.tgz#bc0613f98e1dd0720e99b2a9ee3760194a704b6e"
- integrity sha512-8lLmua6AVh/8SLJRRVD6V8p73Hir9w5mJrhE+IPpILG31KKlI9iz5zmBYKcWPS59qSfgP9RaSBQSHHE81WKuEw==
- dependencies:
- "@babel/helper-validator-identifier" "^7.24.5"
- chalk "^2.4.2"
- js-tokens "^4.0.0"
- picocolors "^1.0.0"
-
"@babel/highlight@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.7.tgz#a05ab1df134b286558aae0ed41e6c5f731bf409d"
@@ -612,11 +380,6 @@
js-tokens "^4.0.0"
picocolors "^1.0.0"
-"@babel/parser@^7.24.0", "@babel/parser@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.5.tgz#4a4d5ab4315579e5398a82dcf636ca80c3392790"
- integrity sha512-EOv5IK8arwh3LI47dz1b0tKUb/1uhHAnHJOrjgtQMIpu1uXd9mlFrJg9IUgGUgZ41Ch0K8REPTYpO7B76b4vJg==
-
"@babel/parser@^7.25.0", "@babel/parser@^7.25.6":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.25.6.tgz#85660c5ef388cbbf6e3d2a694ee97a38f18afe2f"
@@ -624,14 +387,6 @@
dependencies:
"@babel/types" "^7.25.6"
-"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.5.tgz#4c3685eb9cd790bcad2843900fe0250c91ccf895"
- integrity sha512-LdXRi1wEMTrHVR4Zc9F8OewC3vdm5h4QB6L71zy6StmYeqGi1b3ttIO8UC+BfZKcH9jdr4aI249rBkm+3+YvHw==
- dependencies:
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-plugin-utils" "^7.24.5"
-
"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.25.3":
version "7.25.3"
resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.3.tgz#dca427b45a6c0f5c095a1c639dfe2476a3daba7f"
@@ -647,13 +402,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.1.tgz#b645d9ba8c2bc5b7af50f0fe949f9edbeb07c8cf"
- integrity sha512-y4HqEnkelJIOQGd+3g1bTeKsA5c6qM7eOn7VggGVbBc0y8MLSKHacwcIE2PplNlQSj0PqS9rrXL/nkPVK+kUNg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.0.tgz#749bde80356b295390954643de7635e0dffabe73"
@@ -661,15 +409,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.1.tgz#da8261f2697f0f41b0855b91d3a20a1fbfd271d3"
- integrity sha512-Hj791Ii4ci8HqnaKHAlLNs+zaLXb0EzSDhiAWp5VNlyvCNymYfacs64pxTxbH1znW/NcArSmwpmG9IKE/TUVVQ==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5"
- "@babel/plugin-transform-optional-chaining" "^7.24.1"
-
"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz#e4eabdd5109acc399b38d7999b2ef66fc2022f89"
@@ -679,14 +418,6 @@
"@babel/helper-skip-transparent-expression-wrappers" "^7.24.7"
"@babel/plugin-transform-optional-chaining" "^7.24.7"
-"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.1.tgz#1181d9685984c91d657b8ddf14f0487a6bab2988"
- integrity sha512-m9m/fXsXLiHfwdgydIFnpk+7jlVbnvlK5B2EKiPdLUb6WX654ZaaEWJUjk8TftRbZpK0XibovlLWX4KIZhV6jw==
- dependencies:
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.0.tgz#3a82a70e7cb7294ad2559465ebcb871dfbf078fb"
@@ -735,13 +466,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.8.3"
-"@babel/plugin-syntax-import-assertions@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.1.tgz#db3aad724153a00eaac115a3fb898de544e34971"
- integrity sha512-IuwnI5XnuF189t91XbxmXeCDz3qs6iDRO7GJ++wcfgeXNs/8FmIlKcpDSXNVyuLQxlwvskmI3Ct73wUODkJBlQ==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-syntax-import-assertions@^7.24.7":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.25.6.tgz#bb918905c58711b86f9710d74a3744b6c56573b5"
@@ -749,13 +473,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-syntax-import-attributes@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.1.tgz#c66b966c63b714c4eec508fcf5763b1f2d381093"
- integrity sha512-zhQTMH0X2nVLnb04tz+s7AMuasX8U0FnpE+nHTOhSOINjWMnopoZTxtIKsd45n4GQ/HIZLyfIpoul8e2m0DnRA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-syntax-import-attributes@^7.24.7":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.25.6.tgz#6d4c78f042db0e82fd6436cd65fec5dc78ad2bde"
@@ -777,13 +494,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
-"@babel/plugin-syntax-jsx@^7.23.3", "@babel/plugin-syntax-jsx@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.1.tgz#3f6ca04b8c841811dbc3c5c5f837934e0d626c10"
- integrity sha512-2eCtxZXf+kbkMIsXS4poTvT4Yu5rXiRa+9xGVT56raghjmBTKMpFNc9R4IDiB4emao9eO22Ox7CxuJG7BgExqA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-syntax-jsx@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz#39a1fa4a7e3d3d7f34e2acc6be585b718d30e02d"
@@ -847,13 +557,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.14.5"
-"@babel/plugin-syntax-typescript@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.1.tgz#b3bcc51f396d15f3591683f90239de143c076844"
- integrity sha512-Yhnmvy5HZEnHUty6i++gcfH1/l68AHnItFHnaCv6hn9dNh0hQvvQJsxpi4BMBFN5DLeHBuucT/0DgzXif/OyRw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-syntax-typescript@^7.24.7":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.4.tgz#04db9ce5a9043d9c635e75ae7969a2cd50ca97ff"
@@ -869,13 +572,6 @@
"@babel/helper-create-regexp-features-plugin" "^7.18.6"
"@babel/helper-plugin-utils" "^7.18.6"
-"@babel/plugin-transform-arrow-functions@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.1.tgz#2bf263617060c9cc45bcdbf492b8cc805082bf27"
- integrity sha512-ngT/3NkRhsaep9ck9uj2Xhv9+xB1zShY3tM3g6om4xxCELwCDN4g4Aq5dRn48+0hasAql7s2hdBOysCfNpr4fw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-arrow-functions@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz#4f6886c11e423bd69f3ce51dbf42424a5f275514"
@@ -883,16 +579,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-async-generator-functions@^7.24.3":
- version "7.24.3"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.3.tgz#8fa7ae481b100768cc9842c8617808c5352b8b89"
- integrity sha512-Qe26CMYVjpQxJ8zxM1340JFNjZaF+ISWpr1Kt/jGo+ZTUzKkfw/pphEWbRCb+lmSM6k/TOgfYLvmbHkUQ0asIg==
- dependencies:
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-remap-async-to-generator" "^7.22.20"
- "@babel/plugin-syntax-async-generators" "^7.8.4"
-
"@babel/plugin-transform-async-generator-functions@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.4.tgz#2afd4e639e2d055776c9f091b6c0c180ed8cf083"
@@ -903,15 +589,6 @@
"@babel/plugin-syntax-async-generators" "^7.8.4"
"@babel/traverse" "^7.25.4"
-"@babel/plugin-transform-async-to-generator@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.1.tgz#0e220703b89f2216800ce7b1c53cb0cf521c37f4"
- integrity sha512-AawPptitRXp1y0n4ilKcGbRYWfbbzFWz2NqNu7dacYDtFtz0CMjG64b3LQsb3KIgnf4/obcUL78hfaOS7iCUfw==
- dependencies:
- "@babel/helper-module-imports" "^7.24.1"
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-remap-async-to-generator" "^7.22.20"
-
"@babel/plugin-transform-async-to-generator@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz#72a3af6c451d575842a7e9b5a02863414355bdcc"
@@ -921,13 +598,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/helper-remap-async-to-generator" "^7.24.7"
-"@babel/plugin-transform-block-scoped-functions@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.1.tgz#1c94799e20fcd5c4d4589523bbc57b7692979380"
- integrity sha512-TWWC18OShZutrv9C6mye1xwtam+uNi2bnTOCBUd5sZxyHOiWbU6ztSROofIMrK84uweEZC219POICK/sTYwfgg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-block-scoped-functions@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz#a4251d98ea0c0f399dafe1a35801eaba455bbf1f"
@@ -935,13 +605,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-block-scoping@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.5.tgz#89574191397f85661d6f748d4b89ee4d9ee69a2a"
- integrity sha512-sMfBc3OxghjC95BkYrYocHL3NaOplrcaunblzwXhGmlPwpmfsxr4vK+mBBt49r+S240vahmv+kUxkeKgs+haCw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.5"
-
"@babel/plugin-transform-block-scoping@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.0.tgz#23a6ed92e6b006d26b1869b1c91d1b917c2ea2ac"
@@ -949,14 +612,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-class-properties@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.1.tgz#bcbf1aef6ba6085cfddec9fc8d58871cf011fc29"
- integrity sha512-OMLCXi0NqvJfORTaPQBwqLXHhb93wkBKZ4aNwMl6WtehO7ar+cmp+89iPEQPqxAnxsOKTaMcs3POz3rKayJ72g==
- dependencies:
- "@babel/helper-create-class-features-plugin" "^7.24.1"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-class-properties@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.4.tgz#bae7dbfcdcc2e8667355cd1fb5eda298f05189fd"
@@ -965,15 +620,6 @@
"@babel/helper-create-class-features-plugin" "^7.25.4"
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-class-static-block@^7.24.4":
- version "7.24.4"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.4.tgz#1a4653c0cf8ac46441ec406dece6e9bc590356a4"
- integrity sha512-B8q7Pz870Hz/q9UgP8InNpY01CSLDSCyqX7zcRuv3FcPl87A2G17lASroHWaCtbdIcbYzOZ7kWmXFKbijMSmFg==
- dependencies:
- "@babel/helper-create-class-features-plugin" "^7.24.4"
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-class-static-block" "^7.14.5"
-
"@babel/plugin-transform-class-static-block@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz#c82027ebb7010bc33c116d4b5044fbbf8c05484d"
@@ -983,20 +629,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-class-static-block" "^7.14.5"
-"@babel/plugin-transform-classes@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.5.tgz#05e04a09df49a46348299a0e24bfd7e901129339"
- integrity sha512-gWkLP25DFj2dwe9Ck8uwMOpko4YsqyfZJrOmqqcegeDYEbp7rmn4U6UQZNj08UF6MaX39XenSpKRCvpDRBtZ7Q==
- dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-compilation-targets" "^7.23.6"
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-function-name" "^7.23.0"
- "@babel/helper-plugin-utils" "^7.24.5"
- "@babel/helper-replace-supers" "^7.24.1"
- "@babel/helper-split-export-declaration" "^7.24.5"
- globals "^11.1.0"
-
"@babel/plugin-transform-classes@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.4.tgz#d29dbb6a72d79f359952ad0b66d88518d65ef89a"
@@ -1009,14 +641,6 @@
"@babel/traverse" "^7.25.4"
globals "^11.1.0"
-"@babel/plugin-transform-computed-properties@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.1.tgz#bc7e787f8e021eccfb677af5f13c29a9934ed8a7"
- integrity sha512-5pJGVIUfJpOS+pAqBQd+QMaTD2vCL/HcePooON6pDpHgRp4gNRmzyHTPIkXntwKsq3ayUFVfJaIKPw2pOkOcTw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/template" "^7.24.0"
-
"@babel/plugin-transform-computed-properties@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz#4cab3214e80bc71fae3853238d13d097b004c707"
@@ -1025,13 +649,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/template" "^7.24.7"
-"@babel/plugin-transform-destructuring@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.5.tgz#80843ee6a520f7362686d1a97a7b53544ede453c"
- integrity sha512-SZuuLyfxvsm+Ah57I/i1HVjveBENYK9ue8MJ7qkc7ndoNjqquJiElzA7f5yaAXjyW2hKojosOTAQQRX50bPSVg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.5"
-
"@babel/plugin-transform-destructuring@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.8.tgz#c828e814dbe42a2718a838c2a2e16a408e055550"
@@ -1039,14 +656,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-dotall-regex@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.1.tgz#d56913d2f12795cc9930801b84c6f8c47513ac13"
- integrity sha512-p7uUxgSoZwZ2lPNMzUkqCts3xlp8n+o05ikjy7gbtFJSt9gdU88jAmtfmOxHM14noQXBxfgzf2yRWECiNVhTCw==
- dependencies:
- "@babel/helper-create-regexp-features-plugin" "^7.22.15"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-dotall-regex@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz#5f8bf8a680f2116a7207e16288a5f974ad47a7a0"
@@ -1055,13 +664,6 @@
"@babel/helper-create-regexp-features-plugin" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-duplicate-keys@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.1.tgz#5347a797fe82b8d09749d10e9f5b83665adbca88"
- integrity sha512-msyzuUnvsjsaSaocV6L7ErfNsa5nDWL1XKNnDePLgmz+WdU4w/J8+AxBMrWfi9m4IxfL5sZQKUPQKDQeeAT6lA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-duplicate-keys@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz#dd20102897c9a2324e5adfffb67ff3610359a8ee"
@@ -1077,14 +679,6 @@
"@babel/helper-create-regexp-features-plugin" "^7.25.0"
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-dynamic-import@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.1.tgz#2a5a49959201970dd09a5fca856cb651e44439dd"
- integrity sha512-av2gdSTyXcJVdI+8aFZsCAtR29xJt0S5tas+Ef8NvBNmD1a+N/3ecMLeMBgfcK+xzsjdLDT6oHt+DFPyeqUbDA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-dynamic-import" "^7.8.3"
-
"@babel/plugin-transform-dynamic-import@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz#4d8b95e3bae2b037673091aa09cd33fecd6419f4"
@@ -1093,14 +687,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-dynamic-import" "^7.8.3"
-"@babel/plugin-transform-exponentiation-operator@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.1.tgz#6650ebeb5bd5c012d5f5f90a26613a08162e8ba4"
- integrity sha512-U1yX13dVBSwS23DEAqU+Z/PkwE9/m7QQy8Y9/+Tdb8UWYaGNDYwTLi19wqIAiROr8sXVum9A/rtiH5H0boUcTw==
- dependencies:
- "@babel/helper-builder-binary-assignment-operator-visitor" "^7.22.15"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-exponentiation-operator@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz#b629ee22645f412024297d5245bce425c31f9b0d"
@@ -1109,14 +695,6 @@
"@babel/helper-builder-binary-assignment-operator-visitor" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-export-namespace-from@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.1.tgz#f033541fc036e3efb2dcb58eedafd4f6b8078acd"
- integrity sha512-Ft38m/KFOyzKw2UaJFkWG9QnHPG/Q/2SkOrRk4pNBPg5IPZ+dOxcmkK5IyuBcxiNPyyYowPGUReyBvrvZs7IlQ==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-export-namespace-from" "^7.8.3"
-
"@babel/plugin-transform-export-namespace-from@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz#176d52d8d8ed516aeae7013ee9556d540c53f197"
@@ -1125,14 +703,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-export-namespace-from" "^7.8.3"
-"@babel/plugin-transform-for-of@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.1.tgz#67448446b67ab6c091360ce3717e7d3a59e202fd"
- integrity sha512-OxBdcnF04bpdQdR3i4giHZNZQn7cm8RQKcSwA17wAAqEELo1ZOwp5FFgeptWUQXFyT9kwHo10aqqauYkRZPCAg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5"
-
"@babel/plugin-transform-for-of@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz#f25b33f72df1d8be76399e1b8f3f9d366eb5bc70"
@@ -1141,15 +711,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/helper-skip-transparent-expression-wrappers" "^7.24.7"
-"@babel/plugin-transform-function-name@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.1.tgz#8cba6f7730626cc4dfe4ca2fa516215a0592b361"
- integrity sha512-BXmDZpPlh7jwicKArQASrj8n22/w6iymRnvHYYd2zO30DbE277JO20/7yXJT3QxDPtiQiOxQBbZH4TpivNXIxA==
- dependencies:
- "@babel/helper-compilation-targets" "^7.23.6"
- "@babel/helper-function-name" "^7.23.0"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-function-name@^7.25.1":
version "7.25.1"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.1.tgz#b85e773097526c1a4fc4ba27322748643f26fc37"
@@ -1159,14 +720,6 @@
"@babel/helper-plugin-utils" "^7.24.8"
"@babel/traverse" "^7.25.1"
-"@babel/plugin-transform-json-strings@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.1.tgz#08e6369b62ab3e8a7b61089151b161180c8299f7"
- integrity sha512-U7RMFmRvoasscrIFy5xA4gIp8iWnWubnKkKuUGJjsuOH7GfbMkB+XZzeslx2kLdEGdOJDamEmCqOks6e8nv8DQ==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-json-strings" "^7.8.3"
-
"@babel/plugin-transform-json-strings@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz#f3e9c37c0a373fee86e36880d45b3664cedaf73a"
@@ -1175,13 +728,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-json-strings" "^7.8.3"
-"@babel/plugin-transform-literals@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.1.tgz#0a1982297af83e6b3c94972686067df588c5c096"
- integrity sha512-zn9pwz8U7nCqOYIiBaOxoQOtYmMODXTJnkxG4AtX8fPmnCRYWBOHD0qcpwS9e2VDSp1zNJYpdnFMIKb8jmwu6g==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-literals@^7.25.2":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.2.tgz#deb1ad14fc5490b9a65ed830e025bca849d8b5f3"
@@ -1189,14 +735,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-logical-assignment-operators@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.1.tgz#719d8aded1aa94b8fb34e3a785ae8518e24cfa40"
- integrity sha512-OhN6J4Bpz+hIBqItTeWJujDOfNP+unqv/NJgyhlpSqgBTPm37KkMmZV6SYcOj+pnDbdcl1qRGV/ZiIjX9Iy34w==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
-
"@babel/plugin-transform-logical-assignment-operators@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz#a58fb6eda16c9dc8f9ff1c7b1ba6deb7f4694cb0"
@@ -1205,13 +743,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
-"@babel/plugin-transform-member-expression-literals@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.1.tgz#896d23601c92f437af8b01371ad34beb75df4489"
- integrity sha512-4ojai0KysTWXzHseJKa1XPNXKRbuUrhkOPY4rEGeR+7ChlJVKxFa3H3Bz+7tWaGKgJAXUWKOGmltN+u9B3+CVg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-member-expression-literals@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz#3b4454fb0e302e18ba4945ba3246acb1248315df"
@@ -1219,14 +750,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-modules-amd@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.1.tgz#b6d829ed15258536977e9c7cc6437814871ffa39"
- integrity sha512-lAxNHi4HVtjnHd5Rxg3D5t99Xm6H7b04hUS7EHIXcUl2EV4yl1gWdqZrNzXnSrHveL9qMdbODlLF55mvgjAfaQ==
- dependencies:
- "@babel/helper-module-transforms" "^7.23.3"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-modules-amd@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz#65090ed493c4a834976a3ca1cde776e6ccff32d7"
@@ -1235,15 +758,6 @@
"@babel/helper-module-transforms" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-modules-commonjs@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.1.tgz#e71ba1d0d69e049a22bf90b3867e263823d3f1b9"
- integrity sha512-szog8fFTUxBfw0b98gEWPaEqF42ZUD/T3bkynW/wtgx2p/XCP55WEsb+VosKceRSd6njipdZvNogqdtI4Q0chw==
- dependencies:
- "@babel/helper-module-transforms" "^7.23.3"
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-simple-access" "^7.22.5"
-
"@babel/plugin-transform-modules-commonjs@^7.24.7", "@babel/plugin-transform-modules-commonjs@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.8.tgz#ab6421e564b717cb475d6fff70ae7f103536ea3c"
@@ -1253,16 +767,6 @@
"@babel/helper-plugin-utils" "^7.24.8"
"@babel/helper-simple-access" "^7.24.7"
-"@babel/plugin-transform-modules-systemjs@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.1.tgz#2b9625a3d4e445babac9788daec39094e6b11e3e"
- integrity sha512-mqQ3Zh9vFO1Tpmlt8QPnbwGHzNz3lpNEMxQb1kAemn/erstyqw1r9KeOlOfo3y6xAnFEcOv2tSyrXfmMk+/YZA==
- dependencies:
- "@babel/helper-hoist-variables" "^7.22.5"
- "@babel/helper-module-transforms" "^7.23.3"
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-validator-identifier" "^7.22.20"
-
"@babel/plugin-transform-modules-systemjs@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.0.tgz#8f46cdc5f9e5af74f3bd019485a6cbe59685ea33"
@@ -1273,14 +777,6 @@
"@babel/helper-validator-identifier" "^7.24.7"
"@babel/traverse" "^7.25.0"
-"@babel/plugin-transform-modules-umd@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.1.tgz#69220c66653a19cf2c0872b9c762b9a48b8bebef"
- integrity sha512-tuA3lpPj+5ITfcCluy6nWonSL7RvaG0AOTeAuvXqEKS34lnLzXpDb0dcP6K8jD0zWZFNDVly90AGFJPnm4fOYg==
- dependencies:
- "@babel/helper-module-transforms" "^7.23.3"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-modules-umd@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz#edd9f43ec549099620df7df24e7ba13b5c76efc8"
@@ -1289,14 +785,6 @@
"@babel/helper-module-transforms" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-named-capturing-groups-regex@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz#67fe18ee8ce02d57c855185e27e3dc959b2e991f"
- integrity sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==
- dependencies:
- "@babel/helper-create-regexp-features-plugin" "^7.22.5"
- "@babel/helper-plugin-utils" "^7.22.5"
-
"@babel/plugin-transform-named-capturing-groups-regex@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz#9042e9b856bc6b3688c0c2e4060e9e10b1460923"
@@ -1305,13 +793,6 @@
"@babel/helper-create-regexp-features-plugin" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-new-target@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.1.tgz#29c59988fa3d0157de1c871a28cd83096363cc34"
- integrity sha512-/rurytBM34hYy0HKZQyA0nHbQgQNFm4Q/BOc9Hflxi2X3twRof7NaE5W46j4kQitm7SvACVRXsa6N/tSZxvPug==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-new-target@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz#31ff54c4e0555cc549d5816e4ab39241dfb6ab00"
@@ -1319,14 +800,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-nullish-coalescing-operator@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.1.tgz#0cd494bb97cb07d428bd651632cb9d4140513988"
- integrity sha512-iQ+caew8wRrhCikO5DrUYx0mrmdhkaELgFa+7baMcVuhxIkN7oxt06CZ51D65ugIb1UWRQ8oQe+HXAVM6qHFjw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3"
-
"@babel/plugin-transform-nullish-coalescing-operator@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz#1de4534c590af9596f53d67f52a92f12db984120"
@@ -1335,14 +808,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3"
-"@babel/plugin-transform-numeric-separator@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.1.tgz#5bc019ce5b3435c1cadf37215e55e433d674d4e8"
- integrity sha512-7GAsGlK4cNL2OExJH1DzmDeKnRv/LXq0eLUSvudrehVA5Rgg4bIrqEUW29FbKMBRT0ztSqisv7kjP+XIC4ZMNw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-numeric-separator" "^7.10.4"
-
"@babel/plugin-transform-numeric-separator@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz#bea62b538c80605d8a0fac9b40f48e97efa7de63"
@@ -1351,16 +816,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-numeric-separator" "^7.10.4"
-"@babel/plugin-transform-object-rest-spread@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.5.tgz#f91bbcb092ff957c54b4091c86bda8372f0b10ef"
- integrity sha512-7EauQHszLGM3ay7a161tTQH7fj+3vVM/gThlz5HpFtnygTxjrlvoeq7MPVA1Vy9Q555OB8SnAOsMkLShNkkrHA==
- dependencies:
- "@babel/helper-compilation-targets" "^7.23.6"
- "@babel/helper-plugin-utils" "^7.24.5"
- "@babel/plugin-syntax-object-rest-spread" "^7.8.3"
- "@babel/plugin-transform-parameters" "^7.24.5"
-
"@babel/plugin-transform-object-rest-spread@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz#d13a2b93435aeb8a197e115221cab266ba6e55d6"
@@ -1371,14 +826,6 @@
"@babel/plugin-syntax-object-rest-spread" "^7.8.3"
"@babel/plugin-transform-parameters" "^7.24.7"
-"@babel/plugin-transform-object-super@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.1.tgz#e71d6ab13483cca89ed95a474f542bbfc20a0520"
- integrity sha512-oKJqR3TeI5hSLRxudMjFQ9re9fBVUU0GICqM3J1mi8MqlhVr6hC/ZN4ttAyMuQR6EZZIY6h/exe5swqGNNIkWQ==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-replace-supers" "^7.24.1"
-
"@babel/plugin-transform-object-super@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz#66eeaff7830bba945dd8989b632a40c04ed625be"
@@ -1387,14 +834,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/helper-replace-supers" "^7.24.7"
-"@babel/plugin-transform-optional-catch-binding@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.1.tgz#92a3d0efe847ba722f1a4508669b23134669e2da"
- integrity sha512-oBTH7oURV4Y+3EUrf6cWn1OHio3qG/PVwO5J03iSJmBg6m2EhKjkAu/xuaXaYwWW9miYtvbWv4LNf0AmR43LUA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/plugin-syntax-optional-catch-binding" "^7.8.3"
-
"@babel/plugin-transform-optional-catch-binding@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz#00eabd883d0dd6a60c1c557548785919b6e717b4"
@@ -1403,15 +842,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-optional-catch-binding" "^7.8.3"
-"@babel/plugin-transform-optional-chaining@^7.24.1", "@babel/plugin-transform-optional-chaining@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.5.tgz#a6334bebd7f9dd3df37447880d0bd64b778e600f"
- integrity sha512-xWCkmwKT+ihmA6l7SSTpk8e4qQl/274iNbSKRRS8mpqFR32ksy36+a+LWY8OXCCEefF8WFlnOHVsaDI2231wBg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.5"
- "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5"
- "@babel/plugin-syntax-optional-chaining" "^7.8.3"
-
"@babel/plugin-transform-optional-chaining@^7.24.7", "@babel/plugin-transform-optional-chaining@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.8.tgz#bb02a67b60ff0406085c13d104c99a835cdf365d"
@@ -1421,13 +851,6 @@
"@babel/helper-skip-transparent-expression-wrappers" "^7.24.7"
"@babel/plugin-syntax-optional-chaining" "^7.8.3"
-"@babel/plugin-transform-parameters@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.5.tgz#5c3b23f3a6b8fed090f9b98f2926896d3153cc62"
- integrity sha512-9Co00MqZ2aoky+4j2jhofErthm6QVLKbpQrvz20c3CH9KQCLHyNB+t2ya4/UrRpQGR+Wrwjg9foopoeSdnHOkA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.5"
-
"@babel/plugin-transform-parameters@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz#5881f0ae21018400e320fc7eb817e529d1254b68"
@@ -1435,14 +858,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-private-methods@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.1.tgz#a0faa1ae87eff077e1e47a5ec81c3aef383dc15a"
- integrity sha512-tGvisebwBO5em4PaYNqt4fkw56K2VALsAbAakY0FjTYqJp7gfdrgr7YX76Or8/cpik0W6+tj3rZ0uHU9Oil4tw==
- dependencies:
- "@babel/helper-create-class-features-plugin" "^7.24.1"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-private-methods@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.4.tgz#9bbefbe3649f470d681997e0b64a4b254d877242"
@@ -1451,16 +866,6 @@
"@babel/helper-create-class-features-plugin" "^7.25.4"
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-private-property-in-object@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.5.tgz#f5d1fcad36e30c960134cb479f1ca98a5b06eda5"
- integrity sha512-JM4MHZqnWR04jPMujQDTBVRnqxpLLpx2tkn7iPn+Hmsc0Gnb79yvRWOkvqFOx3Z7P7VxiRIR22c4eGSNj87OBQ==
- dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-create-class-features-plugin" "^7.24.5"
- "@babel/helper-plugin-utils" "^7.24.5"
- "@babel/plugin-syntax-private-property-in-object" "^7.14.5"
-
"@babel/plugin-transform-private-property-in-object@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz#4eec6bc701288c1fab5f72e6a4bbc9d67faca061"
@@ -1471,13 +876,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-syntax-private-property-in-object" "^7.14.5"
-"@babel/plugin-transform-property-literals@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.1.tgz#d6a9aeab96f03749f4eebeb0b6ea8e90ec958825"
- integrity sha512-LetvD7CrHmEx0G442gOomRr66d7q8HzzGGr4PMHGr+5YIm6++Yke+jxj246rpvsbyhJwCLxcTn6zW1P1BSenqA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-property-literals@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz#f0d2ed8380dfbed949c42d4d790266525d63bbdc"
@@ -1492,46 +890,38 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-react-display-name@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.1.tgz#554e3e1a25d181f040cf698b93fd289a03bfdcdb"
- integrity sha512-mvoQg2f9p2qlpDQRBC7M3c3XTr0k7cp/0+kFKKO/7Gtu0LSw16eKB+Fabe2bDT/UpsyasTBBkAnbdsLrkD5XMw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
-"@babel/plugin-transform-react-jsx-development@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz#e716b6edbef972a92165cd69d92f1255f7e73e87"
- integrity sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==
+"@babel/plugin-transform-react-display-name@^7.24.7":
+ version "7.24.7"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.7.tgz#9caff79836803bc666bcfe210aeb6626230c293b"
+ integrity sha512-H/Snz9PFxKsS1JLI4dJLtnJgCJRoo0AUm3chP6NYr+9En1JMKloheEiLIhlp5MDVznWo+H3AAC1Mc8lmUEpsgg==
dependencies:
- "@babel/plugin-transform-react-jsx" "^7.22.5"
+ "@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-react-jsx@^7.22.5", "@babel/plugin-transform-react-jsx@^7.23.4":
- version "7.23.4"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.23.4.tgz#393f99185110cea87184ea47bcb4a7b0c2e39312"
- integrity sha512-5xOpoPguCZCRbo/JeHlloSkTA8Bld1J/E1/kLfD1nsuiW1m8tduTA1ERCgIZokDflX/IBzKcqR3l7VlRgiIfHA==
+"@babel/plugin-transform-react-jsx-development@^7.24.7":
+ version "7.24.7"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.24.7.tgz#eaee12f15a93f6496d852509a850085e6361470b"
+ integrity sha512-QG9EnzoGn+Qar7rxuW+ZOsbWOt56FvvI93xInqsZDC5fsekx1AlIO4KIJ5M+D0p0SqSH156EpmZyXq630B8OlQ==
dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-module-imports" "^7.22.15"
- "@babel/helper-plugin-utils" "^7.22.5"
- "@babel/plugin-syntax-jsx" "^7.23.3"
- "@babel/types" "^7.23.4"
+ "@babel/plugin-transform-react-jsx" "^7.24.7"
-"@babel/plugin-transform-react-pure-annotations@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.1.tgz#c86bce22a53956331210d268e49a0ff06e392470"
- integrity sha512-+pWEAaDJvSm9aFvJNpLiM2+ktl2Sn2U5DdyiWdZBxmLc6+xGt88dvFqsHiAiDS+8WqUwbDfkKz9jRxK3M0k+kA==
+"@babel/plugin-transform-react-jsx@^7.24.7":
+ version "7.25.2"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.25.2.tgz#e37e8ebfa77e9f0b16ba07fadcb6adb47412227a"
+ integrity sha512-KQsqEAVBpU82NM/B/N9j9WOdphom1SZH3R+2V7INrQUH+V9EBFwZsEJl8eBIVeQE62FxJCc70jzEZwqU7RcVqA==
dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-plugin-utils" "^7.24.0"
+ "@babel/helper-annotate-as-pure" "^7.24.7"
+ "@babel/helper-module-imports" "^7.24.7"
+ "@babel/helper-plugin-utils" "^7.24.8"
+ "@babel/plugin-syntax-jsx" "^7.24.7"
+ "@babel/types" "^7.25.2"
-"@babel/plugin-transform-regenerator@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.1.tgz#625b7545bae52363bdc1fbbdc7252b5046409c8c"
- integrity sha512-sJwZBCzIBE4t+5Q4IGLaaun5ExVMRY0lYwos/jNecjMrVCygCdph3IKv0tkP5Fc87e/1+bebAmEAGBfnRD+cnw==
+"@babel/plugin-transform-react-pure-annotations@^7.24.7":
+ version "7.24.7"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.7.tgz#bdd9d140d1c318b4f28b29a00fb94f97ecab1595"
+ integrity sha512-PLgBVk3fzbmEjBJ/u8kFzOqS9tUeDjiaWud/rRym/yjCo/M9cASPlnrd2ZmmZpQT40fOOrvR8jh+n8jikrOhNA==
dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- regenerator-transform "^0.15.2"
+ "@babel/helper-annotate-as-pure" "^7.24.7"
+ "@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-transform-regenerator@^7.24.7":
version "7.24.7"
@@ -1541,13 +931,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
regenerator-transform "^0.15.2"
-"@babel/plugin-transform-reserved-words@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.1.tgz#8de729f5ecbaaf5cf83b67de13bad38a21be57c1"
- integrity sha512-JAclqStUfIwKN15HrsQADFgeZt+wexNQ0uLhuqvqAUFoqPMjEcFCYZBhq0LUdz6dZK/mD+rErhW71fbx8RYElg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-reserved-words@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz#80037fe4fbf031fc1125022178ff3938bb3743a4"
@@ -1556,24 +939,17 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/plugin-transform-runtime@^7.22.9":
- version "7.24.3"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.3.tgz#dc58ad4a31810a890550365cc922e1ff5acb5d7f"
- integrity sha512-J0BuRPNlNqlMTRJ72eVptpt9VcInbxO6iP3jaxr+1NPhC0UkKL+6oeX6VXMEYdADnuqmMmsBspt4d5w8Y/TCbQ==
+ version "7.25.4"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.25.4.tgz#96e4ad7bfbbe0b4a7b7e6f2a533ca326cf204963"
+ integrity sha512-8hsyG+KUYGY0coX6KUCDancA0Vw225KJ2HJO0yCNr1vq5r+lJTleDaJf0K7iOhjw4SWhu03TMBzYTJ9krmzULQ==
dependencies:
- "@babel/helper-module-imports" "^7.24.3"
- "@babel/helper-plugin-utils" "^7.24.0"
+ "@babel/helper-module-imports" "^7.24.7"
+ "@babel/helper-plugin-utils" "^7.24.8"
babel-plugin-polyfill-corejs2 "^0.4.10"
- babel-plugin-polyfill-corejs3 "^0.10.1"
+ babel-plugin-polyfill-corejs3 "^0.10.6"
babel-plugin-polyfill-regenerator "^0.6.1"
semver "^6.3.1"
-"@babel/plugin-transform-shorthand-properties@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.1.tgz#ba9a09144cf55d35ec6b93a32253becad8ee5b55"
- integrity sha512-LyjVB1nsJ6gTTUKRjRWx9C1s9hE7dLfP/knKdrfeH9UPtAGjYGgxIbFfx7xyLIEWs7Xe1Gnf8EWiUqfjLhInZA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-shorthand-properties@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz#85448c6b996e122fa9e289746140aaa99da64e73"
@@ -1581,14 +957,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-spread@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.1.tgz#a1acf9152cbf690e4da0ba10790b3ac7d2b2b391"
- integrity sha512-KjmcIM+fxgY+KxPVbjelJC6hrH1CgtPmTvdXAfn3/a9CnWGSTY7nH4zm5+cjmWJybdcPSsD0++QssDsjcpe47g==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5"
-
"@babel/plugin-transform-spread@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz#e8a38c0fde7882e0fb8f160378f74bd885cc7bb3"
@@ -1597,13 +965,6 @@
"@babel/helper-plugin-utils" "^7.24.7"
"@babel/helper-skip-transparent-expression-wrappers" "^7.24.7"
-"@babel/plugin-transform-sticky-regex@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.1.tgz#f03e672912c6e203ed8d6e0271d9c2113dc031b9"
- integrity sha512-9v0f1bRXgPVcPrngOQvLXeGNNVLc8UjMVfebo9ka0WF3/7+aVUHmaJVT3sa0XCzEFioPfPHZiOcYG9qOsH63cw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-sticky-regex@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz#96ae80d7a7e5251f657b5cf18f1ea6bf926f5feb"
@@ -1611,13 +972,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-template-literals@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.1.tgz#15e2166873a30d8617e3e2ccadb86643d327aab7"
- integrity sha512-WRkhROsNzriarqECASCNu/nojeXCDTE/F2HmRgOzi7NGvyfYGq1NEjKBK3ckLfRgGc6/lPAqP0vDOSw3YtG34g==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-template-literals@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz#a05debb4a9072ae8f985bcf77f3f215434c8f8c8"
@@ -1625,13 +979,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-typeof-symbol@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.5.tgz#703cace5ef74155fb5eecab63cbfc39bdd25fe12"
- integrity sha512-UTGnhYVZtTAjdwOTzT+sCyXmTn8AhaxOS/MjG9REclZ6ULHWF9KoCZur0HSGU7hk8PdBFKKbYe6+gqdXWz84Jg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.5"
-
"@babel/plugin-transform-typeof-symbol@^7.24.8":
version "7.24.8"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.8.tgz#383dab37fb073f5bfe6e60c654caac309f92ba1c"
@@ -1639,16 +986,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/plugin-transform-typescript@^7.24.1":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.5.tgz#bcba979e462120dc06a75bd34c473a04781931b8"
- integrity sha512-E0VWu/hk83BIFUWnsKZ4D81KXjN5L3MobvevOHErASk9IPwKHOkTgvqzvNo1yP/ePJWqqK2SpUR5z+KQbl6NVw==
- dependencies:
- "@babel/helper-annotate-as-pure" "^7.22.5"
- "@babel/helper-create-class-features-plugin" "^7.24.5"
- "@babel/helper-plugin-utils" "^7.24.5"
- "@babel/plugin-syntax-typescript" "^7.24.1"
-
"@babel/plugin-transform-typescript@^7.24.7":
version "7.25.2"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.25.2.tgz#237c5d10de6d493be31637c6b9fa30b6c5461add"
@@ -1660,13 +997,6 @@
"@babel/helper-skip-transparent-expression-wrappers" "^7.24.7"
"@babel/plugin-syntax-typescript" "^7.24.7"
-"@babel/plugin-transform-unicode-escapes@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.1.tgz#fb3fa16676549ac7c7449db9b342614985c2a3a4"
- integrity sha512-RlkVIcWT4TLI96zM660S877E7beKlQw7Ig+wqkKBiWfj0zH5Q4h50q6er4wzZKRNSYpfo6ILJ+hrJAGSX2qcNw==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-unicode-escapes@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz#2023a82ced1fb4971630a2e079764502c4148e0e"
@@ -1674,14 +1004,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-unicode-property-regex@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.1.tgz#56704fd4d99da81e5e9f0c0c93cabd91dbc4889e"
- integrity sha512-Ss4VvlfYV5huWApFsF8/Sq0oXnGO+jB+rijFEFugTd3cwSObUSnUi88djgR5528Csl0uKlrI331kRqe56Ov2Ng==
- dependencies:
- "@babel/helper-create-regexp-features-plugin" "^7.22.15"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-unicode-property-regex@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz#9073a4cd13b86ea71c3264659590ac086605bbcd"
@@ -1690,14 +1012,6 @@
"@babel/helper-create-regexp-features-plugin" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-unicode-regex@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.1.tgz#57c3c191d68f998ac46b708380c1ce4d13536385"
- integrity sha512-2A/94wgZgxfTsiLaQ2E36XAOdcZmGAaEEgVmxQWwZXWkGhvoHbaqXcKnU8zny4ycpu3vNqg0L/PcCiYtHtA13g==
- dependencies:
- "@babel/helper-create-regexp-features-plugin" "^7.22.15"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-unicode-regex@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz#dfc3d4a51127108099b19817c0963be6a2adf19f"
@@ -1706,14 +1020,6 @@
"@babel/helper-create-regexp-features-plugin" "^7.24.7"
"@babel/helper-plugin-utils" "^7.24.7"
-"@babel/plugin-transform-unicode-sets-regex@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.1.tgz#c1ea175b02afcffc9cf57a9c4658326625165b7f"
- integrity sha512-fqj4WuzzS+ukpgerpAoOnMfQXwUHFxXUZUE84oL2Kao2N8uSlvcpnAidKASgsNgzZHBsHWvcm8s9FPWUhAb8fA==
- dependencies:
- "@babel/helper-create-regexp-features-plugin" "^7.22.15"
- "@babel/helper-plugin-utils" "^7.24.0"
-
"@babel/plugin-transform-unicode-sets-regex@^7.25.4":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.4.tgz#be664c2a0697ffacd3423595d5edef6049e8946c"
@@ -1722,7 +1028,7 @@
"@babel/helper-create-regexp-features-plugin" "^7.25.2"
"@babel/helper-plugin-utils" "^7.24.8"
-"@babel/preset-env@^7.20.2":
+"@babel/preset-env@^7.20.2", "@babel/preset-env@^7.22.9":
version "7.25.4"
resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.25.4.tgz#be23043d43a34a2721cd0f676c7ba6f1481f6af6"
integrity sha512-W9Gyo+KmcxjGahtt3t9fb14vFRWvPpu5pT6GBlovAK6BTBcxgjfVMSQCfJl4oi35ODrxP6xx2Wr8LNST57Mraw==
@@ -1811,93 +1117,6 @@
core-js-compat "^3.37.1"
semver "^6.3.1"
-"@babel/preset-env@^7.22.9":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.24.5.tgz#6a9ac90bd5a5a9dae502af60dfc58c190551bbcd"
- integrity sha512-UGK2ifKtcC8i5AI4cH+sbLLuLc2ktYSFJgBAXorKAsHUZmrQ1q6aQ6i3BvU24wWs2AAKqQB6kq3N9V9Gw1HiMQ==
- dependencies:
- "@babel/compat-data" "^7.24.4"
- "@babel/helper-compilation-targets" "^7.23.6"
- "@babel/helper-plugin-utils" "^7.24.5"
- "@babel/helper-validator-option" "^7.23.5"
- "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.24.5"
- "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.24.1"
- "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.24.1"
- "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.24.1"
- "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2"
- "@babel/plugin-syntax-async-generators" "^7.8.4"
- "@babel/plugin-syntax-class-properties" "^7.12.13"
- "@babel/plugin-syntax-class-static-block" "^7.14.5"
- "@babel/plugin-syntax-dynamic-import" "^7.8.3"
- "@babel/plugin-syntax-export-namespace-from" "^7.8.3"
- "@babel/plugin-syntax-import-assertions" "^7.24.1"
- "@babel/plugin-syntax-import-attributes" "^7.24.1"
- "@babel/plugin-syntax-import-meta" "^7.10.4"
- "@babel/plugin-syntax-json-strings" "^7.8.3"
- "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
- "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3"
- "@babel/plugin-syntax-numeric-separator" "^7.10.4"
- "@babel/plugin-syntax-object-rest-spread" "^7.8.3"
- "@babel/plugin-syntax-optional-catch-binding" "^7.8.3"
- "@babel/plugin-syntax-optional-chaining" "^7.8.3"
- "@babel/plugin-syntax-private-property-in-object" "^7.14.5"
- "@babel/plugin-syntax-top-level-await" "^7.14.5"
- "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6"
- "@babel/plugin-transform-arrow-functions" "^7.24.1"
- "@babel/plugin-transform-async-generator-functions" "^7.24.3"
- "@babel/plugin-transform-async-to-generator" "^7.24.1"
- "@babel/plugin-transform-block-scoped-functions" "^7.24.1"
- "@babel/plugin-transform-block-scoping" "^7.24.5"
- "@babel/plugin-transform-class-properties" "^7.24.1"
- "@babel/plugin-transform-class-static-block" "^7.24.4"
- "@babel/plugin-transform-classes" "^7.24.5"
- "@babel/plugin-transform-computed-properties" "^7.24.1"
- "@babel/plugin-transform-destructuring" "^7.24.5"
- "@babel/plugin-transform-dotall-regex" "^7.24.1"
- "@babel/plugin-transform-duplicate-keys" "^7.24.1"
- "@babel/plugin-transform-dynamic-import" "^7.24.1"
- "@babel/plugin-transform-exponentiation-operator" "^7.24.1"
- "@babel/plugin-transform-export-namespace-from" "^7.24.1"
- "@babel/plugin-transform-for-of" "^7.24.1"
- "@babel/plugin-transform-function-name" "^7.24.1"
- "@babel/plugin-transform-json-strings" "^7.24.1"
- "@babel/plugin-transform-literals" "^7.24.1"
- "@babel/plugin-transform-logical-assignment-operators" "^7.24.1"
- "@babel/plugin-transform-member-expression-literals" "^7.24.1"
- "@babel/plugin-transform-modules-amd" "^7.24.1"
- "@babel/plugin-transform-modules-commonjs" "^7.24.1"
- "@babel/plugin-transform-modules-systemjs" "^7.24.1"
- "@babel/plugin-transform-modules-umd" "^7.24.1"
- "@babel/plugin-transform-named-capturing-groups-regex" "^7.22.5"
- "@babel/plugin-transform-new-target" "^7.24.1"
- "@babel/plugin-transform-nullish-coalescing-operator" "^7.24.1"
- "@babel/plugin-transform-numeric-separator" "^7.24.1"
- "@babel/plugin-transform-object-rest-spread" "^7.24.5"
- "@babel/plugin-transform-object-super" "^7.24.1"
- "@babel/plugin-transform-optional-catch-binding" "^7.24.1"
- "@babel/plugin-transform-optional-chaining" "^7.24.5"
- "@babel/plugin-transform-parameters" "^7.24.5"
- "@babel/plugin-transform-private-methods" "^7.24.1"
- "@babel/plugin-transform-private-property-in-object" "^7.24.5"
- "@babel/plugin-transform-property-literals" "^7.24.1"
- "@babel/plugin-transform-regenerator" "^7.24.1"
- "@babel/plugin-transform-reserved-words" "^7.24.1"
- "@babel/plugin-transform-shorthand-properties" "^7.24.1"
- "@babel/plugin-transform-spread" "^7.24.1"
- "@babel/plugin-transform-sticky-regex" "^7.24.1"
- "@babel/plugin-transform-template-literals" "^7.24.1"
- "@babel/plugin-transform-typeof-symbol" "^7.24.5"
- "@babel/plugin-transform-unicode-escapes" "^7.24.1"
- "@babel/plugin-transform-unicode-property-regex" "^7.24.1"
- "@babel/plugin-transform-unicode-regex" "^7.24.1"
- "@babel/plugin-transform-unicode-sets-regex" "^7.24.1"
- "@babel/preset-modules" "0.1.6-no-external-plugins"
- babel-plugin-polyfill-corejs2 "^0.4.10"
- babel-plugin-polyfill-corejs3 "^0.10.4"
- babel-plugin-polyfill-regenerator "^0.6.1"
- core-js-compat "^3.31.0"
- semver "^6.3.1"
-
"@babel/preset-modules@0.1.6-no-external-plugins":
version "0.1.6-no-external-plugins"
resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz#ccb88a2c49c817236861fee7826080573b8a923a"
@@ -1908,18 +1127,18 @@
esutils "^2.0.2"
"@babel/preset-react@^7.18.6", "@babel/preset-react@^7.22.5":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.24.1.tgz#2450c2ac5cc498ef6101a6ca5474de251e33aa95"
- integrity sha512-eFa8up2/8cZXLIpkafhaADTXSnl7IsUFCYenRWrARBz0/qZwcT0RBXpys0LJU4+WfPoF2ZG6ew6s2V6izMCwRA==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-validator-option" "^7.23.5"
- "@babel/plugin-transform-react-display-name" "^7.24.1"
- "@babel/plugin-transform-react-jsx" "^7.23.4"
- "@babel/plugin-transform-react-jsx-development" "^7.22.5"
- "@babel/plugin-transform-react-pure-annotations" "^7.24.1"
-
-"@babel/preset-typescript@^7.21.0":
+ version "7.24.7"
+ resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.24.7.tgz#480aeb389b2a798880bf1f889199e3641cbb22dc"
+ integrity sha512-AAH4lEkpmzFWrGVlHaxJB7RLH21uPQ9+He+eFLWHmF9IuFQVugz8eAsamaW0DXRrTfco5zj1wWtpdcXJUOfsag==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.24.7"
+ "@babel/helper-validator-option" "^7.24.7"
+ "@babel/plugin-transform-react-display-name" "^7.24.7"
+ "@babel/plugin-transform-react-jsx" "^7.24.7"
+ "@babel/plugin-transform-react-jsx-development" "^7.24.7"
+ "@babel/plugin-transform-react-pure-annotations" "^7.24.7"
+
+"@babel/preset-typescript@^7.21.0", "@babel/preset-typescript@^7.22.5":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz#66cd86ea8f8c014855671d5ea9a737139cbbfef1"
integrity sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==
@@ -1930,46 +1149,26 @@
"@babel/plugin-transform-modules-commonjs" "^7.24.7"
"@babel/plugin-transform-typescript" "^7.24.7"
-"@babel/preset-typescript@^7.22.5":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.24.1.tgz#89bdf13a3149a17b3b2a2c9c62547f06db8845ec"
- integrity sha512-1DBaMmRDpuYQBPWD8Pf/WEwCrtgRHxsZnP4mIy9G/X+hFfbI47Q2G4t1Paakld84+qsk2fSsUPMKg71jkoOOaQ==
- dependencies:
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-validator-option" "^7.23.5"
- "@babel/plugin-syntax-jsx" "^7.24.1"
- "@babel/plugin-transform-modules-commonjs" "^7.24.1"
- "@babel/plugin-transform-typescript" "^7.24.1"
-
"@babel/regjsgen@^0.8.0":
version "0.8.0"
resolved "https://registry.yarnpkg.com/@babel/regjsgen/-/regjsgen-0.8.0.tgz#f0ba69b075e1f05fb2825b7fad991e7adbb18310"
integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==
"@babel/runtime-corejs3@^7.22.6":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.24.5.tgz#d2a5f46a088caf8f3899ad095054f83b0a686194"
- integrity sha512-GWO0mgzNMLWaSYM4z4NVIuY0Cd1fl8cPnuetuddu5w/qGuvt5Y7oUi/kvvQGK9xgOkFJDQX2heIvTRn/OQ1XTg==
+ version "7.25.6"
+ resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.25.6.tgz#5e3facf42775cc95bcde95746e940061931286e4"
+ integrity sha512-Gz0Nrobx8szge6kQQ5Z5MX9L3ObqNwCQY1PSwSNzreFL7aHGxv8Fp2j3ETV6/wWdbiV+mW6OSm8oQhg3Tcsniw==
dependencies:
core-js-pure "^3.30.2"
regenerator-runtime "^0.14.0"
"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.17.8", "@babel/runtime@^7.22.6", "@babel/runtime@^7.8.4":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.5.tgz#230946857c053a36ccc66e1dd03b17dd0c4ed02c"
- integrity sha512-Nms86NXrsaeU9vbBJKni6gXiEXZ4CVpYVzEjDH9Sb8vmZ3UljyA1GSOJl/6LGPO8EHLuSF9H+IxNXHPX8QHJ4g==
+ version "7.25.6"
+ resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.25.6.tgz#9afc3289f7184d8d7f98b099884c26317b9264d2"
+ integrity sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==
dependencies:
regenerator-runtime "^0.14.0"
-"@babel/template@^7.22.15", "@babel/template@^7.24.0":
- version "7.24.0"
- resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.24.0.tgz#c6a524aa93a4a05d66aaf31654258fae69d87d50"
- integrity sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==
- dependencies:
- "@babel/code-frame" "^7.23.5"
- "@babel/parser" "^7.24.0"
- "@babel/types" "^7.24.0"
-
"@babel/template@^7.24.7", "@babel/template@^7.25.0":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.0.tgz#e733dc3134b4fede528c15bc95e89cb98c52592a"
@@ -1979,23 +1178,7 @@
"@babel/parser" "^7.25.0"
"@babel/types" "^7.25.0"
-"@babel/traverse@^7.22.8", "@babel/traverse@^7.24.5":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.5.tgz#972aa0bc45f16983bf64aa1f877b2dd0eea7e6f8"
- integrity sha512-7aaBLeDQ4zYcUFDUD41lJc1fG8+5IU9DaNSJAgal866FGvmD5EbWQgnEC6kO1gGLsX0esNkfnJSndbTXA3r7UA==
- dependencies:
- "@babel/code-frame" "^7.24.2"
- "@babel/generator" "^7.24.5"
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-function-name" "^7.23.0"
- "@babel/helper-hoist-variables" "^7.22.5"
- "@babel/helper-split-export-declaration" "^7.24.5"
- "@babel/parser" "^7.24.5"
- "@babel/types" "^7.24.5"
- debug "^4.3.1"
- globals "^11.1.0"
-
-"@babel/traverse@^7.24.7", "@babel/traverse@^7.24.8", "@babel/traverse@^7.25.0", "@babel/traverse@^7.25.1", "@babel/traverse@^7.25.2", "@babel/traverse@^7.25.3", "@babel/traverse@^7.25.4":
+"@babel/traverse@^7.22.8", "@babel/traverse@^7.24.7", "@babel/traverse@^7.24.8", "@babel/traverse@^7.25.0", "@babel/traverse@^7.25.1", "@babel/traverse@^7.25.2", "@babel/traverse@^7.25.3", "@babel/traverse@^7.25.4":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.25.6.tgz#04fad980e444f182ecf1520504941940a90fea41"
integrity sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==
@@ -2008,7 +1191,7 @@
debug "^4.3.1"
globals "^11.1.0"
-"@babel/types@^7.21.3", "@babel/types@^7.24.7", "@babel/types@^7.24.8", "@babel/types@^7.25.0", "@babel/types@^7.25.2", "@babel/types@^7.25.6":
+"@babel/types@^7.21.3", "@babel/types@^7.24.7", "@babel/types@^7.24.8", "@babel/types@^7.25.0", "@babel/types@^7.25.2", "@babel/types@^7.25.6", "@babel/types@^7.4.4":
version "7.25.6"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.25.6.tgz#893942ddb858f32ae7a004ec9d3a76b3463ef8e6"
integrity sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==
@@ -2017,15 +1200,6 @@
"@babel/helper-validator-identifier" "^7.24.7"
to-fast-properties "^2.0.0"
-"@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.23.4", "@babel/types@^7.24.0", "@babel/types@^7.24.5", "@babel/types@^7.4.4":
- version "7.24.5"
- resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.5.tgz#7661930afc638a5383eb0c4aee59b74f38db84d7"
- integrity sha512-6mQNsaLeXTw0nxYUYu+NSa4Hx4BlF1x1x8/PMFbiR+GBSr+2DkECc69b8hgy2frEodNcvPffeH8YfWd3LI6jhQ==
- dependencies:
- "@babel/helper-string-parser" "^7.24.1"
- "@babel/helper-validator-identifier" "^7.24.5"
- to-fast-properties "^2.0.0"
-
"@cfaester/enzyme-adapter-react-18@^0.8.0":
version "0.8.0"
resolved "https://registry.yarnpkg.com/@cfaester/enzyme-adapter-react-18/-/enzyme-adapter-react-18-0.8.0.tgz#313814eb79658a6e74209f9f1743bcefff14a46f"
@@ -2042,34 +1216,35 @@
resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9"
integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==
-"@cspell/cspell-bundled-dicts@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/cspell-bundled-dicts/-/cspell-bundled-dicts-8.14.2.tgz#746706485228e055ff342a66191cb6b9e58e748a"
- integrity sha512-Kv2Utj/RTSxfufGXkkoTZ/3ErCsYWpCijtDFr/FwSsM7mC0PzLpdlcD9xjtgrJO5Kwp7T47iTG21U4Mwddyi8Q==
+"@cspell/cspell-bundled-dicts@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/cspell-bundled-dicts/-/cspell-bundled-dicts-8.14.3.tgz#c91e8be7c1890b963608bb2887ffb150795ed94f"
+ integrity sha512-O0QA2OD0IDatIxNinr5woWJ8uC3/fbMaOdu3R+142wMX3f2hB08Wfvk+anFgFVTgo90JQnmKEvlCZD1Q8QlWig==
dependencies:
"@cspell/dict-ada" "^4.0.2"
- "@cspell/dict-aws" "^4.0.3"
- "@cspell/dict-bash" "^4.1.3"
+ "@cspell/dict-aws" "^4.0.4"
+ "@cspell/dict-bash" "^4.1.4"
"@cspell/dict-companies" "^3.1.4"
- "@cspell/dict-cpp" "^5.1.12"
+ "@cspell/dict-cpp" "^5.1.16"
"@cspell/dict-cryptocurrencies" "^5.0.0"
"@cspell/dict-csharp" "^4.0.2"
"@cspell/dict-css" "^4.0.13"
- "@cspell/dict-dart" "^2.0.3"
+ "@cspell/dict-dart" "^2.2.1"
"@cspell/dict-django" "^4.1.0"
"@cspell/dict-docker" "^1.1.7"
- "@cspell/dict-dotnet" "^5.0.2"
+ "@cspell/dict-dotnet" "^5.0.5"
"@cspell/dict-elixir" "^4.0.3"
"@cspell/dict-en-common-misspellings" "^2.0.4"
"@cspell/dict-en-gb" "1.1.33"
"@cspell/dict-en_us" "^4.3.23"
"@cspell/dict-filetypes" "^3.0.4"
+ "@cspell/dict-flutter" "^1.0.0"
"@cspell/dict-fonts" "^4.0.0"
"@cspell/dict-fsharp" "^1.0.1"
"@cspell/dict-fullstack" "^3.2.0"
"@cspell/dict-gaming-terms" "^1.0.5"
"@cspell/dict-git" "^3.0.0"
- "@cspell/dict-golang" "^6.0.9"
+ "@cspell/dict-golang" "^6.0.12"
"@cspell/dict-google" "^1.0.1"
"@cspell/dict-haskell" "^4.0.1"
"@cspell/dict-html" "^4.0.5"
@@ -2083,73 +1258,73 @@
"@cspell/dict-makefile" "^1.0.0"
"@cspell/dict-monkeyc" "^1.0.6"
"@cspell/dict-node" "^5.0.1"
- "@cspell/dict-npm" "^5.0.18"
- "@cspell/dict-php" "^4.0.8"
- "@cspell/dict-powershell" "^5.0.5"
- "@cspell/dict-public-licenses" "^2.0.7"
- "@cspell/dict-python" "^4.2.4"
+ "@cspell/dict-npm" "^5.1.4"
+ "@cspell/dict-php" "^4.0.10"
+ "@cspell/dict-powershell" "^5.0.8"
+ "@cspell/dict-public-licenses" "^2.0.8"
+ "@cspell/dict-python" "^4.2.6"
"@cspell/dict-r" "^2.0.1"
- "@cspell/dict-ruby" "^5.0.2"
+ "@cspell/dict-ruby" "^5.0.3"
"@cspell/dict-rust" "^4.0.5"
"@cspell/dict-scala" "^5.0.3"
- "@cspell/dict-software-terms" "^4.0.6"
+ "@cspell/dict-software-terms" "^4.1.3"
"@cspell/dict-sql" "^2.1.5"
"@cspell/dict-svelte" "^1.0.2"
"@cspell/dict-swift" "^2.0.1"
- "@cspell/dict-terraform" "^1.0.0"
+ "@cspell/dict-terraform" "^1.0.1"
"@cspell/dict-typescript" "^3.1.6"
"@cspell/dict-vue" "^3.0.0"
-"@cspell/cspell-json-reporter@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/cspell-json-reporter/-/cspell-json-reporter-8.14.2.tgz#c05e6e0d6e63072e3f416c9b3088aa8329de19a1"
- integrity sha512-TZavcnNIZKX1xC/GNj80RgFVKHCT4pHT0qm9jCsQFH2QJfyCrUlkEvotKGSQ04lAyCwWg6Enq95qhouF8YbKUQ==
+"@cspell/cspell-json-reporter@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/cspell-json-reporter/-/cspell-json-reporter-8.14.3.tgz#8272dc695a766d602b8a9fbba0c4cc43ec9e2c2c"
+ integrity sha512-xJbIhSVL1LrwtIpFYvfbXMXv0GUqp2mFkTdG652zb4ZCjQUitmAN1eOhpUt2WHqyCdsMNjMcoJ05PNAN1LrLBQ==
dependencies:
- "@cspell/cspell-types" "8.14.2"
+ "@cspell/cspell-types" "8.14.3"
-"@cspell/cspell-pipe@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/cspell-pipe/-/cspell-pipe-8.14.2.tgz#8f9df27fff5c6c55e29fa6967768a38b5494a665"
- integrity sha512-aWMoXZAXEre0/M9AYWOW33YyOJZ06i4vvsEpWBDWpHpWQEmsR/7cMMgld8Pp3wlEjIUclUAKTYmrZ61PFWU/og==
+"@cspell/cspell-pipe@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/cspell-pipe/-/cspell-pipe-8.14.3.tgz#34b22050651c1bdfbf83085159701b7d0e32a0a2"
+ integrity sha512-vc4gcsQ/qLHcHHz1EmTLe0x1aZYUzkQAyIOTLRWFlsWrdztXQ3zSEaPB2JzgLNCaqJrYJPs5Wh/Uo+6w9ZaIeA==
-"@cspell/cspell-resolver@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/cspell-resolver/-/cspell-resolver-8.14.2.tgz#0cfaa0d0f613feab76ddb30a1d0a60d428726a0c"
- integrity sha512-pSyBsAvslaN0dx0pHdvECJEuFDDBJGAD6G8U4BVbIyj2OPk0Ox0HrZIj6csYxxoJERAgNO/q7yCPwa4j9NNFXg==
+"@cspell/cspell-resolver@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/cspell-resolver/-/cspell-resolver-8.14.3.tgz#9dcfc454934705cb07bc57d4c61f5c190428803b"
+ integrity sha512-SOG4LQS4rt93FnCyCsDfCxOuq+uTzco6zpncwMU1GgH8bSEEeiDphGsgmdgK7XxKNlr59o8JFeD+45AkJWHm5w==
dependencies:
global-directory "^4.0.1"
-"@cspell/cspell-service-bus@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/cspell-service-bus/-/cspell-service-bus-8.14.2.tgz#f0d317fd4de99700b2b7f10b05d88dc2ea55d7f8"
- integrity sha512-WUF7xf3YgXYIqjmBwLcVugYIrYL4WfXchgSo9rmbbnOcAArzsK+HKfzb4AniZAJ1unxcIQ0JnVlRmnCAKPjjLg==
+"@cspell/cspell-service-bus@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/cspell-service-bus/-/cspell-service-bus-8.14.3.tgz#a2f040ef0db46b7e21108aedf82d9ef41a977376"
+ integrity sha512-bqb+6KlFMVEprBlga1olLmZFWmsT267hmLZHhQoNKTlZJlzyQjmAd4XYUJyH9oEYOOt4t5PgqtZJSxudmq2SIw==
-"@cspell/cspell-types@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/cspell-types/-/cspell-types-8.14.2.tgz#e1bae42766b43ff6d943b57bb7830bc873f25c94"
- integrity sha512-MRY8MjBNOKGMDSkxAKueYAgVL43miO+lDcLCBBP+7cNXqHiUFMIZteONcGp3kJT0dWS04dN6lKAXvaNF0aWcng==
+"@cspell/cspell-types@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/cspell-types/-/cspell-types-8.14.3.tgz#c43452cb9cac1a47f02005c24f6600acfe863793"
+ integrity sha512-t8cvWSLVmgoAnmwMKXf0W1k3aWPPksTIqcNFMVF2f3m4dZh9HBh+M+xK9mXXScALmQev+psbvbiTjRlKD52ZnQ==
"@cspell/dict-ada@^4.0.2":
version "4.0.2"
resolved "https://registry.yarnpkg.com/@cspell/dict-ada/-/dict-ada-4.0.2.tgz#8da2216660aeb831a0d9055399a364a01db5805a"
integrity sha512-0kENOWQeHjUlfyId/aCM/mKXtkEgV0Zu2RhUXCBr4hHo9F9vph+Uu8Ww2b0i5a4ZixoIkudGA+eJvyxrG1jUpA==
-"@cspell/dict-aws@^4.0.3":
+"@cspell/dict-aws@^4.0.4":
version "4.0.4"
resolved "https://registry.yarnpkg.com/@cspell/dict-aws/-/dict-aws-4.0.4.tgz#638a38df18c93d5cb74607b24ab4e1498825d565"
integrity sha512-6AWI/Kkf+RcX/J81VX8+GKLeTgHWEr/OMhGk3dHQzWK66RaqDJCGDqi7494ghZKcBB7dGa3U5jcKw2FZHL/u3w==
-"@cspell/dict-bash@^4.1.3":
- version "4.1.3"
- resolved "https://registry.yarnpkg.com/@cspell/dict-bash/-/dict-bash-4.1.3.tgz#25fba40825ac10083676ab2c777e471c3f71b36e"
- integrity sha512-tOdI3QVJDbQSwPjUkOiQFhYcu2eedmX/PtEpVWg0aFps/r6AyjUQINtTgpqMYnYuq8O1QUIQqnpx21aovcgZCw==
+"@cspell/dict-bash@^4.1.4":
+ version "4.1.4"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-bash/-/dict-bash-4.1.4.tgz#a7942df189d3cc5ebced5b877d64ddbb24301137"
+ integrity sha512-W/AHoQcJYn3Vn/tUiXX2+6D/bhfzdDshwcbQWv9TdiNlXP9P6UJjDKWbxyA5ogJCsR2D0X9Kx11oV8E58siGKQ==
"@cspell/dict-companies@^3.1.4":
version "3.1.4"
resolved "https://registry.yarnpkg.com/@cspell/dict-companies/-/dict-companies-3.1.4.tgz#2e7094416432b8547ec335683f5aac9a49dce47e"
integrity sha512-y9e0amzEK36EiiKx3VAA+SHQJPpf2Qv5cCt5eTUSggpTkiFkCh6gRKQ97rVlrKh5GJrqinDwYIJtTsxuh2vy2Q==
-"@cspell/dict-cpp@^5.1.12":
+"@cspell/dict-cpp@^5.1.16":
version "5.1.16"
resolved "https://registry.yarnpkg.com/@cspell/dict-cpp/-/dict-cpp-5.1.16.tgz#e6557d5b916ebff02045b60f7016749e085921b0"
integrity sha512-32fU5RkuOM55IRcxjByiSoKbjr+C4danDfYjHaQNRWdvjzJzci3fLDGA2wTXiclkgDODxGiV8LCTUwCz+3TNWA==
@@ -2169,10 +1344,10 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-css/-/dict-css-4.0.13.tgz#b95310ba67694d25bcb055786dde65e091621d14"
integrity sha512-WfOQkqlAJTo8eIQeztaH0N0P+iF5hsJVKFuhy4jmARPISy8Efcv8QXk2/IVbmjJH0/ZV7dKRdnY5JFVXuVz37g==
-"@cspell/dict-dart@^2.0.3":
- version "2.0.3"
- resolved "https://registry.yarnpkg.com/@cspell/dict-dart/-/dict-dart-2.0.3.tgz#75e7ffe47d5889c2c831af35acdd92ebdbd4cf12"
- integrity sha512-cLkwo1KT5CJY5N5RJVHks2genFkNCl/WLfj+0fFjqNR+tk3tBI1LY7ldr9piCtSFSm4x9pO1x6IV3kRUY1lLiw==
+"@cspell/dict-dart@^2.2.1":
+ version "2.2.1"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-dart/-/dict-dart-2.2.1.tgz#d5ef7632240cb19c8892e66ba5ed1089ab8e46a3"
+ integrity sha512-yriKm7QkoPx3JPSSOcw6iX9gOb2N50bOo/wqWviqPYbhpMRh9Xiv6dkUy3+ot+21GuShZazO8X6U5+Vw67XEwg==
"@cspell/dict-data-science@^2.0.1":
version "2.0.1"
@@ -2189,10 +1364,10 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-docker/-/dict-docker-1.1.7.tgz#bcf933283fbdfef19c71a642e7e8c38baf9014f2"
integrity sha512-XlXHAr822euV36GGsl2J1CkBIVg3fZ6879ZOg5dxTIssuhUOCiV2BuzKZmt6aIFmcdPmR14+9i9Xq+3zuxeX0A==
-"@cspell/dict-dotnet@^5.0.2":
- version "5.0.2"
- resolved "https://registry.yarnpkg.com/@cspell/dict-dotnet/-/dict-dotnet-5.0.2.tgz#d89ca8fa2e546b5e1b1f1288746d26bb627d9f38"
- integrity sha512-UD/pO2A2zia/YZJ8Kck/F6YyDSpCMq0YvItpd4YbtDVzPREfTZ48FjZsbYi4Jhzwfvc6o8R56JusAE58P+4sNQ==
+"@cspell/dict-dotnet@^5.0.5":
+ version "5.0.5"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-dotnet/-/dict-dotnet-5.0.5.tgz#0f614ef6fa052e7598a6fe20770a1e5bb19f0de1"
+ integrity sha512-gjg0L97ee146wX47dnA698cHm85e7EOpf9mVrJD8DmEaqoo/k1oPy2g7c7LgKxK9XnqwoXxhLNnngPrwXOoEtQ==
"@cspell/dict-elixir@^4.0.3":
version "4.0.3"
@@ -2219,6 +1394,11 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-filetypes/-/dict-filetypes-3.0.4.tgz#aca71c7bb8c8805b54f382d98ded5ec75ebc1e36"
integrity sha512-IBi8eIVdykoGgIv5wQhOURi5lmCNJq0we6DvqKoPQJHthXbgsuO1qrHSiUVydMiQl/XvcnUWTMeAlVUlUClnVg==
+"@cspell/dict-flutter@^1.0.0":
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-flutter/-/dict-flutter-1.0.0.tgz#9179aeb5e7544c061ffa3e5080a4c015f88efee3"
+ integrity sha512-W7k1VIc4KeV8BjEBxpA3cqpzbDWjfb7oXkEb0LecBCBp5Z7kcfnjT1YVotTx/U9PGyAOBhDaEdgZACVGNQhayw==
+
"@cspell/dict-fonts@^4.0.0":
version "4.0.0"
resolved "https://registry.yarnpkg.com/@cspell/dict-fonts/-/dict-fonts-4.0.0.tgz#9bc8beb2a7b068b4fdb45cb994b36fd184316327"
@@ -2244,10 +1424,10 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-git/-/dict-git-3.0.0.tgz#c275af86041a2b59a7facce37525e2af05653b95"
integrity sha512-simGS/lIiXbEaqJu9E2VPoYW1OTC2xrwPPXNXFMa2uo/50av56qOuaxDrZ5eH1LidFXwoc8HROCHYeKoNrDLSw==
-"@cspell/dict-golang@^6.0.9":
- version "6.0.9"
- resolved "https://registry.yarnpkg.com/@cspell/dict-golang/-/dict-golang-6.0.9.tgz#b26ee13fb34a8cd40fb22380de8a46b25739fcab"
- integrity sha512-etDt2WQauyEQDA+qPS5QtkYTb2I9l5IfQftAllVoB1aOrT6bxxpHvMEpJ0Hsn/vezxrCqa/BmtUbRxllIxIuSg==
+"@cspell/dict-golang@^6.0.12":
+ version "6.0.12"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-golang/-/dict-golang-6.0.12.tgz#a9d4c53edfec34d06a226a9af6af0df899bd720f"
+ integrity sha512-LEPeoqd+4O+vceHF73S7D7+LYfrAjOvp4Dqzh4MT30ruzlQ77yHRSuYOJtrFN1GK5ntAt/ILSVOKg9sgsz1Llg==
"@cspell/dict-google@^1.0.1":
version "1.0.1"
@@ -2314,27 +1494,27 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-node/-/dict-node-5.0.1.tgz#77e17c576a897a3391fce01c1cc5da60bb4c2268"
integrity sha512-lax/jGz9h3Dv83v8LHa5G0bf6wm8YVRMzbjJPG/9rp7cAGPtdrga+XANFq+B7bY5+jiSA3zvj10LUFCFjnnCCg==
-"@cspell/dict-npm@^5.0.18":
+"@cspell/dict-npm@^5.1.4":
version "5.1.4"
resolved "https://registry.yarnpkg.com/@cspell/dict-npm/-/dict-npm-5.1.4.tgz#2359f2c0256080aea0a01440c6f3e78ea75df64d"
integrity sha512-yzqVTY4P5neom4z9orV2IFOqDZ7fDotmisP7nwQkEmftoELgn5CUtNdnJhWDoDQQn6yrxOxA8jEqmyETIWzN4Q==
-"@cspell/dict-php@^4.0.8":
- version "4.0.8"
- resolved "https://registry.yarnpkg.com/@cspell/dict-php/-/dict-php-4.0.8.tgz#fedce3109dff13a0f3d8d88ba604d6edd2b9fb70"
- integrity sha512-TBw3won4MCBQ2wdu7kvgOCR3dY2Tb+LJHgDUpuquy3WnzGiSDJ4AVelrZdE1xu7mjFJUr4q48aB21YT5uQqPZA==
+"@cspell/dict-php@^4.0.10":
+ version "4.0.10"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-php/-/dict-php-4.0.10.tgz#e2ad4d3e30ec009824d9663a795f6281ae39caaf"
+ integrity sha512-NfTZdp6kcZDF1PvgQ6cY0zE4FUO5rSwNmBH/iwCBuaLfJAFQ97rgjxo+D2bic4CFwNjyHutnHPtjJBRANO5XQw==
-"@cspell/dict-powershell@^5.0.5":
+"@cspell/dict-powershell@^5.0.8":
version "5.0.8"
resolved "https://registry.yarnpkg.com/@cspell/dict-powershell/-/dict-powershell-5.0.8.tgz#185c454c633e72ebd708328f2cf6dbbc5028ae0a"
integrity sha512-Eg64BccQp5oEJ+V/O2G27KaLWmuOL2AWMOs2470adUihOleRfW8j9XwAEGCS+JKSnDb2mksWA72Z6kDqH138IQ==
-"@cspell/dict-public-licenses@^2.0.7":
- version "2.0.7"
- resolved "https://registry.yarnpkg.com/@cspell/dict-public-licenses/-/dict-public-licenses-2.0.7.tgz#ccd67a91a6bd5ed4b5117c2f34e9361accebfcb7"
- integrity sha512-KlBXuGcN3LE7tQi/GEqKiDewWGGuopiAD0zRK1QilOx5Co8XAvs044gk4MNIQftc8r0nHeUI+irJKLGcR36DIQ==
+"@cspell/dict-public-licenses@^2.0.8":
+ version "2.0.8"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-public-licenses/-/dict-public-licenses-2.0.8.tgz#ed8c3b5b22f28129cf3517821740599f05733b68"
+ integrity sha512-Sup+tFS7cDV0fgpoKtUqEZ6+fA/H+XUgBiqQ/Fbs6vUE3WCjJHOIVsP+udHuyMH7iBfJ4UFYOYeORcY4EaKdMg==
-"@cspell/dict-python@^4.2.4":
+"@cspell/dict-python@^4.2.6":
version "4.2.6"
resolved "https://registry.yarnpkg.com/@cspell/dict-python/-/dict-python-4.2.6.tgz#fce9950d59c6707442af04701d4ed7c7be333901"
integrity sha512-Hkz399qDGEbfXi9GYa2hDl7GahglI86JmS2F1KP8sfjLXofUgtnknyC5NWc86nzHcP38pZiPqPbTigyDYw5y8A==
@@ -2346,10 +1526,10 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-r/-/dict-r-2.0.1.tgz#73474fb7cce45deb9094ebf61083fbf5913f440a"
integrity sha512-KCmKaeYMLm2Ip79mlYPc8p+B2uzwBp4KMkzeLd5E6jUlCL93Y5Nvq68wV5fRLDRTf7N1LvofkVFWfDcednFOgA==
-"@cspell/dict-ruby@^5.0.2":
- version "5.0.2"
- resolved "https://registry.yarnpkg.com/@cspell/dict-ruby/-/dict-ruby-5.0.2.tgz#cf1a71380c633dec0857143d3270cb503b10679a"
- integrity sha512-cIh8KTjpldzFzKGgrqUX4bFyav5lC52hXDKo4LbRuMVncs3zg4hcSf4HtURY+f2AfEZzN6ZKzXafQpThq3dl2g==
+"@cspell/dict-ruby@^5.0.3":
+ version "5.0.3"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-ruby/-/dict-ruby-5.0.3.tgz#614e9a3d4dcd720e750c037b9dfb6001da8b25e0"
+ integrity sha512-V1xzv9hN6u8r6SM4CkYdsxs4ov8gjXXo0Twfx5kWhLXbEVxTXDMt7ohLTqpy2XlF5mutixZdbHMeFiAww8v+Ug==
"@cspell/dict-rust@^4.0.5":
version "4.0.5"
@@ -2361,7 +1541,7 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-scala/-/dict-scala-5.0.3.tgz#85a469b2d139766b6307befc89243928e3d82b39"
integrity sha512-4yGb4AInT99rqprxVNT9TYb1YSpq58Owzq7zi3ZS5T0u899Y4VsxsBiOgHnQ/4W+ygi+sp+oqef8w8nABR2lkg==
-"@cspell/dict-software-terms@^4.0.6":
+"@cspell/dict-software-terms@^4.1.3":
version "4.1.3"
resolved "https://registry.yarnpkg.com/@cspell/dict-software-terms/-/dict-software-terms-4.1.3.tgz#364ff43034900dc508af376f06042de75ed53297"
integrity sha512-5Wn5JG4IzCboX5pjISdkipsPKGaz1//iuBZdHl4US5x7mO4jOGXLpjzx6ZoPM4PXUlMEFz9NJRCDepAu8fXVtA==
@@ -2381,10 +1561,10 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-swift/-/dict-swift-2.0.1.tgz#06ec86e52e9630c441d3c19605657457e33d7bb6"
integrity sha512-gxrCMUOndOk7xZFmXNtkCEeroZRnS2VbeaIPiymGRHj5H+qfTAzAKxtv7jJbVA3YYvEzWcVE2oKDP4wcbhIERw==
-"@cspell/dict-terraform@^1.0.0":
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/@cspell/dict-terraform/-/dict-terraform-1.0.0.tgz#c7b073bb3a03683f64cc70ccaa55ce9742c46086"
- integrity sha512-Ak+vy4HP/bOgzf06BAMC30+ZvL9mzv21xLM2XtfnBLTDJGdxlk/nK0U6QT8VfFLqJ0ZZSpyOxGsUebWDCTr/zQ==
+"@cspell/dict-terraform@^1.0.1":
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/@cspell/dict-terraform/-/dict-terraform-1.0.1.tgz#81648af2e7f19e8b3188be5e7b1a2d2b6627f58b"
+ integrity sha512-29lmUUnZgPh+ieZ5hunick8hzNIpNRtiJh9vAusNskPCrig3RTW6u7F+GG1a8uyslbzSw+Irjf40PTOan1OJJA==
"@cspell/dict-typescript@^3.1.6":
version "3.1.6"
@@ -2396,46 +1576,46 @@
resolved "https://registry.yarnpkg.com/@cspell/dict-vue/-/dict-vue-3.0.0.tgz#68ccb432ad93fcb0fd665352d075ae9a64ea9250"
integrity sha512-niiEMPWPV9IeRBRzZ0TBZmNnkK3olkOPYxC1Ny2AX4TGlYRajcW0WUtoSHmvvjZNfWLSg2L6ruiBeuPSbjnG6A==
-"@cspell/dynamic-import@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/dynamic-import/-/dynamic-import-8.14.2.tgz#4968d1b6b20806f9bb78b92c628300cad288072f"
- integrity sha512-5MbqtIligU7yPwHWU/5yFCgMvur4i1bRAF1Cy8y2dDtHsa204S/w/SaXs+51EFLp2eNbCiBisCBrwJFT7R1RxA==
+"@cspell/dynamic-import@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/dynamic-import/-/dynamic-import-8.14.3.tgz#12ecbf239eb84051cef566f677ae5e83498ee661"
+ integrity sha512-LlWDTfQO2V3CAwax5PlQnS2prLs0icWfaROaNrIoSe8X6OUerfoxJ9p3Bpx0AxSKv4FtvYQraRV/UNPWRhhHag==
dependencies:
import-meta-resolve "^4.1.0"
-"@cspell/filetypes@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/filetypes/-/filetypes-8.14.2.tgz#e05d48a504efa72d9cebd9fb45b666e116673335"
- integrity sha512-ZevArA0mWeVTTqHicxCPZIAeCibpY3NwWK/x6d1Lgu7RPk/daoGAM546Q2SLChFu+r10tIH7pRG212A6Q9ihPA==
+"@cspell/filetypes@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/filetypes/-/filetypes-8.14.3.tgz#ab41000b6586215013e776585f31fb574d9b9739"
+ integrity sha512-a1BbKi3BcSju9owpa37x6I3sZtwpnzpRslSsV7IUBI8k85nfH+TiFm0toEEj/8jFJKehBWr83kMjZFHQReV13g==
-"@cspell/strong-weak-map@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/strong-weak-map/-/strong-weak-map-8.14.2.tgz#4fd8fd616690125775c0f7a596a1295b03e9a43d"
- integrity sha512-7sRzJc392CQYNNrtdPEfOHJdRqsqf6nASCtbS5A9hL2UrdWQ4uN7r/D+Y1HpuizwY9eOkZvarcFfsYt5wE0Pug==
+"@cspell/strong-weak-map@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/strong-weak-map/-/strong-weak-map-8.14.3.tgz#976de6a7cf62d5a4a15c7d99afa7458ba57567c9"
+ integrity sha512-ZC5HiGbvD3vCA1pj7FL5gwyOi3OeXa51TDDRSjMW5A9XOpr57ptKxlSCwFVxNfszPTfMKP19a81rz1jo3GyFMg==
-"@cspell/url@8.14.2":
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/@cspell/url/-/url-8.14.2.tgz#8a6d30011596ccefb48a6135225554011c46dc03"
- integrity sha512-YmWW+B/2XQcCynLpiAQF77Bitm5Cynw3/BICZkbdveKjJkUzEmXB+U2qWuwXOyU8xUYuwkP63YM8McnI567rUA==
+"@cspell/url@8.14.3":
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/@cspell/url/-/url-8.14.3.tgz#74d497e7f323a214fe582e764e47a3af30acb2ff"
+ integrity sha512-r7fVsgOBu1qpt4UH45mE3TZfW7H+CHzK3INRUjSsrWKaiyE57mD6IIoqYdjCDoP4xaMRrHGC5SBKy+eX3prsBQ==
"@discoveryjs/json-ext@0.5.7":
version "0.5.7"
resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz#1d572bfbbe14b7704e0ba0f39b74815b84870d70"
integrity sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==
-"@docsearch/css@3.6.0":
- version "3.6.0"
- resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.6.0.tgz#0e9f56f704b3a34d044d15fd9962ebc1536ba4fb"
- integrity sha512-+sbxb71sWre+PwDK7X2T8+bhS6clcVMLwBPznX45Qu6opJcgRjAp7gYSDzVFp187J+feSj5dNBN1mJoi6ckkUQ==
+"@docsearch/css@3.6.1":
+ version "3.6.1"
+ resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.6.1.tgz#f0a728ecb486c81f2d282650fc1820c914913408"
+ integrity sha512-VtVb5DS+0hRIprU2CO6ZQjK2Zg4QU5HrDM1+ix6rT0umsYvFvatMAnf97NHZlVWDaaLlx7GRfR/7FikANiM2Fg==
"@docsearch/react@^3.5.2":
- version "3.6.0"
- resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.6.0.tgz#b4f25228ecb7fc473741aefac592121e86dd2958"
- integrity sha512-HUFut4ztcVNmqy9gp/wxNbC7pTOHhgVVkHVGCACTuLhUKUhKAF9KYHJtMiLUJxEqiFLQiuri1fWF8zqwM/cu1w==
+ version "3.6.1"
+ resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.6.1.tgz#0f826df08693293806d64277d6d9c38636211b97"
+ integrity sha512-qXZkEPvybVhSXj0K7U3bXc233tk5e8PfhoZ6MhPOiik/qUQxYC+Dn9DnoS7CxHQQhHfCvTiN0eY9M12oRghEXw==
dependencies:
"@algolia/autocomplete-core" "1.9.3"
"@algolia/autocomplete-preset-algolia" "1.9.3"
- "@docsearch/css" "3.6.0"
+ "@docsearch/css" "3.6.1"
algoliasearch "^4.19.1"
"@docusaurus/core@3.5.2":
@@ -2938,9 +2118,9 @@
"@jridgewell/trace-mapping" "^0.3.25"
"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14":
- version "1.4.15"
- resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32"
- integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
+ version "1.5.0"
+ resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a"
+ integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==
"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25":
version "0.3.25"
@@ -3025,9 +2205,9 @@
graceful-fs "4.2.10"
"@pnpm/npm-conf@^2.1.0":
- version "2.2.2"
- resolved "https://registry.yarnpkg.com/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz#0058baf1c26cbb63a828f0193795401684ac86f0"
- integrity sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==
+ version "2.3.1"
+ resolved "https://registry.yarnpkg.com/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz#bb375a571a0bd63ab0a23bece33033c683e9b6b0"
+ integrity sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw==
dependencies:
"@pnpm/config.env-replace" "^1.1.0"
"@pnpm/network.ca-file" "^1.0.1"
@@ -3038,26 +2218,26 @@
resolved "https://registry.yarnpkg.com/@polka/url/-/url-1.0.0-next.25.tgz#f077fdc0b5d0078d30893396ff4827a13f99e817"
integrity sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==
-"@redocly/ajv@^8.11.0":
- version "8.11.0"
- resolved "https://registry.yarnpkg.com/@redocly/ajv/-/ajv-8.11.0.tgz#2fad322888dc0113af026e08fceb3e71aae495ae"
- integrity sha512-9GWx27t7xWhDIR02PA18nzBdLcKQRgc46xNQvjFkrYk4UOmvKhJ/dawwiX0cCOeetN5LcaaiqQbVOWYK62SGHw==
+"@redocly/ajv@^8.11.0", "@redocly/ajv@^8.11.2":
+ version "8.11.2"
+ resolved "https://registry.yarnpkg.com/@redocly/ajv/-/ajv-8.11.2.tgz#46e1bf321ec0ac1e0fd31dea41a3d1fcbdcda0b5"
+ integrity sha512-io1JpnwtIcvojV7QKDUSIuMN/ikdOUd1ReEnUnMKGfDVridQZ31J0MmIuqwuRjWDZfmvr+Q0MqCcfHM2gTivOg==
dependencies:
fast-deep-equal "^3.1.1"
json-schema-traverse "^1.0.0"
require-from-string "^2.0.2"
- uri-js "^4.2.2"
+ uri-js-replace "^1.0.1"
+
+"@redocly/config@^0.10.1":
+ version "0.10.1"
+ resolved "https://registry.yarnpkg.com/@redocly/config/-/config-0.10.1.tgz#c7bcbab6cb3b82236c2f5c87aa44924a652d8e80"
+ integrity sha512-H3LnKVGzOaxskwJu8pmJYwBOWjP61qOK7TuTrbafqArDVckE06fhA6l0nO4KvBbjLPjy1Al7UnlxOu23V4Nl0w==
"@redocly/config@^0.6.0":
version "0.6.3"
resolved "https://registry.yarnpkg.com/@redocly/config/-/config-0.6.3.tgz#0dab6278721abd5aae6b375deee97665005b0472"
integrity sha512-hGWJgCsXRw0Ow4rplqRlUQifZvoSwZipkYnt11e3SeH1Eb23VUIDBcRuaQOUqy1wn0eevXkU2GzzQ8fbKdQ7Mg==
-"@redocly/config@^0.7.0":
- version "0.7.0"
- resolved "https://registry.yarnpkg.com/@redocly/config/-/config-0.7.0.tgz#e8d06dc1f2d9cb9a4b5c5ce09afbf8536b32161c"
- integrity sha512-6GKxTo/9df0654Mtivvr4lQnMOp+pRj9neVywmI5+BwfZLTtkJnj2qB3D6d8FHTr4apsNOf6zTa5FojX0Evh4g==
-
"@redocly/openapi-core@1.16.0":
version "1.16.0"
resolved "https://registry.yarnpkg.com/@redocly/openapi-core/-/openapi-core-1.16.0.tgz#95afcf822890af3fe8f1bde97018370b5cadb8ca"
@@ -3076,12 +2256,12 @@
yaml-ast-parser "0.0.43"
"@redocly/openapi-core@^1.4.0":
- version "1.19.0"
- resolved "https://registry.yarnpkg.com/@redocly/openapi-core/-/openapi-core-1.19.0.tgz#8c6db2f0286b7776d79e392335f89f702ea19432"
- integrity sha512-ezK6qr80sXvjDgHNrk/zmRs9vwpIAeHa0T/qmo96S+ib4ThQ5a8f3qjwEqxMeVxkxCTbkaY9sYSJKOxv4ejg5w==
+ version "1.25.2"
+ resolved "https://registry.yarnpkg.com/@redocly/openapi-core/-/openapi-core-1.25.2.tgz#8b1d093cf486601889244535217e9693e6fdbe22"
+ integrity sha512-6lJ3cRwi9xFkz/DDSmZNiv16hrzUeDoiZJQppSckKmw7MHkARu9gqRNARxUjtOAywqvpGMneASVIF9ogmalrQg==
dependencies:
- "@redocly/ajv" "^8.11.0"
- "@redocly/config" "^0.7.0"
+ "@redocly/ajv" "^8.11.2"
+ "@redocly/config" "^0.10.1"
colorette "^1.2.0"
https-proxy-agent "^7.0.4"
js-levenshtein "^1.1.6"
@@ -3313,9 +2493,9 @@
integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==
"@types/express-serve-static-core@*", "@types/express-serve-static-core@^4.17.33":
- version "4.19.0"
- resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.19.0.tgz#3ae8ab3767d98d0b682cda063c3339e1e86ccfaa"
- integrity sha512-bGyep3JqPCRry1wq+O5n7oiBgGWmeIJXPjXXCo8EK0u8duZGSYar7cGqd3ML2JUsLGeB7fmc06KYo9fLGWqPvQ==
+ version "4.19.5"
+ resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.19.5.tgz#218064e321126fcf9048d1ca25dd2465da55d9c6"
+ integrity sha512-y6W03tvrACO72aijJ5uF02FRq5cgDR9lUxddQ8vyF+GvmjJQqbzDcJngEjURc+ZsG31VI3hODNZJ2URj86pzmg==
dependencies:
"@types/node" "*"
"@types/qs" "*"
@@ -3365,9 +2545,9 @@
integrity sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==
"@types/http-proxy@^1.17.8":
- version "1.17.14"
- resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.14.tgz#57f8ccaa1c1c3780644f8a94f9c6b5000b5e2eec"
- integrity sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==
+ version "1.17.15"
+ resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.15.tgz#12118141ce9775a6499ecb4c01d02f90fc839d36"
+ integrity sha512-25g5atgiVNTIv0LBDTg1H74Hvayx0ajtJPLLcYE3whFv75J0pWNtOBzaXJQgDTmrX1bx5U9YC2w/n65BN1HwRQ==
dependencies:
"@types/node" "*"
@@ -3396,9 +2576,9 @@
integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==
"@types/mdast@^4.0.0", "@types/mdast@^4.0.2":
- version "4.0.3"
- resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-4.0.3.tgz#1e011ff013566e919a4232d1701ad30d70cab333"
- integrity sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==
+ version "4.0.4"
+ resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-4.0.4.tgz#7ccf72edd2f1aa7dd3437e180c64373585804dd6"
+ integrity sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==
dependencies:
"@types/unist" "*"
@@ -3425,11 +2605,11 @@
"@types/node" "*"
"@types/node@*":
- version "20.12.8"
- resolved "https://registry.yarnpkg.com/@types/node/-/node-20.12.8.tgz#35897bf2bfe3469847ab04634636de09552e8256"
- integrity sha512-NU0rJLJnshZWdE/097cdCBbyW1h4hEg0xpovcoAQYHl8dnEyp/NAOiE45pvc+Bd1Dt+2r94v2eGFpQJ4R7g+2w==
+ version "22.5.5"
+ resolved "https://registry.yarnpkg.com/@types/node/-/node-22.5.5.tgz#52f939dd0f65fc552a4ad0b392f3c466cc5d7a44"
+ integrity sha512-Xjs4y5UPO/CLdzpgR6GirZJx36yScjh73+2NlLlkFRSoQN8B0DpfXPdZGnvVmLRLOsqDpOfTNv7D9trgGhmOIA==
dependencies:
- undici-types "~5.26.4"
+ undici-types "~6.19.2"
"@types/node@^17.0.5":
version "17.0.45"
@@ -3442,19 +2622,19 @@
integrity sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==
"@types/prismjs@^1.26.0":
- version "1.26.3"
- resolved "https://registry.yarnpkg.com/@types/prismjs/-/prismjs-1.26.3.tgz#47fe8e784c2dee24fe636cab82e090d3da9b7dec"
- integrity sha512-A0D0aTXvjlqJ5ZILMz3rNfDBOx9hHxLZYv2by47Sm/pqW35zzjusrZTryatjN/Rf8Us2gZrJD+KeHbUSTux1Cw==
+ version "1.26.4"
+ resolved "https://registry.yarnpkg.com/@types/prismjs/-/prismjs-1.26.4.tgz#1a9e1074619ce1d7322669e5b46fbe823925103a"
+ integrity sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg==
"@types/prop-types@*":
- version "15.7.12"
- resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.12.tgz#12bb1e2be27293c1406acb6af1c3f3a1481d98c6"
- integrity sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==
+ version "15.7.13"
+ resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.13.tgz#2af91918ee12d9d32914feb13f5326658461b451"
+ integrity sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==
"@types/qs@*":
- version "6.9.15"
- resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce"
- integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==
+ version "6.9.16"
+ resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.16.tgz#52bba125a07c0482d26747d5d4947a64daf8f794"
+ integrity sha512-7i+zxXdPD0T4cKDuxCUXJ4wHcsJLwENa6Z3dCu8cfCK743OGy5Nu1RmAGqDPsoTDINVEcdXKRvR/zre+P2Ku1A==
"@types/range-parser@*":
version "1.2.7"
@@ -3488,9 +2668,9 @@
"@types/react" "*"
"@types/react@*":
- version "18.3.1"
- resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.1.tgz#fed43985caa834a2084d002e4771e15dfcbdbe8e"
- integrity sha512-V0kuGBX3+prX+DQ/7r2qsv1NsdfnCLnTgnRJ1pYnxykBhGMz+qj+box5lq7XsO5mtZsBqpjwwTu/7wszPfMBcw==
+ version "18.3.7"
+ resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.7.tgz#6decbfbb01f8d82d56ff5403394121940faa6569"
+ integrity sha512-KUnDCJF5+AiZd8owLIeVHqmW9yM4sqmDVf2JRJiBMFkGvkoZ4/WyV2lL4zVsoinmRS/W3FeEdZLEWFRofnT2FQ==
dependencies:
"@types/prop-types" "*"
csstype "^3.0.2"
@@ -3544,19 +2724,19 @@
integrity sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw==
"@types/unist@*", "@types/unist@^3.0.0":
- version "3.0.2"
- resolved "https://registry.yarnpkg.com/@types/unist/-/unist-3.0.2.tgz#6dd61e43ef60b34086287f83683a5c1b2dc53d20"
- integrity sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==
+ version "3.0.3"
+ resolved "https://registry.yarnpkg.com/@types/unist/-/unist-3.0.3.tgz#acaab0f919ce69cce629c2d4ed2eb4adc1b6c20c"
+ integrity sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==
"@types/unist@^2.0.0":
- version "2.0.10"
- resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.10.tgz#04ffa7f406ab628f7f7e97ca23e290cd8ab15efc"
- integrity sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==
+ version "2.0.11"
+ resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.11.tgz#11af57b127e32487774841f7a4e54eab166d03c4"
+ integrity sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==
"@types/ws@^8.5.5":
- version "8.5.10"
- resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.10.tgz#4acfb517970853fa6574a3a6886791d04a396787"
- integrity sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==
+ version "8.5.12"
+ resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.12.tgz#619475fe98f35ccca2a2f6c137702d85ec247b7e"
+ integrity sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ==
dependencies:
"@types/node" "*"
@@ -3566,9 +2746,9 @@
integrity sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==
"@types/yargs@^17.0.8":
- version "17.0.32"
- resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.32.tgz#030774723a2f7faafebf645f4e5a48371dca6229"
- integrity sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==
+ version "17.0.33"
+ resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.33.tgz#8c32303da83eec050a84b3c7ae7b9f922d13e32d"
+ integrity sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==
dependencies:
"@types/yargs-parser" "*"
@@ -3727,14 +2907,16 @@ acorn-jsx@^5.0.0:
integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
acorn-walk@^8.0.0:
- version "8.3.2"
- resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.2.tgz#7703af9415f1b6db9315d6895503862e231d34aa"
- integrity sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==
+ version "8.3.4"
+ resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.4.tgz#794dd169c3977edf4ba4ea47583587c5866236b7"
+ integrity sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==
+ dependencies:
+ acorn "^8.11.0"
-acorn@^8.0.0, acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2:
- version "8.11.3"
- resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.11.3.tgz#71e0b14e13a4ec160724b38fb7b0f233b1b81d7a"
- integrity sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==
+acorn@^8.0.0, acorn@^8.0.4, acorn@^8.11.0, acorn@^8.7.1, acorn@^8.8.2:
+ version "8.12.1"
+ resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.12.1.tgz#71616bdccbe25e27a54439e0046e89ca76df2248"
+ integrity sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==
address@^1.0.1, address@^1.1.2:
version "1.2.2"
@@ -3786,42 +2968,42 @@ ajv@^6.12.2, ajv@^6.12.5:
uri-js "^4.2.2"
ajv@^8.0.0, ajv@^8.9.0:
- version "8.13.0"
- resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.13.0.tgz#a3939eaec9fb80d217ddf0c3376948c023f28c91"
- integrity sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==
+ version "8.17.1"
+ resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.17.1.tgz#37d9a5c776af6bc92d7f4f9510eba4c0a60d11a6"
+ integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==
dependencies:
fast-deep-equal "^3.1.3"
+ fast-uri "^3.0.1"
json-schema-traverse "^1.0.0"
require-from-string "^2.0.2"
- uri-js "^4.4.1"
algoliasearch-helper@^3.13.3:
- version "3.19.0"
- resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.19.0.tgz#56f9c61f46ecb0a0f7497f127a5d32a94d87e090"
- integrity sha512-AaSb5DZDMZmDQyIy6lf4aL0OZGgyIdqvLIIvSuVQOIOqfhrYSY7TvotIFI2x0Q3cP3xUpTd7lI1astUC4aXBJw==
+ version "3.22.5"
+ resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz#2fcc26814e10a121a2c2526a1b05c754061c56c0"
+ integrity sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==
dependencies:
"@algolia/events" "^4.0.1"
algoliasearch@^4.18.0, algoliasearch@^4.19.1:
- version "4.23.3"
- resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-4.23.3.tgz#e09011d0a3b0651444916a3e6bbcba064ec44b60"
- integrity sha512-Le/3YgNvjW9zxIQMRhUHuhiUjAlKY/zsdZpfq4dlLqg6mEm0nL6yk+7f2hDOtLpxsgE4jSzDmvHL7nXdBp5feg==
- dependencies:
- "@algolia/cache-browser-local-storage" "4.23.3"
- "@algolia/cache-common" "4.23.3"
- "@algolia/cache-in-memory" "4.23.3"
- "@algolia/client-account" "4.23.3"
- "@algolia/client-analytics" "4.23.3"
- "@algolia/client-common" "4.23.3"
- "@algolia/client-personalization" "4.23.3"
- "@algolia/client-search" "4.23.3"
- "@algolia/logger-common" "4.23.3"
- "@algolia/logger-console" "4.23.3"
- "@algolia/recommend" "4.23.3"
- "@algolia/requester-browser-xhr" "4.23.3"
- "@algolia/requester-common" "4.23.3"
- "@algolia/requester-node-http" "4.23.3"
- "@algolia/transporter" "4.23.3"
+ version "4.24.0"
+ resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-4.24.0.tgz#b953b3e2309ef8f25da9de311b95b994ac918275"
+ integrity sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==
+ dependencies:
+ "@algolia/cache-browser-local-storage" "4.24.0"
+ "@algolia/cache-common" "4.24.0"
+ "@algolia/cache-in-memory" "4.24.0"
+ "@algolia/client-account" "4.24.0"
+ "@algolia/client-analytics" "4.24.0"
+ "@algolia/client-common" "4.24.0"
+ "@algolia/client-personalization" "4.24.0"
+ "@algolia/client-search" "4.24.0"
+ "@algolia/logger-common" "4.24.0"
+ "@algolia/logger-console" "4.24.0"
+ "@algolia/recommend" "4.24.0"
+ "@algolia/requester-browser-xhr" "4.24.0"
+ "@algolia/requester-common" "4.24.0"
+ "@algolia/requester-node-http" "4.24.0"
+ "@algolia/transporter" "4.24.0"
ansi-align@^3.0.1:
version "3.0.1"
@@ -3841,9 +3023,9 @@ ansi-regex@^5.0.1:
integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
ansi-regex@^6.0.1:
- version "6.0.1"
- resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a"
- integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==
+ version "6.1.0"
+ resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654"
+ integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==
ansi-styles@^3.2.1:
version "3.2.1"
@@ -3934,33 +3116,21 @@ ast-types@^0.13.4:
tslib "^2.0.1"
astring@^1.8.0:
- version "1.8.6"
- resolved "https://registry.yarnpkg.com/astring/-/astring-1.8.6.tgz#2c9c157cf1739d67561c56ba896e6948f6b93731"
- integrity sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==
+ version "1.9.0"
+ resolved "https://registry.yarnpkg.com/astring/-/astring-1.9.0.tgz#cc73e6062a7eb03e7d19c22d8b0b3451fd9bfeef"
+ integrity sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==
async@^3.2.5:
- version "3.2.5"
- resolved "https://registry.yarnpkg.com/async/-/async-3.2.5.tgz#ebd52a8fdaf7a2289a24df399f8d8485c8a46b66"
- integrity sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==
+ version "3.2.6"
+ resolved "https://registry.yarnpkg.com/async/-/async-3.2.6.tgz#1b0728e14929d51b85b449b7f06e27c1145e38ce"
+ integrity sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==
at-least-node@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2"
integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==
-autoprefixer@^10.4.14:
- version "10.4.19"
- resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.19.tgz#ad25a856e82ee9d7898c59583c1afeb3fa65f89f"
- integrity sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==
- dependencies:
- browserslist "^4.23.0"
- caniuse-lite "^1.0.30001599"
- fraction.js "^4.3.7"
- normalize-range "^0.1.2"
- picocolors "^1.0.0"
- postcss-value-parser "^4.2.0"
-
-autoprefixer@^10.4.19:
+autoprefixer@^10.4.14, autoprefixer@^10.4.19:
version "10.4.20"
resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.20.tgz#5caec14d43976ef42e32dcb4bd62878e96be5b3b"
integrity sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==
@@ -3980,9 +3150,9 @@ available-typed-arrays@^1.0.7:
possible-typed-array-names "^1.0.0"
babel-loader@^9.1.3:
- version "9.1.3"
- resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-9.1.3.tgz#3d0e01b4e69760cc694ee306fe16d358aa1c6f9a"
- integrity sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==
+ version "9.2.1"
+ resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-9.2.1.tgz#04c7835db16c246dd19ba0914418f3937797587b"
+ integrity sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==
dependencies:
find-cache-dir "^4.0.0"
schema-utils "^4.0.0"
@@ -4003,14 +3173,6 @@ babel-plugin-polyfill-corejs2@^0.4.10:
"@babel/helper-define-polyfill-provider" "^0.6.2"
semver "^6.3.1"
-babel-plugin-polyfill-corejs3@^0.10.1, babel-plugin-polyfill-corejs3@^0.10.4:
- version "0.10.4"
- resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz#789ac82405ad664c20476d0233b485281deb9c77"
- integrity sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==
- dependencies:
- "@babel/helper-define-polyfill-provider" "^0.6.1"
- core-js-compat "^3.36.1"
-
babel-plugin-polyfill-corejs3@^0.10.6:
version "0.10.6"
resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz#2deda57caef50f59c525aeb4964d3b2f867710c7"
@@ -4137,17 +3299,7 @@ braces@^3.0.3, braces@~3.0.2:
dependencies:
fill-range "^7.1.1"
-browserslist@^4.0.0, browserslist@^4.18.1, browserslist@^4.21.10, browserslist@^4.22.2, browserslist@^4.23.0:
- version "4.23.0"
- resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.0.tgz#8f3acc2bbe73af7213399430890f86c63a5674ab"
- integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==
- dependencies:
- caniuse-lite "^1.0.30001587"
- electron-to-chromium "^1.4.668"
- node-releases "^2.0.14"
- update-browserslist-db "^1.0.13"
-
-browserslist@^4.23.1, browserslist@^4.23.3:
+browserslist@^4.0.0, browserslist@^4.18.1, browserslist@^4.21.10, browserslist@^4.23.0, browserslist@^4.23.1, browserslist@^4.23.3:
version "4.23.3"
resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.3.tgz#debb029d3c93ebc97ffbc8d9cbb03403e227c800"
integrity sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==
@@ -4244,15 +3396,10 @@ caniuse-api@^3.0.0:
lodash.memoize "^4.1.2"
lodash.uniq "^4.5.0"
-caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001587, caniuse-lite@^1.0.30001599:
- version "1.0.30001616"
- resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001616.tgz#4342712750d35f71ebba9fcac65e2cf8870013c3"
- integrity sha512-RHVYKov7IcdNjVHJFNY/78RdG4oGVjbayxv8u5IO74Wv7Hlq4PnJE6mo/OjFijjVFNy5ijnCt6H3IIo4t+wfEw==
-
-caniuse-lite@^1.0.30001646:
- version "1.0.30001655"
- resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001655.tgz#0ce881f5a19a2dcfda2ecd927df4d5c1684b982f"
- integrity sha512-jRGVy3iSGO5Uutn2owlb5gR6qsGngTw9ZTb4ali9f3glshcNmJ2noam4Mo9zia5P9Dk3jNNydy7vQjuE5dQmfg==
+caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001646:
+ version "1.0.30001660"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001660.tgz#31218de3463fabb44d0b7607b652e56edf2e2355"
+ integrity sha512-GacvNTTuATm26qC74pt+ad1fW15mlQ/zuTzzY1ZoIzECTP8HURDfF43kNxPgf7H1jmelCBQTTbBNxdSXOA7Bqg==
ccount@^2.0.0:
version "2.0.1"
@@ -4325,7 +3472,7 @@ cheerio-select@^2.1.0:
domhandler "^5.0.3"
domutils "^3.0.1"
-cheerio@1.0.0-rc.12, cheerio@^1.0.0-rc.10:
+cheerio@1.0.0-rc.12:
version "1.0.0-rc.12"
resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.12.tgz#788bf7466506b1c6bf5fae51d24a2c4d62e47683"
integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==
@@ -4338,6 +3485,23 @@ cheerio@1.0.0-rc.12, cheerio@^1.0.0-rc.10:
parse5 "^7.0.0"
parse5-htmlparser2-tree-adapter "^7.0.0"
+cheerio@^1.0.0-rc.10:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0.tgz#1ede4895a82f26e8af71009f961a9b8cb60d6a81"
+ integrity sha512-quS9HgjQpdaXOvsZz82Oz7uxtXiy6UIsIQcpBj7HRw2M63Skasm9qlDocAM7jNuaxdhpPU7c4kJN+gA5MCu4ww==
+ dependencies:
+ cheerio-select "^2.1.0"
+ dom-serializer "^2.0.0"
+ domhandler "^5.0.3"
+ domutils "^3.1.0"
+ encoding-sniffer "^0.2.0"
+ htmlparser2 "^9.1.0"
+ parse5 "^7.1.2"
+ parse5-htmlparser2-tree-adapter "^7.0.0"
+ parse5-parser-stream "^7.1.2"
+ undici "^6.19.5"
+ whatwg-mimetype "^4.0.0"
+
chokidar@^3.4.2, chokidar@^3.5.3:
version "3.6.0"
resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b"
@@ -4354,9 +3518,9 @@ chokidar@^3.4.2, chokidar@^3.5.3:
fsevents "~2.3.2"
chrome-trace-event@^1.0.2:
- version "1.0.3"
- resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz#1015eced4741e15d06664a957dbbf50d041e26ac"
- integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==
+ version "1.0.4"
+ resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz#05bffd7ff928465093314708c93bdfa9bd1f0f5b"
+ integrity sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==
ci-info@^3.2.0:
version "3.9.0"
@@ -4394,9 +3558,9 @@ cli-boxes@^3.0.0:
integrity sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==
cli-table3@^0.6.3:
- version "0.6.4"
- resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.4.tgz#d1c536b8a3f2e7bec58f67ac9e5769b1b30088b0"
- integrity sha512-Lm3L0p+/npIQWNIiyF/nAn7T5dnOwR3xNTHXYEBFBFVPXzCVNZ5lqEC/1eo/EVfpDsQ1I+TX4ORPQgp+UI0CRw==
+ version "0.6.5"
+ resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f"
+ integrity sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==
dependencies:
string-width "^4.2.0"
optionalDependencies:
@@ -4633,13 +3797,6 @@ copy-webpack-plugin@^11.0.0:
schema-utils "^4.0.0"
serialize-javascript "^6.0.0"
-core-js-compat@^3.31.0, core-js-compat@^3.36.1:
- version "3.37.0"
- resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.37.0.tgz#d9570e544163779bb4dff1031c7972f44918dc73"
- integrity sha512-vYq4L+T8aS5UuFg4UwDhc7YNRWVeVZwltad9C/jV3R2LgVOpS9BDr7l/WL6BN0dbV3k1XejPTHqqEzJgsa0frA==
- dependencies:
- browserslist "^4.23.0"
-
core-js-compat@^3.37.1, core-js-compat@^3.38.0:
version "3.38.1"
resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.38.1.tgz#2bc7a298746ca5a7bcb9c164bcb120f2ebc09a09"
@@ -4648,14 +3805,14 @@ core-js-compat@^3.37.1, core-js-compat@^3.38.0:
browserslist "^4.23.3"
core-js-pure@^3.30.2:
- version "3.37.0"
- resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.37.0.tgz#ce99fb4a7cec023fdbbe5b5bd1f06bbcba83316e"
- integrity sha512-d3BrpyFr5eD4KcbRvQ3FTUx/KWmaDesr7+a3+1+P46IUnNoEt+oiLijPINZMEon7w9oGkIINWxrBAU9DEciwFQ==
+ version "3.38.1"
+ resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.38.1.tgz#e8534062a54b7221344884ba9b52474be495ada3"
+ integrity sha512-BY8Etc1FZqdw1glX0XNOq2FDwfrg/VGqoZOZCdaL+UmdaqDwQwYXkMJT4t6In+zfEfOJDcM9T0KdbBeJg8KKCQ==
core-js@^3.31.1:
- version "3.37.0"
- resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.37.0.tgz#d8dde58e91d156b2547c19d8a4efd5c7f6c426bb"
- integrity sha512-fu5vHevQ8ZG4og+LXug8ulUtVxjOcEYvifJr7L5Bfq9GOztVqsKd9/59hUk2ZSbCrS3BqUr3EpaYGIYzq7g3Ug==
+ version "3.38.1"
+ resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.38.1.tgz#aa375b79a286a670388a1a363363d53677c0383e"
+ integrity sha512-OP35aUorbU3Zvlx7pjsFdu1rGNnD4pgw/CWoYzRY3t2EzoVT7shKHY1dlAy3f41cGIO7ZDPQimhGFTlEYkG/Hw==
core-util-is@^1.0.3, core-util-is@~1.0.0:
version "1.0.3"
@@ -4699,80 +3856,80 @@ crypto-random-string@^4.0.0:
dependencies:
type-fest "^1.0.1"
-cspell-config-lib@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-config-lib/-/cspell-config-lib-8.14.2.tgz#cc401080c14dab7b355bda4e90a22b7dfb05ffd6"
- integrity sha512-yHP1BdcH5dbjb8qiZr6+bxEnJ+rxTULQ00wBz3eBPWCghJywEAYYvMWoYuxVtPpndlkKYC1wJAHsyNkweQyepA==
+cspell-config-lib@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-config-lib/-/cspell-config-lib-8.14.3.tgz#9a52b8b41d3f6bf10279e42272bc6080966f5412"
+ integrity sha512-uQFhEvnMJBpZBoi5U5jcMdykze5Cng28RDq4jzM2bYR2aE0HKZfFS8Hsjf5SLDxxS7TAKhnDh5a0r+6T/G6+qw==
dependencies:
- "@cspell/cspell-types" "8.14.2"
+ "@cspell/cspell-types" "8.14.3"
comment-json "^4.2.5"
- yaml "^2.5.0"
+ yaml "^2.5.1"
-cspell-dictionary@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-dictionary/-/cspell-dictionary-8.14.2.tgz#0268d437eed78bfcf6eadc3674a3e3104e010d7a"
- integrity sha512-gWuAvf6queGGUvGbfAxxUq55cZ0OevWPbjnCrSB0PpJ4tqdFd8dLcvVrIKzoE2sBXKPw2NDkmoEngs6iGavC0w==
+cspell-dictionary@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-dictionary/-/cspell-dictionary-8.14.3.tgz#05f177924f9c59aba05e394d2f362daf23e39ab7"
+ integrity sha512-FiTas0KXWXKyTJIRYZF7USp7Cjjq6RmzLKcDKAvJhc0XmbRaoYHW20IRkHVsrfO6bHTmo1fLoJQpvuDCkBiojg==
dependencies:
- "@cspell/cspell-pipe" "8.14.2"
- "@cspell/cspell-types" "8.14.2"
- cspell-trie-lib "8.14.2"
+ "@cspell/cspell-pipe" "8.14.3"
+ "@cspell/cspell-types" "8.14.3"
+ cspell-trie-lib "8.14.3"
fast-equals "^5.0.1"
-cspell-gitignore@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-gitignore/-/cspell-gitignore-8.14.2.tgz#239995ac11652a9978126ec35b12d663010a90f3"
- integrity sha512-lrO/49NaKBpkR7vFxv4OOY+oHmsG5+gNQejrBBWD9Nv9vvjJtz/G36X/rcN6M6tFcQQMWwa01kf04nxz8Ejuhg==
+cspell-gitignore@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-gitignore/-/cspell-gitignore-8.14.3.tgz#342f52e62abe2c40513ef66186246e03bb00e73a"
+ integrity sha512-SOPYlaOh2tPSYZ48zGN9TzjaxnO05/AJMpvlvxUf0uASa2BOeogl11KvzcS7ig5MUnB/+s/2YsShcF+YjfptEw==
dependencies:
- "@cspell/url" "8.14.2"
- cspell-glob "8.14.2"
- cspell-io "8.14.2"
+ "@cspell/url" "8.14.3"
+ cspell-glob "8.14.3"
+ cspell-io "8.14.3"
find-up-simple "^1.0.0"
-cspell-glob@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-glob/-/cspell-glob-8.14.2.tgz#09f53191e58db113837a088a7a9ea7a181f2deb7"
- integrity sha512-9Q1Kgoo1ev3fKTpp9y5n8M4RLxd8B0f5o4y5FQe4dBU0j/bt+/YDrLZNWDm77JViV606XQ6fimG1FTTq6pT9/g==
- dependencies:
- "@cspell/url" "8.14.2"
- micromatch "^4.0.7"
-
-cspell-grammar@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-grammar/-/cspell-grammar-8.14.2.tgz#a37e4f6ce6aae8eba2ba57822a4cfe0cd7fd8909"
- integrity sha512-eYwceVP80FGYVJenE42ALnvEKOXaXjq4yVbb1Ni1umO/9qamLWNCQ1RP6rRACy5e/cXviAbhrQ5Mtw6n+pyPEQ==
- dependencies:
- "@cspell/cspell-pipe" "8.14.2"
- "@cspell/cspell-types" "8.14.2"
-
-cspell-io@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-io/-/cspell-io-8.14.2.tgz#501b26c630f6d8e9fc89fce1ad806c10b440723a"
- integrity sha512-uaKpHiY3DAgfdzgKMQml6U8F8o9udMuYxGqYa5FVfN7D5Ap7B2edQzSLTUYwxrFEn4skSfp6XY73+nzJvxzH4Q==
- dependencies:
- "@cspell/cspell-service-bus" "8.14.2"
- "@cspell/url" "8.14.2"
-
-cspell-lib@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-lib/-/cspell-lib-8.14.2.tgz#ca404ad026c7150c96426a11faa5c9f761015bbb"
- integrity sha512-d2oiIXHXnADmnhIuFLOdNE63L7OUfzgpLbYaqAWbkImCUDkevfGrOgnX8TJ03fUgZID4nvQ+3kgu/n2j4eLZjQ==
- dependencies:
- "@cspell/cspell-bundled-dicts" "8.14.2"
- "@cspell/cspell-pipe" "8.14.2"
- "@cspell/cspell-resolver" "8.14.2"
- "@cspell/cspell-types" "8.14.2"
- "@cspell/dynamic-import" "8.14.2"
- "@cspell/filetypes" "8.14.2"
- "@cspell/strong-weak-map" "8.14.2"
- "@cspell/url" "8.14.2"
+cspell-glob@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-glob/-/cspell-glob-8.14.3.tgz#5e70f11ddc93ce701d8fa5cd42131777537593ba"
+ integrity sha512-d12Dn/i6BRKQrvq0ikcBPBsjPdyemu2Ggw1WgWvxAlaGGZsoyC6Hn5ElQt6tQt2CruwUfPPXVm2+UMyHlRMt4g==
+ dependencies:
+ "@cspell/url" "8.14.3"
+ micromatch "^4.0.8"
+
+cspell-grammar@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-grammar/-/cspell-grammar-8.14.3.tgz#973f5ec162b2dad2acc17af7dd19038aff9ba972"
+ integrity sha512-clb5HCVJn6wW/v7dC3CGuo1YmmXIVpvpz7BGwt2Rvybk/8o6CD3i4aF8TqnHc0FIWP0iHCwMhqnCyiOJtYI9Mg==
+ dependencies:
+ "@cspell/cspell-pipe" "8.14.3"
+ "@cspell/cspell-types" "8.14.3"
+
+cspell-io@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-io/-/cspell-io-8.14.3.tgz#72a6787c6be980bd8fc1c55aee9c607e46f61ca2"
+ integrity sha512-8SWe553kpSsQ039SiFQ+G/87KoJn51W1yc42aGUwkuagglspEiUVj3bTlD3eVswZAT3KbG26Mti49L37Lecj/g==
+ dependencies:
+ "@cspell/cspell-service-bus" "8.14.3"
+ "@cspell/url" "8.14.3"
+
+cspell-lib@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-lib/-/cspell-lib-8.14.3.tgz#16ff497da05c4b05f743ddff40481deea9170f45"
+ integrity sha512-hcpxyX+xZ1LbcuvT1H+zCjfmW04on6nsdkKTT0bRdsgxyw6O08hR2OVqQ9+oYiXdp6QccjOl6UPOS6dEZajOmw==
+ dependencies:
+ "@cspell/cspell-bundled-dicts" "8.14.3"
+ "@cspell/cspell-pipe" "8.14.3"
+ "@cspell/cspell-resolver" "8.14.3"
+ "@cspell/cspell-types" "8.14.3"
+ "@cspell/dynamic-import" "8.14.3"
+ "@cspell/filetypes" "8.14.3"
+ "@cspell/strong-weak-map" "8.14.3"
+ "@cspell/url" "8.14.3"
clear-module "^4.1.2"
comment-json "^4.2.5"
- cspell-config-lib "8.14.2"
- cspell-dictionary "8.14.2"
- cspell-glob "8.14.2"
- cspell-grammar "8.14.2"
- cspell-io "8.14.2"
- cspell-trie-lib "8.14.2"
+ cspell-config-lib "8.14.3"
+ cspell-dictionary "8.14.3"
+ cspell-glob "8.14.3"
+ cspell-grammar "8.14.3"
+ cspell-io "8.14.3"
+ cspell-trie-lib "8.14.3"
env-paths "^3.0.0"
fast-equals "^5.0.1"
gensequence "^7.0.0"
@@ -4782,36 +3939,36 @@ cspell-lib@8.14.2:
vscode-uri "^3.0.8"
xdg-basedir "^5.1.0"
-cspell-trie-lib@8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell-trie-lib/-/cspell-trie-lib-8.14.2.tgz#3748544d4f6ca85c3d72a1f2eb72b930c898865b"
- integrity sha512-rZMbaEBGoyy4/zxKECaMyVyGLbuUxYmZ5jlEgiA3xPtEdWwJ4iWRTo5G6dWbQsXoxPYdAXXZ0/q0GQ2y6Jt0kw==
+cspell-trie-lib@8.14.3:
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell-trie-lib/-/cspell-trie-lib-8.14.3.tgz#ac7436d934a3162df04ab0e09fb13d07ae26d8c5"
+ integrity sha512-90Rwt8Kzcv2HB2uuwUqMlCQVa7tpsqHtSFiGox3DTDUZWTikMiOwUigRvO17FsxxQL+qW4fIH4PUi4SGwins9Q==
dependencies:
- "@cspell/cspell-pipe" "8.14.2"
- "@cspell/cspell-types" "8.14.2"
+ "@cspell/cspell-pipe" "8.14.3"
+ "@cspell/cspell-types" "8.14.3"
gensequence "^7.0.0"
cspell@^8.14.2:
- version "8.14.2"
- resolved "https://registry.yarnpkg.com/cspell/-/cspell-8.14.2.tgz#d1434bc66831113121a91427c39dc22802dc4c31"
- integrity sha512-ii/W7fwO4chNQVYl1C/8k7RW8EXzLb69rvg08p8mSJx8B2UasVJ9tuJpTH2Spo1jX6N3H0dKPWUbd1fAmdAhPg==
- dependencies:
- "@cspell/cspell-json-reporter" "8.14.2"
- "@cspell/cspell-pipe" "8.14.2"
- "@cspell/cspell-types" "8.14.2"
- "@cspell/dynamic-import" "8.14.2"
- "@cspell/url" "8.14.2"
+ version "8.14.3"
+ resolved "https://registry.yarnpkg.com/cspell/-/cspell-8.14.3.tgz#60bd93d3514a172f2160e2369052c14775333d31"
+ integrity sha512-GTok3s0J6hb8lXPgOkFcJ6+i91YS99AD5t60htNrq7Ae89BZByG20XPZc/6zbRN9eEQvtCx4OAIXnnfxP5QENw==
+ dependencies:
+ "@cspell/cspell-json-reporter" "8.14.3"
+ "@cspell/cspell-pipe" "8.14.3"
+ "@cspell/cspell-types" "8.14.3"
+ "@cspell/dynamic-import" "8.14.3"
+ "@cspell/url" "8.14.3"
chalk "^5.3.0"
chalk-template "^1.1.0"
commander "^12.1.0"
- cspell-dictionary "8.14.2"
- cspell-gitignore "8.14.2"
- cspell-glob "8.14.2"
- cspell-io "8.14.2"
- cspell-lib "8.14.2"
+ cspell-dictionary "8.14.3"
+ cspell-gitignore "8.14.3"
+ cspell-glob "8.14.3"
+ cspell-io "8.14.3"
+ cspell-lib "8.14.3"
fast-glob "^3.3.2"
fast-json-stable-stringify "^2.1.0"
- file-entry-cache "^9.0.0"
+ file-entry-cache "^9.1.0"
get-stdin "^9.0.0"
semver "^7.6.3"
strip-ansi "^7.1.0"
@@ -5027,19 +4184,12 @@ debug@2.6.9, debug@^2.6.0:
dependencies:
ms "2.0.0"
-debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1:
- version "4.3.4"
- resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865"
- integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
- dependencies:
- ms "2.1.2"
-
-debug@^4.3.4:
- version "4.3.5"
- resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e"
- integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==
+debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4:
+ version "4.3.7"
+ resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52"
+ integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==
dependencies:
- ms "2.1.2"
+ ms "^2.1.3"
decko@^1.2.0:
version "1.2.0"
@@ -5162,9 +4312,9 @@ detect-port-alt@^1.1.6:
debug "^2.6.0"
detect-port@^1.5.1:
- version "1.5.1"
- resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.5.1.tgz#451ca9b6eaf20451acb0799b8ab40dff7718727b"
- integrity sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==
+ version "1.6.1"
+ resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.6.1.tgz#45e4073997c5f292b957cb678fb0bb8ed4250a67"
+ integrity sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==
dependencies:
address "^1.0.1"
debug "4"
@@ -5268,7 +4418,7 @@ domutils@^2.5.2, domutils@^2.8.0:
domelementtype "^2.2.0"
domhandler "^4.2.0"
-domutils@^3.0.1:
+domutils@^3.0.1, domutils@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.1.0.tgz#c47f551278d3dc4b0b1ab8cbb42d751a6f0d824e"
integrity sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==
@@ -5307,15 +4457,10 @@ ee-first@1.1.1:
resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==
-electron-to-chromium@^1.4.668:
- version "1.4.756"
- resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.756.tgz#7b872ed8c8c5bee571be771730225d6d2a37fe45"
- integrity sha512-RJKZ9+vEBMeiPAvKNWyZjuYyUqMndcP1f335oHqn3BEQbs2NFtVrnK5+6Xg5wSM9TknNNpWghGDUCKGYF+xWXw==
-
electron-to-chromium@^1.5.4:
- version "1.5.13"
- resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.13.tgz#1abf0410c5344b2b829b7247e031f02810d442e6"
- integrity sha512-lbBcvtIJ4J6sS4tb5TLp1b4LyfCdMkwStzXPyAgVgTRAsep4bvrAGaBOP7ZJtQMNJpSQ9SqG4brWOroNaQtm7Q==
+ version "1.5.24"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.24.tgz#b3cd2f71b7a84bac340d862e3b7b0aadf48478de"
+ integrity sha512-0x0wLCmpdKFCi9ulhvYZebgcPmHTkFVUfU2wzDykadkslKwT4oAmDTHEKLnlrDsMGZe4B+ksn8quZfZjYsBetA==
emoji-regex@^8.0.0:
version "8.0.0"
@@ -5338,9 +4483,9 @@ emojis-list@^3.0.0:
integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==
emoticon@^4.0.1:
- version "4.0.1"
- resolved "https://registry.yarnpkg.com/emoticon/-/emoticon-4.0.1.tgz#2d2bbbf231ce3a5909e185bbb64a9da703a1e749"
- integrity sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==
+ version "4.1.0"
+ resolved "https://registry.yarnpkg.com/emoticon/-/emoticon-4.1.0.tgz#d5a156868ee173095627a33de3f1e914c3dde79e"
+ integrity sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==
encodeurl@~1.0.2:
version "1.0.2"
@@ -5352,6 +4497,14 @@ encodeurl@~2.0.0:
resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-2.0.0.tgz#7b8ea898077d7e409d3ac45474ea38eaf0857a58"
integrity sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==
+encoding-sniffer@^0.2.0:
+ version "0.2.0"
+ resolved "https://registry.yarnpkg.com/encoding-sniffer/-/encoding-sniffer-0.2.0.tgz#799569d66d443babe82af18c9f403498365ef1d5"
+ integrity sha512-ju7Wq1kg04I3HtiYIOrUrdfdDvkyO9s5XM8QAj/bN61Yo/Vb4vgJxy5vi4Yxk01gWHbrofpPtpxM8bKger9jhg==
+ dependencies:
+ iconv-lite "^0.6.3"
+ whatwg-encoding "^3.1.1"
+
enhanced-resolve@^5.17.1:
version "5.17.1"
resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15"
@@ -5365,7 +4518,7 @@ entities@^2.0.0:
resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55"
integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==
-entities@^4.2.0, entities@^4.4.0:
+entities@^4.2.0, entities@^4.4.0, entities@^4.5.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48"
integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
@@ -5455,9 +4608,9 @@ es-errors@^1.2.1, es-errors@^1.3.0:
integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
es-module-lexer@^1.2.1:
- version "1.5.2"
- resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.5.2.tgz#00b423304f2500ac59359cc9b6844951f372d497"
- integrity sha512-l60ETUTmLqbVbVHv1J4/qj+M8nq7AwMzEcg3kmJDt9dCNrTk+yHcYFf/Kw75pMDwd9mPcIGCG5LcS20SxYRzFA==
+ version "1.5.4"
+ resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.5.4.tgz#a8efec3a3da991e60efa6b633a7cad6ab8d26b78"
+ integrity sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==
es-object-atoms@^1.0.0:
version "1.0.0"
@@ -5490,9 +4643,9 @@ es6-promise@^3.2.1:
integrity sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==
escalade@^3.1.1, escalade@^3.1.2:
- version "3.1.2"
- resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.2.tgz#54076e9ab29ea5bf3d8f1ed62acffbb88272df27"
- integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==
+ version "3.2.0"
+ resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5"
+ integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==
escape-goat@^4.0.0:
version "4.0.0"
@@ -5592,12 +4745,11 @@ estree-util-to-js@^2.0.0:
source-map "^0.7.0"
estree-util-value-to-estree@^3.0.1:
- version "3.1.1"
- resolved "https://registry.yarnpkg.com/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.1.tgz#a007388eca677510f319603a2f279fed6d104a15"
- integrity sha512-5mvUrF2suuv5f5cGDnDphIy4/gW86z82kl5qG6mM9z04SEQI4FB5Apmaw/TGEf3l55nLtMs5s51dmhUzvAHQCA==
+ version "3.1.2"
+ resolved "https://registry.yarnpkg.com/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz#d2f0e5d350a6c181673eb7299743325b86a9bf5c"
+ integrity sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag==
dependencies:
"@types/estree" "^1.0.0"
- is-plain-obj "^4.0.0"
estree-util-visit@^2.0.0:
version "2.0.0"
@@ -5747,6 +4899,11 @@ fast-safe-stringify@^2.0.7:
resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz#c406a83b6e70d9e35ce3b30a81141df30aeba884"
integrity sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==
+fast-uri@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.1.tgz#cddd2eecfc83a71c1be2cc2ef2061331be8a7134"
+ integrity sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw==
+
fast-url-parser@1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz#f4af3ea9f34d8a271cf58ad2b3759f431f0b318d"
@@ -5782,7 +4939,7 @@ feed@^4.2.2:
dependencies:
xml-js "^1.6.11"
-file-entry-cache@^9.0.0:
+file-entry-cache@^9.1.0:
version "9.1.0"
resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-9.1.0.tgz#2e66ad98ce93f49aed1b178c57b0b5741591e075"
integrity sha512-/pqPFG+FdxWQj+/WSuzXSDaNzxgTLr/OrR1QuqfEZzDakpdYE70PwUxL7BPUa8hpjbvY1+qvCl8k+8Tq34xJgg==
@@ -5877,9 +5034,9 @@ flatted@^3.3.1:
integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==
follow-redirects@^1.0.0:
- version "1.15.6"
- resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b"
- integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==
+ version "1.15.9"
+ resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1"
+ integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==
for-each@^0.3.3:
version "0.3.3"
@@ -6292,9 +5449,9 @@ hast-util-parse-selector@^4.0.0:
"@types/hast" "^3.0.0"
hast-util-raw@^9.0.0:
- version "9.0.2"
- resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-9.0.2.tgz#39b4a4886bd9f0a5dd42e86d02c966c2c152884c"
- integrity sha512-PldBy71wO9Uq1kyaMch9AHIghtQvIwxBUkv823pKmkTM3oV1JxtsTNYdevMxvUHqcnOAuO65JKU2+0NOxc2ksA==
+ version "9.0.4"
+ resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-9.0.4.tgz#2da03e37c46eb1a6f1391f02f9b84ae65818f7ed"
+ integrity sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==
dependencies:
"@types/hast" "^3.0.0"
"@types/unist" "^3.0.0"
@@ -6502,6 +5659,16 @@ htmlparser2@^8.0.1:
domutils "^3.0.1"
entities "^4.4.0"
+htmlparser2@^9.1.0:
+ version "9.1.0"
+ resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-9.1.0.tgz#cdb498d8a75a51f739b61d3f718136c369bc8c23"
+ integrity sha512-5zfg6mHUoaer/97TxnGpxmbR7zJtPwIYFMZ/H5ucTlPZhKvtum05yiPK3Mgai3a0DyVxv7qYqoweaEd2nrYQzQ==
+ dependencies:
+ domelementtype "^2.3.0"
+ domhandler "^5.0.3"
+ domutils "^3.1.0"
+ entities "^4.5.0"
+
http-cache-semantics@^4.1.1:
version "4.1.1"
resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a"
@@ -6579,15 +5746,7 @@ http2-wrapper@^2.1.10:
quick-lru "^5.1.1"
resolve-alpn "^1.2.0"
-https-proxy-agent@^7.0.2, https-proxy-agent@^7.0.3:
- version "7.0.4"
- resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.4.tgz#8e97b841a029ad8ddc8731f26595bad868cb4168"
- integrity sha512-wlwpilI7YdjSkWaQ/7omYBMTliDcmCN8OLihO6I9B86g06lMyAoqgoDpV0XqoaPOKj+0DIdAvnsWfyAAhmimcg==
- dependencies:
- agent-base "^7.0.2"
- debug "4"
-
-https-proxy-agent@^7.0.4:
+https-proxy-agent@^7.0.3, https-proxy-agent@^7.0.4, https-proxy-agent@^7.0.5:
version "7.0.5"
resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz#9e8b5013873299e11fab6fd548405da2d6c602b2"
integrity sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==
@@ -6607,7 +5766,7 @@ iconv-lite@0.4.24:
dependencies:
safer-buffer ">= 2.1.2 < 3"
-iconv-lite@^0.6.3:
+iconv-lite@0.6.3, iconv-lite@^0.6.3:
version "0.6.3"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501"
integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==
@@ -6620,9 +5779,9 @@ icss-utils@^5.0.0, icss-utils@^5.1.0:
integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==
ignore@^5.2.0, ignore@^5.2.4:
- version "5.3.1"
- resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.1.tgz#5073e554cd42c5b33b394375f538b8593e34d4ef"
- integrity sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==
+ version "5.3.2"
+ resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5"
+ integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==
image-size@^1.0.2:
version "1.1.1"
@@ -6707,10 +5866,10 @@ inline-style-parser@0.1.1:
resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1"
integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==
-inline-style-parser@0.2.3:
- version "0.2.3"
- resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.2.3.tgz#e35c5fb45f3a83ed7849fe487336eb7efa25971c"
- integrity sha512-qlD8YNDqyTKTyuITrDOffsl6Tdhv+UC4hcdAVuQsK4IMQ99nSgd1MIA/Q+jQYoh9r3hVUXhYh7urSRmXPkW04g==
+inline-style-parser@0.2.4:
+ version "0.2.4"
+ resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.2.4.tgz#f4af5fe72e612839fcd453d989a586566d695f22"
+ integrity sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==
internal-slot@^1.0.7:
version "1.0.7"
@@ -6817,11 +5976,11 @@ is-ci@^3.0.1:
ci-info "^3.2.0"
is-core-module@^2.13.0:
- version "2.13.1"
- resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384"
- integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==
+ version "2.15.1"
+ resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.1.tgz#a7363a25bee942fefab0de13bf6aa372c82dcc37"
+ integrity sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==
dependencies:
- hasown "^2.0.0"
+ hasown "^2.0.2"
is-data-view@^1.0.1:
version "1.0.1"
@@ -7094,14 +6253,14 @@ jest-worker@^29.4.3:
supports-color "^8.0.0"
jiti@^1.20.0:
- version "1.21.0"
- resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.0.tgz#7c97f8fe045724e136a397f7340475244156105d"
- integrity sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==
+ version "1.21.6"
+ resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.6.tgz#6c7f7398dd4b3142767f9a168af2f317a428d268"
+ integrity sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==
joi@^17.9.2:
- version "17.13.1"
- resolved "https://registry.yarnpkg.com/joi/-/joi-17.13.1.tgz#9c7b53dc3b44dd9ae200255cc3b398874918a6ca"
- integrity sha512-vaBlIKCyo4FCUtCm7Eu4QZd/q02bWcxfUO6YSXAZOWF6gzcLBeba8kwotUdYJjDLW8Cz8RywsSOqiNJZW0mNvg==
+ version "17.13.3"
+ resolved "https://registry.yarnpkg.com/joi/-/joi-17.13.3.tgz#0f5cc1169c999b30d344366d384b12d92558bcec"
+ integrity sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==
dependencies:
"@hapi/hoek" "^9.3.0"
"@hapi/topo" "^5.1.0"
@@ -7215,9 +6374,9 @@ latest-version@^7.0.0:
package-json "^8.1.0"
launch-editor@^2.6.0:
- version "2.6.1"
- resolved "https://registry.yarnpkg.com/launch-editor/-/launch-editor-2.6.1.tgz#f259c9ef95cbc9425620bbbd14b468fcdb4ffe3c"
- integrity sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==
+ version "2.9.1"
+ resolved "https://registry.yarnpkg.com/launch-editor/-/launch-editor-2.9.1.tgz#253f173bd441e342d4344b4dae58291abb425047"
+ integrity sha512-Gcnl4Bd+hRO9P9icCP/RVVT2o8SFlPXofuCxvA2SaZuH45whSvf5p8x5oih5ftLiVhEI4sp5xDY+R+b3zJBh5w==
dependencies:
picocolors "^1.0.0"
shell-quote "^1.8.1"
@@ -7263,9 +6422,9 @@ loader-utils@^2.0.0:
json5 "^2.1.2"
loader-utils@^3.2.0:
- version "3.2.1"
- resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-3.2.1.tgz#4fb104b599daafd82ef3e1a41fb9265f87e1f576"
- integrity sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==
+ version "3.3.1"
+ resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-3.3.1.tgz#735b9a19fd63648ca7adbd31c2327dfe281304e5"
+ integrity sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==
locate-path@^3.0.0:
version "3.0.0"
@@ -7428,9 +6587,9 @@ mdast-util-find-and-replace@^3.0.0, mdast-util-find-and-replace@^3.0.1:
unist-util-visit-parents "^6.0.0"
mdast-util-from-markdown@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.0.tgz#52f14815ec291ed061f2922fd14d6689c810cb88"
- integrity sha512-n7MTOr/z+8NAX/wmhhDji8O3bRvPTV/U0oTCaZJkjhPSKTPhS3xufVhKGF8s1pJ7Ox4QgoIU7KHseh09S+9rTA==
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz#32a6e8f512b416e1f51eb817fc64bd867ebcd9cc"
+ integrity sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==
dependencies:
"@types/mdast" "^4.0.0"
"@types/unist" "^3.0.0"
@@ -7458,9 +6617,9 @@ mdast-util-frontmatter@^2.0.0:
micromark-extension-frontmatter "^2.0.0"
mdast-util-gfm-autolink-literal@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz#5baf35407421310a08e68c15e5d8821e8898ba2a"
- integrity sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz#abd557630337bd30a6d5a4bd8252e1c2dc0875d5"
+ integrity sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==
dependencies:
"@types/mdast" "^4.0.0"
ccount "^2.0.0"
@@ -7523,9 +6682,9 @@ mdast-util-gfm@^3.0.0:
mdast-util-to-markdown "^2.0.0"
mdast-util-mdx-expression@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz#4968b73724d320a379110d853e943a501bfd9d87"
- integrity sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz#43f0abac9adc756e2086f63822a38c8d3c3a5096"
+ integrity sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==
dependencies:
"@types/estree-jsx" "^1.0.0"
"@types/hast" "^3.0.0"
@@ -7535,9 +6694,9 @@ mdast-util-mdx-expression@^2.0.0:
mdast-util-to-markdown "^2.0.0"
mdast-util-mdx-jsx@^3.0.0:
- version "3.1.2"
- resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.2.tgz#daae777c72f9c4a106592e3025aa50fb26068e1b"
- integrity sha512-eKMQDeywY2wlHc97k5eD8VC+9ASMjN8ItEZQNGwJ6E0XWKiW/Z0V5/H8pvoXUf+y+Mj0VIgeRRbujBmFn4FTyA==
+ version "3.1.3"
+ resolved "https://registry.yarnpkg.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz#76b957b3da18ebcfd0de3a9b4451dcd6fdec2320"
+ integrity sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==
dependencies:
"@types/estree-jsx" "^1.0.0"
"@types/hast" "^3.0.0"
@@ -7549,7 +6708,6 @@ mdast-util-mdx-jsx@^3.0.0:
mdast-util-to-markdown "^2.0.0"
parse-entities "^4.0.0"
stringify-entities "^4.0.0"
- unist-util-remove-position "^5.0.0"
unist-util-stringify-position "^4.0.0"
vfile-message "^4.0.0"
@@ -7585,9 +6743,9 @@ mdast-util-phrasing@^4.0.0:
unist-util-is "^6.0.0"
mdast-util-to-hast@^13.0.0:
- version "13.1.0"
- resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-13.1.0.tgz#1ae54d903150a10fe04d59f03b2b95fd210b2124"
- integrity sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==
+ version "13.2.0"
+ resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz#5ca58e5b921cc0a3ded1bc02eed79a4fe4fe41f4"
+ integrity sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==
dependencies:
"@types/hast" "^3.0.0"
"@types/mdast" "^4.0.0"
@@ -7685,9 +6843,9 @@ micromark-core-commonmark@^2.0.0:
micromark-util-types "^2.0.0"
micromark-extension-directive@^3.0.0:
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/micromark-extension-directive/-/micromark-extension-directive-3.0.0.tgz#527869de497a6de9024138479091bc885dae076b"
- integrity sha512-61OI07qpQrERc+0wEysLHMvoiO3s2R56x5u7glHq2Yqq6EHbH4dW25G9GfDdGCDYqA21KE6DWgNSzxSwHc2hSg==
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/micromark-extension-directive/-/micromark-extension-directive-3.0.1.tgz#67b3985bb991a69dbcae52664c57ee54b22f635a"
+ integrity sha512-VGV2uxUzhEZmaP7NSFo2vtq7M2nUD+WfmYQD+d8i/1nHbzE+rMy9uzTvUybBbNiVbrhOZibg3gbyoARGqgDWyg==
dependencies:
devlop "^1.0.0"
micromark-factory-space "^2.0.0"
@@ -7708,9 +6866,9 @@ micromark-extension-frontmatter@^2.0.0:
micromark-util-types "^2.0.0"
micromark-extension-gfm-autolink-literal@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.0.0.tgz#f1e50b42e67d441528f39a67133eddde2bbabfd9"
- integrity sha512-rTHfnpt/Q7dEAK1Y5ii0W8bhfJlVJFnJMHIPisfPK3gpVNuOP0VnRl96+YJ3RYWV/P4gFeQoGKNlT3RhuvpqAg==
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz#6286aee9686c4462c1e3552a9d505feddceeb935"
+ integrity sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==
dependencies:
micromark-util-character "^2.0.0"
micromark-util-sanitize-uri "^2.0.0"
@@ -7718,9 +6876,9 @@ micromark-extension-gfm-autolink-literal@^2.0.0:
micromark-util-types "^2.0.0"
micromark-extension-gfm-footnote@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.0.0.tgz#91afad310065a94b636ab1e9dab2c60d1aab953c"
- integrity sha512-6Rzu0CYRKDv3BfLAUnZsSlzx3ak6HAoI85KTiijuKIz5UxZxbUI+pD6oHgw+6UtQuiRwnGRhzMmPRv4smcz0fg==
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz#4dab56d4e398b9853f6fe4efac4fc9361f3e0750"
+ integrity sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==
dependencies:
devlop "^1.0.0"
micromark-core-commonmark "^2.0.0"
@@ -7732,9 +6890,9 @@ micromark-extension-gfm-footnote@^2.0.0:
micromark-util-types "^2.0.0"
micromark-extension-gfm-strikethrough@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.0.0.tgz#6917db8e320da70e39ffbf97abdbff83e6783e61"
- integrity sha512-c3BR1ClMp5fxxmwP6AoOY2fXO9U8uFMKs4ADD66ahLTNcwzSCyRVU4k7LPV5Nxo/VJiR4TdzxRQY2v3qIUceCw==
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz#86106df8b3a692b5f6a92280d3879be6be46d923"
+ integrity sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==
dependencies:
devlop "^1.0.0"
micromark-util-chunked "^2.0.0"
@@ -7744,9 +6902,9 @@ micromark-extension-gfm-strikethrough@^2.0.0:
micromark-util-types "^2.0.0"
micromark-extension-gfm-table@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.0.0.tgz#2cf3fe352d9e089b7ef5fff003bdfe0da29649b7"
- integrity sha512-PoHlhypg1ItIucOaHmKE8fbin3vTLpDOUg8KAr8gRCF1MOZI9Nquq2i/44wFvviM4WuxJzc3demT8Y3dkfvYrw==
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz#5cadedfbb29fca7abf752447967003dc3b6583c9"
+ integrity sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==
dependencies:
devlop "^1.0.0"
micromark-factory-space "^2.0.0"
@@ -7762,9 +6920,9 @@ micromark-extension-gfm-tagfilter@^2.0.0:
micromark-util-types "^2.0.0"
micromark-extension-gfm-task-list-item@^2.0.0:
- version "2.0.1"
- resolved "https://registry.yarnpkg.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.0.1.tgz#ee8b208f1ced1eb9fb11c19a23666e59d86d4838"
- integrity sha512-cY5PzGcnULaN5O7T+cOzfMoHjBW7j+T9D2sucA5d/KbsBTPcYdebm9zUd9zzdgJGCwahV+/W78Z3nbulBYVbTw==
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz#bcc34d805639829990ec175c3eea12bb5b781f2c"
+ integrity sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==
dependencies:
devlop "^1.0.0"
micromark-factory-space "^2.0.0"
@@ -7801,9 +6959,9 @@ micromark-extension-mdx-expression@^3.0.0:
micromark-util-types "^2.0.0"
micromark-extension-mdx-jsx@^3.0.0:
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.0.tgz#4aba0797c25efb2366a3fd2d367c6b1c1159f4f5"
- integrity sha512-uvhhss8OGuzR4/N17L1JwvmJIpPhAd8oByMawEKx6NVdBCbesjH4t+vjEp3ZXft9DwvlKSD07fCeI44/N0Vf2w==
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz#5abb83da5ddc8e473a374453e6ea56fbd66b59ad"
+ integrity sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==
dependencies:
"@types/acorn" "^4.0.0"
"@types/estree" "^1.0.0"
@@ -7812,6 +6970,7 @@ micromark-extension-mdx-jsx@^3.0.0:
micromark-factory-mdx-expression "^2.0.0"
micromark-factory-space "^2.0.0"
micromark-util-character "^2.0.0"
+ micromark-util-events-to-acorn "^2.0.0"
micromark-util-symbol "^2.0.0"
micromark-util-types "^2.0.0"
vfile-message "^4.0.0"
@@ -7872,12 +7031,13 @@ micromark-factory-label@^2.0.0:
micromark-util-types "^2.0.0"
micromark-factory-mdx-expression@^2.0.0:
- version "2.0.1"
- resolved "https://registry.yarnpkg.com/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.1.tgz#f2a9724ce174f1751173beb2c1f88062d3373b1b"
- integrity sha512-F0ccWIUHRLRrYp5TC9ZYXmZo+p2AM13ggbsW4T0b5CRKP8KHVRB8t4pwtBgTxtjRmwrK0Irwm7vs2JOZabHZfg==
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz#2afaa8ba6d5f63e0cead3e4dee643cad184ca260"
+ integrity sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==
dependencies:
"@types/estree" "^1.0.0"
devlop "^1.0.0"
+ micromark-factory-space "^2.0.0"
micromark-util-character "^2.0.0"
micromark-util-events-to-acorn "^2.0.0"
micromark-util-symbol "^2.0.0"
@@ -8078,7 +7238,7 @@ micromark@^4.0.0:
micromark-util-symbol "^2.0.0"
micromark-util-types "^2.0.0"
-micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5, micromatch@^4.0.7:
+micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5, micromatch@^4.0.8:
version "4.0.8"
resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
@@ -8086,11 +7246,16 @@ micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5, micromatch@^4.0.7:
braces "^3.0.3"
picomatch "^2.3.1"
-mime-db@1.52.0, "mime-db@>= 1.43.0 < 2":
+mime-db@1.52.0:
version "1.52.0"
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+"mime-db@>= 1.43.0 < 2":
+ version "1.53.0"
+ resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.53.0.tgz#3cb63cd820fc29896d9d4e8c32ab4fcd74ccb447"
+ integrity sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg==
+
mime-db@~1.33.0:
version "1.33.0"
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db"
@@ -8131,9 +7296,9 @@ mimic-response@^4.0.0:
integrity sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==
mini-css-extract-plugin@^2.7.6:
- version "2.9.0"
- resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.0.tgz#c73a1327ccf466f69026ac22a8e8fd707b78a235"
- integrity sha512-Zs1YsZVfemekSZG+44vBsYTLQORkPMwnlv+aehcxK/NLKC+EGhDB39/YePYYqx/sTk6NnYpuqikhSn7+JIevTA==
+ version "2.9.1"
+ resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.1.tgz#4d184f12ce90582e983ccef0f6f9db637b4be758"
+ integrity sha512-+Vyi+GCCOHnrJ2VPS+6aPoXN2k2jgUzDRhTFLjjTBn23qyXJXkjUWQgTL+mXpF5/A8ixLdCc6kWsoeOjKGejKQ==
dependencies:
schema-utils "^4.0.0"
tapable "^2.2.1"
@@ -8177,9 +7342,9 @@ mobx-react@^9.1.1:
mobx-react-lite "^4.0.7"
mobx@^6.12.4:
- version "6.13.1"
- resolved "https://registry.yarnpkg.com/mobx/-/mobx-6.13.1.tgz#76c41aa675199f75b84a257e4bec8ff839e33259"
- integrity sha512-ekLRxgjWJr8hVxj9ZKuClPwM/iHckx3euIJ3Np7zLVNtqJvfbbq7l370W/98C8EabdQ1pB5Jd3BbDWxJPNnaOg==
+ version "6.13.2"
+ resolved "https://registry.yarnpkg.com/mobx/-/mobx-6.13.2.tgz#e4514c983c41611d7008ac4cd21c7f3d1be3180d"
+ integrity sha512-GIubI2qf+P6lG6rSEG0T2pg3jV9/0+O0ncF09+0umRe75+Cbnh1KNLM1GvbTY9RSc7QuU+LcPNZfxDY8B+3XRg==
mrmime@^2.0.0:
version "2.0.0"
@@ -8191,11 +7356,6 @@ ms@2.0.0:
resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==
-ms@2.1.2:
- version "2.1.2"
- resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
- integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
-
ms@2.1.3, ms@^2.1.3:
version "2.1.3"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
@@ -8281,11 +7441,6 @@ node-readfiles@^0.2.0:
dependencies:
es6-promise "^3.2.1"
-node-releases@^2.0.14:
- version "2.0.14"
- resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.14.tgz#2ffb053bceb8b2be8495ece1ab6ce600c4461b0b"
- integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==
-
node-releases@^2.0.18:
version "2.0.18"
resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f"
@@ -8377,9 +7532,9 @@ object-assign@^4.1.1:
integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==
object-inspect@^1.13.1:
- version "1.13.1"
- resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2"
- integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==
+ version "1.13.2"
+ resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff"
+ integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==
object-is@^1.1.5:
version "1.1.6"
@@ -8525,20 +7680,20 @@ p-try@^2.0.0:
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
pac-proxy-agent@^7.0.1:
- version "7.0.1"
- resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-7.0.1.tgz#6b9ddc002ec3ff0ba5fdf4a8a21d363bcc612d75"
- integrity sha512-ASV8yU4LLKBAjqIPMbrgtaKIvxQri/yh2OpI+S6hVa9JRkUI3Y3NPFbfngDtY7oFtSMD3w31Xns89mDa3Feo5A==
+ version "7.0.2"
+ resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-7.0.2.tgz#0fb02496bd9fb8ae7eb11cfd98386daaac442f58"
+ integrity sha512-BFi3vZnO9X5Qt6NRz7ZOaPja3ic0PhlsmCRYLOpN11+mWBCR6XJDqW5RF3j8jm4WGGQZtBA+bTfxYzeKW73eHg==
dependencies:
"@tootallnate/quickjs-emscripten" "^0.23.0"
agent-base "^7.0.2"
debug "^4.3.4"
get-uri "^6.0.1"
http-proxy-agent "^7.0.0"
- https-proxy-agent "^7.0.2"
- pac-resolver "^7.0.0"
- socks-proxy-agent "^8.0.2"
+ https-proxy-agent "^7.0.5"
+ pac-resolver "^7.0.1"
+ socks-proxy-agent "^8.0.4"
-pac-resolver@^7.0.0:
+pac-resolver@^7.0.1:
version "7.0.1"
resolved "https://registry.yarnpkg.com/pac-resolver/-/pac-resolver-7.0.1.tgz#54675558ea368b64d210fd9c92a640b5f3b8abb6"
integrity sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==
@@ -8615,7 +7770,14 @@ parse5-htmlparser2-tree-adapter@^7.0.0:
domhandler "^5.0.2"
parse5 "^7.0.0"
-parse5@^7.0.0:
+parse5-parser-stream@^7.1.2:
+ version "7.1.2"
+ resolved "https://registry.yarnpkg.com/parse5-parser-stream/-/parse5-parser-stream-7.1.2.tgz#d7c20eadc37968d272e2c02660fff92dd27e60e1"
+ integrity sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow==
+ dependencies:
+ parse5 "^7.0.0"
+
+parse5@^7.0.0, parse5@^7.1.2:
version "7.1.2"
resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32"
integrity sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==
@@ -8680,18 +7842,18 @@ path-to-regexp@0.1.10:
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.10.tgz#67e9108c5c0551b9e5326064387de4763c4d5f8b"
integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==
+path-to-regexp@1.9.0, path-to-regexp@^1.7.0:
+ version "1.9.0"
+ resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.9.0.tgz#5dc0753acbf8521ca2e0f137b4578b917b10cf24"
+ integrity sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==
+ dependencies:
+ isarray "0.0.1"
+
path-to-regexp@2.2.1:
version "2.2.1"
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-2.2.1.tgz#90b617025a16381a879bc82a38d4e8bdeb2bcf45"
integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==
-path-to-regexp@^1.7.0:
- version "1.8.0"
- resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a"
- integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
- dependencies:
- isarray "0.0.1"
-
path-type@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b"
@@ -8711,15 +7873,10 @@ periscopic@^3.0.0:
estree-walker "^3.0.0"
is-reference "^3.0.0"
-picocolors@^1.0.0:
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c"
- integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==
-
-picocolors@^1.0.1:
- version "1.0.1"
- resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1"
- integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==
+picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.0.tgz#5358b76a78cde483ba5cef6a9dc9671440b27d59"
+ integrity sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==
picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1:
version "2.3.1"
@@ -8997,7 +8154,7 @@ postcss-reduce-transforms@^6.0.2:
dependencies:
postcss-value-parser "^4.2.0"
-postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.16:
+postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.16, postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4:
version "6.1.2"
resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz#27ecb41fb0e3b6ba7a1ec84fff347f734c7929de"
integrity sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==
@@ -9005,14 +8162,6 @@ postcss-selector-parser@^6.0.11, postcss-selector-parser@^6.0.16:
cssesc "^3.0.0"
util-deprecate "^1.0.2"
-postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4:
- version "6.0.16"
- resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.16.tgz#3b88b9f5c5abd989ef4e2fc9ec8eedd34b20fb04"
- integrity sha512-A0RVJrX+IUkVZbW3ClroRWurercFhieevHB38sr2+l9eUClMqome3LmEmnhlNy+5Mr2EYN6B2Kaw9wYdd+VHiw==
- dependencies:
- cssesc "^3.0.0"
- util-deprecate "^1.0.2"
-
postcss-sort-media-queries@^5.2.0:
version "5.2.0"
resolved "https://registry.yarnpkg.com/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz#4556b3f982ef27d3bac526b99b6c0d3359a6cf97"
@@ -9045,7 +8194,7 @@ postcss-zindex@^6.0.2:
resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-6.0.2.tgz#e498304b83a8b165755f53db40e2ea65a99b56e1"
integrity sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==
-postcss@8.4.38, postcss@^8.4.21, postcss@^8.4.26, postcss@^8.4.33:
+postcss@8.4.38:
version "8.4.38"
resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.38.tgz#b387d533baf2054288e337066d81c6bee9db9e0e"
integrity sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==
@@ -9054,14 +8203,14 @@ postcss@8.4.38, postcss@^8.4.21, postcss@^8.4.26, postcss@^8.4.33:
picocolors "^1.0.0"
source-map-js "^1.2.0"
-postcss@^8.4.24, postcss@^8.4.38:
- version "8.4.44"
- resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.44.tgz#d56834ef6508610ba224bb22b2457b2169ed0480"
- integrity sha512-Aweb9unOEpQ3ezu4Q00DPvvM2ZTUitJdNKeP/+uQgr1IBIqu574IaZoURId7BKtWMREwzKa9OgzPzezWGPWFQw==
+postcss@^8.4.21, postcss@^8.4.24, postcss@^8.4.26, postcss@^8.4.33, postcss@^8.4.38:
+ version "8.4.47"
+ resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.47.tgz#5bf6c9a010f3e724c503bf03ef7947dcb0fea365"
+ integrity sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==
dependencies:
nanoid "^3.3.7"
- picocolors "^1.0.1"
- source-map-js "^1.2.0"
+ picocolors "^1.1.0"
+ source-map-js "^1.2.1"
pretty-error@^4.0.0:
version "4.0.0"
@@ -9280,9 +8429,9 @@ react-fast-compare@^3.2.0, react-fast-compare@^3.2.2:
integrity sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==
react-helmet-async@*:
- version "2.0.4"
- resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-2.0.4.tgz#50a4377778f380ed1d0136303916b38eff1bf153"
- integrity sha512-yxjQMWposw+akRfvpl5+8xejl4JtUlHnEBcji6u8/e6oc7ozT+P9PNTWMhCbz2y9tc5zPegw2BvKjQA+NwdEjQ==
+ version "2.0.5"
+ resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-2.0.5.tgz#cfc70cd7bb32df7883a8ed55502a1513747223ec"
+ integrity sha512-rYUYHeus+i27MvFE+Jaa4WsyBKGkL6qVgbJvSBoX8mbsWoABJXdEO0bZyi0F6i+4f0NuIb8AvqPMj3iXFHkMwg==
dependencies:
invariant "^2.2.4"
react-fast-compare "^3.2.2"
@@ -9310,9 +8459,9 @@ react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0:
integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==
react-json-view-lite@^1.2.0:
- version "1.4.0"
- resolved "https://registry.yarnpkg.com/react-json-view-lite/-/react-json-view-lite-1.4.0.tgz#0ff493245f4550abe5e1f1836f170fa70bb95914"
- integrity sha512-wh6F6uJyYAmQ4fK0e8dSQMEWuvTs2Wr3el3sLD9bambX1+pSWUVXIz1RFaoy3TI1mZ0FqdpKq9YgbgTTgyrmXA==
+ version "1.5.0"
+ resolved "https://registry.yarnpkg.com/react-json-view-lite/-/react-json-view-lite-1.5.0.tgz#377cc302821717ac79a1b6d099e1891df54c8662"
+ integrity sha512-nWqA1E4jKPklL2jvHWs6s+7Na0qNgw9HCP6xehdQJeg6nPBTFZgGwyko9Q0oj+jQWKTTVRS30u0toM5wiuL3iw==
react-loadable-ssr-addon-v5-slorber@^1.0.1:
version "1.0.1"
@@ -9476,9 +8625,9 @@ reftools@^1.1.9:
integrity sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==
regenerate-unicode-properties@^10.1.0:
- version "10.1.1"
- resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz#6b0e05489d9076b04c436f318d9b067bba459480"
- integrity sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==
+ version "10.2.0"
+ resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz#626e39df8c372338ea9b8028d1f99dc3fd9c3db0"
+ integrity sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==
dependencies:
regenerate "^1.4.2"
@@ -9732,9 +8881,9 @@ rtl-detect@^1.0.4:
integrity sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==
rtlcss@^4.1.0:
- version "4.1.1"
- resolved "https://registry.yarnpkg.com/rtlcss/-/rtlcss-4.1.1.tgz#f20409fcc197e47d1925996372be196fee900c0c"
- integrity sha512-/oVHgBtnPNcggP2aVXQjSy6N1mMAfHg4GSag0QtZBlD5bdDgAHwr4pydqJGd+SUCu9260+Pjqbjwtvu7EMH1KQ==
+ version "4.3.0"
+ resolved "https://registry.yarnpkg.com/rtlcss/-/rtlcss-4.3.0.tgz#f8efd4d5b64f640ec4af8fa25b65bacd9e07cc97"
+ integrity sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==
dependencies:
escalade "^3.1.1"
picocolors "^1.0.0"
@@ -9783,9 +8932,9 @@ safe-regex-test@^1.0.3:
integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==
sax@^1.2.4:
- version "1.3.0"
- resolved "https://registry.yarnpkg.com/sax/-/sax-1.3.0.tgz#a5dbe77db3be05c9d1ee7785dbd3ea9de51593d0"
- integrity sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==
+ version "1.4.1"
+ resolved "https://registry.yarnpkg.com/sax/-/sax-1.4.1.tgz#44cc8988377f126304d3b3fc1010c733b929ef0f"
+ integrity sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==
scheduler@^0.23.2:
version "0.23.2"
@@ -10067,9 +9216,9 @@ sisteransi@^1.0.5:
integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==
sitemap@^7.1.1:
- version "7.1.1"
- resolved "https://registry.yarnpkg.com/sitemap/-/sitemap-7.1.1.tgz#eeed9ad6d95499161a3eadc60f8c6dce4bea2bef"
- integrity sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==
+ version "7.1.2"
+ resolved "https://registry.yarnpkg.com/sitemap/-/sitemap-7.1.2.tgz#6ce1deb43f6f177c68bc59cf93632f54e3ae6b72"
+ integrity sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==
dependencies:
"@types/node" "^17.0.5"
"@types/sax" "^1.2.1"
@@ -10120,16 +9269,16 @@ sockjs@^0.3.24:
uuid "^8.3.2"
websocket-driver "^0.7.4"
-socks-proxy-agent@^8.0.2:
- version "8.0.3"
- resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-8.0.3.tgz#6b2da3d77364fde6292e810b496cb70440b9b89d"
- integrity sha512-VNegTZKhuGq5vSD6XNKlbqWhyt/40CgoEw8XxD6dhnm8Jq9IEa3nIa4HwnM8XOqU0CdB0BwWVXusqiFXfHB3+A==
+socks-proxy-agent@^8.0.2, socks-proxy-agent@^8.0.4:
+ version "8.0.4"
+ resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-8.0.4.tgz#9071dca17af95f483300316f4b063578fa0db08c"
+ integrity sha512-GNAq/eg8Udq2x0eNiFkr9gRg5bA7PXEWagQdeRX4cPSG+X/8V38v637gim9bjFptMk1QWsCTr0ttrJEiXbNnRw==
dependencies:
agent-base "^7.1.1"
debug "^4.3.4"
- socks "^2.7.1"
+ socks "^2.8.3"
-socks@^2.7.1:
+socks@^2.8.3:
version "2.8.3"
resolved "https://registry.yarnpkg.com/socks/-/socks-2.8.3.tgz#1ebd0f09c52ba95a09750afe3f3f9f724a800cb5"
integrity sha512-l5x7VUUWbjVFbafGLxPWkYsHIhEvmF85tbIeFZWc8ZPtoMyybuEhL7Jye/ooC4/d48FgOjSJXgsF/AJPYCW8Zw==
@@ -10142,10 +9291,10 @@ sort-css-media-queries@2.2.0:
resolved "https://registry.yarnpkg.com/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz#aa33cf4a08e0225059448b6c40eddbf9f1c8334c"
integrity sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==
-source-map-js@^1.0.1, source-map-js@^1.2.0:
- version "1.2.0"
- resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.0.tgz#16b809c162517b5b8c3e7dcd315a2a5c2612b2af"
- integrity sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==
+source-map-js@^1.0.1, source-map-js@^1.2.0, source-map-js@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46"
+ integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==
source-map-support@~0.5.20:
version "0.5.21"
@@ -10347,16 +9496,16 @@ style-to-object@^0.4.0:
inline-style-parser "0.1.1"
style-to-object@^1.0.0:
- version "1.0.6"
- resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-1.0.6.tgz#0c28aed8be1813d166c60d962719b2907c26547b"
- integrity sha512-khxq+Qm3xEyZfKd/y9L3oIWQimxuc4STrQKtQn8aSDRHb8mFgpukgX1hdzfrMEW6JCjyJ8p89x+IUMVnCBI1PA==
+ version "1.0.8"
+ resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-1.0.8.tgz#67a29bca47eaa587db18118d68f9d95955e81292"
+ integrity sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==
dependencies:
- inline-style-parser "0.2.3"
+ inline-style-parser "0.2.4"
styled-components@^6.1.11:
- version "6.1.12"
- resolved "https://registry.yarnpkg.com/styled-components/-/styled-components-6.1.12.tgz#0d9d511aacfb9052936146dcc2856559e6fae4df"
- integrity sha512-n/O4PzRPhbYI0k1vKKayfti3C/IGcPf+DqcrOB7O/ab9x4u/zjqraneT5N45+sIe87cxrCApXM8Bna7NYxwoTA==
+ version "6.1.13"
+ resolved "https://registry.yarnpkg.com/styled-components/-/styled-components-6.1.13.tgz#2d777750b773b31469bd79df754a32479e9f475e"
+ integrity sha512-M0+N2xSnAtwcVAQeFEsGWFFxXDftHUD7XrKla06QbpUMmbmtFBMMTcKWvFXtWxuD5qQkB8iU5gk6QASlx2ZRMw==
dependencies:
"@emotion/is-prop-valid" "1.2.2"
"@emotion/unitless" "0.8.1"
@@ -10464,9 +9613,9 @@ terser-webpack-plugin@^5.3.10, terser-webpack-plugin@^5.3.9:
terser "^5.26.0"
terser@^5.10.0, terser@^5.15.1, terser@^5.26.0:
- version "5.31.0"
- resolved "https://registry.yarnpkg.com/terser/-/terser-5.31.0.tgz#06eef86f17007dbad4593f11a574c7f5eb02c6a1"
- integrity sha512-Q1JFAoUKE5IMfI4Z/lkE/E6+SwgzO+x4tq4v1AyBLRj8VSYvRO6A/rQrPg1yud4g0En9EKI1TvFRF2tQFcoUkg==
+ version "5.33.0"
+ resolved "https://registry.yarnpkg.com/terser/-/terser-5.33.0.tgz#8f9149538c7468ffcb1246cfec603c16720d2db1"
+ integrity sha512-JuPVaB7s1gdFKPKTelwUyRq5Sid2A3Gko2S0PncwdBq7kN9Ti9HPWDQ06MPsEDGsZeVESjKEnyGy68quBk1w6g==
dependencies:
"@jridgewell/source-map" "^0.3.3"
acorn "^8.8.2"
@@ -10530,15 +9679,15 @@ trough@^2.0.0:
resolved "https://registry.yarnpkg.com/trough/-/trough-2.2.0.tgz#94a60bd6bd375c152c1df911a4b11d5b0256f50f"
integrity sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==
-tslib@2.6.2, tslib@^2.0.3, tslib@^2.6.0:
+tslib@2.6.2:
version "2.6.2"
resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
-tslib@^2.0.1:
- version "2.6.3"
- resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0"
- integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==
+tslib@^2.0.1, tslib@^2.0.3, tslib@^2.6.0:
+ version "2.7.0"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.7.0.tgz#d9b40c5c40ab59e8738f297df3087bf1a2690c01"
+ integrity sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==
type-fest@^1.0.1:
version "1.4.0"
@@ -10624,15 +9773,20 @@ unbox-primitive@^1.0.2:
has-symbols "^1.0.3"
which-boxed-primitive "^1.0.2"
-undici-types@~5.26.4:
- version "5.26.5"
- resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
- integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
+undici-types@~6.19.2:
+ version "6.19.8"
+ resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02"
+ integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==
+
+undici@^6.19.5:
+ version "6.19.8"
+ resolved "https://registry.yarnpkg.com/undici/-/undici-6.19.8.tgz#002d7c8a28f8cc3a44ff33c3d4be4d85e15d40e1"
+ integrity sha512-U8uCCl2x9TK3WANvmBavymRzxbfFYG+tAu+fgx3zxQy3qdagQqBLwJVrdyO1TBfUXvfKveMKJZhpvUYoOjM+4g==
unicode-canonical-property-names-ecmascript@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc"
- integrity sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz#cb3173fe47ca743e228216e4a3ddc4c84d628cc2"
+ integrity sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==
unicode-emoji-modifier-base@^1.0.0:
version "1.0.0"
@@ -10648,9 +9802,9 @@ unicode-match-property-ecmascript@^2.0.0:
unicode-property-aliases-ecmascript "^2.0.0"
unicode-match-property-value-ecmascript@^2.1.0:
- version "2.1.0"
- resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz#cb5fffdcd16a05124f5a4b0bf7c3770208acbbe0"
- integrity sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==
+ version "2.2.0"
+ resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz#a0401aee72714598f739b68b104e4fe3a0cb3c71"
+ integrity sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==
unicode-property-aliases-ecmascript@^2.0.0:
version "2.1.0"
@@ -10658,9 +9812,9 @@ unicode-property-aliases-ecmascript@^2.0.0:
integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==
unified@^11.0.0, unified@^11.0.3, unified@^11.0.4:
- version "11.0.4"
- resolved "https://registry.yarnpkg.com/unified/-/unified-11.0.4.tgz#f4be0ac0fe4c88cb873687c07c64c49ed5969015"
- integrity sha512-apMPnyLjAX+ty4OrNap7yumyVAMlKx5IWU2wlzzUdYJO9A8f1p9m/gywF/GM2ZDFcjQPrx59Mc90KwmxsoklxQ==
+ version "11.0.5"
+ resolved "https://registry.yarnpkg.com/unified/-/unified-11.0.5.tgz#f66677610a5c0a9ee90cab2b8d4d66037026d9e1"
+ integrity sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==
dependencies:
"@types/unist" "^3.0.0"
bail "^2.0.0"
@@ -10698,14 +9852,6 @@ unist-util-position@^5.0.0:
dependencies:
"@types/unist" "^3.0.0"
-unist-util-remove-position@^5.0.0:
- version "5.0.0"
- resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz#fea68a25658409c9460408bc6b4991b965b52163"
- integrity sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==
- dependencies:
- "@types/unist" "^3.0.0"
- unist-util-visit "^5.0.0"
-
unist-util-stringify-position@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz#449c6e21a880e0855bf5aabadeb3a740314abac2"
@@ -10740,14 +9886,6 @@ unpipe@1.0.0, unpipe@~1.0.0:
resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==
-update-browserslist-db@^1.0.13:
- version "1.0.15"
- resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.15.tgz#60ed9f8cba4a728b7ecf7356f641a31e3a691d97"
- integrity sha512-K9HWH62x3/EalU1U6sjSZiylm9C8tgq2mSvshZpqc7QE69RaA2qjhkW2HlNA0tFpEbtyFz7HTqbSdN4MSwUodA==
- dependencies:
- escalade "^3.1.2"
- picocolors "^1.0.0"
-
update-browserslist-db@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz#7ca61c0d8650766090728046e416a8cde682859e"
@@ -10776,7 +9914,12 @@ update-notifier@^6.0.2:
semver-diff "^4.0.0"
xdg-basedir "^5.1.0"
-uri-js@^4.2.2, uri-js@^4.4.1:
+uri-js-replace@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/uri-js-replace/-/uri-js-replace-1.0.1.tgz#c285bb352b701c9dfdaeffc4da5be77f936c9048"
+ integrity sha512-W+C9NWNLFOoBI2QWDp4UT9pv65r2w5Cx+3sTYFvtMdDBxkKt1syCqsUdSFAChbEe1uK5TfS04wt/nGwmaeIQ0g==
+
+uri-js@^4.2.2:
version "4.4.1"
resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e"
integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==
@@ -10838,9 +9981,9 @@ vary@~1.1.2:
integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==
vfile-location@^5.0.0:
- version "5.0.2"
- resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-5.0.2.tgz#220d9ca1ab6f8b2504a4db398f7ebc149f9cb464"
- integrity sha512-NXPYyxyBSH7zB5U6+3uDdd6Nybz6o6/od9rk8bp9H8GR3L+cm/fC0uUTbqBmUTnMCUDslAGBOIKNfvvb+gGlDg==
+ version "5.0.3"
+ resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-5.0.3.tgz#cb9eacd20f2b6426d19451e0eafa3d0a846225c3"
+ integrity sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==
dependencies:
"@types/unist" "^3.0.0"
vfile "^6.0.0"
@@ -10854,12 +9997,11 @@ vfile-message@^4.0.0:
unist-util-stringify-position "^4.0.0"
vfile@^6.0.0, vfile@^6.0.1:
- version "6.0.1"
- resolved "https://registry.yarnpkg.com/vfile/-/vfile-6.0.1.tgz#1e8327f41eac91947d4fe9d237a2dd9209762536"
- integrity sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==
+ version "6.0.3"
+ resolved "https://registry.yarnpkg.com/vfile/-/vfile-6.0.3.tgz#3652ab1c496531852bf55a6bac57af981ebc38ab"
+ integrity sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==
dependencies:
"@types/unist" "^3.0.0"
- unist-util-stringify-position "^4.0.0"
vfile-message "^4.0.0"
vscode-languageserver-textdocument@^1.0.12:
@@ -10873,9 +10015,9 @@ vscode-uri@^3.0.8:
integrity sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==
watchpack@^2.4.1:
- version "2.4.1"
- resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.1.tgz#29308f2cac150fa8e4c92f90e0ec954a9fed7fff"
- integrity sha512-8wrBCMtVhqcXP2Sup1ctSkga6uc2Bx0IIvKyT7yTFier5AXHooSI+QyQQAtTb7+E0IUCCKyTFmXqdqgum2XWGg==
+ version "2.4.2"
+ resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.2.tgz#2feeaed67412e7c33184e5a79ca738fbd38564da"
+ integrity sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==
dependencies:
glob-to-regexp "^0.4.1"
graceful-fs "^4.1.2"
@@ -11029,6 +10171,18 @@ websocket-extensions@>=0.1.1:
resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42"
integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==
+whatwg-encoding@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5"
+ integrity sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==
+ dependencies:
+ iconv-lite "0.6.3"
+
+whatwg-mimetype@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz#bc1bf94a985dc50388d54a9258ac405c3ca2fc0a"
+ integrity sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==
+
whatwg-url@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d"
@@ -11160,7 +10314,7 @@ yaml@^1.10.0, yaml@^1.7.2:
resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b"
integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==
-yaml@^2.5.0:
+yaml@^2.5.1:
version "2.5.1"
resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.5.1.tgz#c9772aacf62cb7494a95b0c4f1fb065b563db130"
integrity sha512-bLQOjaX/ADgQ20isPJRvF0iRUHIxVhYvr53Of7wGcWlO2jvtUlH5m87DsmulFVxRpNLOnI4tB6p/oh8D7kpn9Q==
@@ -11189,9 +10343,9 @@ yocto-queue@^0.1.0:
integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==
yocto-queue@^1.0.0:
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.0.0.tgz#7f816433fb2cbc511ec8bf7d263c3b58a1a3c251"
- integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.1.1.tgz#fef65ce3ac9f8a32ceac5a634f74e17e5b232110"
+ integrity sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==
zwitch@^2.0.0:
version "2.0.4"
From 3a8e34da699a7fabaa49262d21d236c9a1624eff Mon Sep 17 00:00:00 2001
From: Mahendra Paipuri
Date: Tue, 17 Sep 2024 17:53:30 +0200
Subject: [PATCH 18/18] fix: Correct BTF map declarations
Signed-off-by: Mahendra Paipuri
---
pkg/collector/bpf/network/bpf_network.h | 6 +++---
pkg/collector/bpf/vfs/bpf_vfs.h | 10 +++++-----
2 files changed, 8 insertions(+), 8 deletions(-)
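Note (not part of the commit message): BTF-defined maps express scalar attributes such as flags with __uint(), while __type() is reserved for key/value type information, so declaring flags via __type() does not populate map_flags. Below is a minimal sketch of an LRU hash map in the corrected form; the map name, entry count, and key/value types are hypothetical and the includes assume the usual vmlinux.h / bpf_helpers.h headers.

#include "vmlinux.h"
#include "bpf_helpers.h"

/* Example LRU hash keyed by a plain __u32 id, accumulating a __u64 counter.
 * BPF_F_NO_COMMON_LRU gives each CPU its own LRU list instead of one shared
 * eviction list, reducing cross-CPU contention. */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 1024);              /* hypothetical size */
	__type(key, __u32);                     /* key type: declared with __type() */
	__type(value, __u64);                   /* value type: declared with __type() */
	__uint(map_flags, BPF_F_NO_COMMON_LRU); /* flags are scalar: use __uint() */
} example_accumulator SEC(".maps");

The hunks below switch each accumulator map to this form so that BPF_F_NO_COMMON_LRU actually ends up in map_flags.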
diff --git a/pkg/collector/bpf/network/bpf_network.h b/pkg/collector/bpf/network/bpf_network.h
index 1389bc1f..69c5ff3d 100644
--- a/pkg/collector/bpf/network/bpf_network.h
+++ b/pkg/collector/bpf/network/bpf_network.h
@@ -36,7 +36,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct net_event); /* Key is the net_event struct */
__type(value, struct net_stats);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} ingress_accumulator SEC(".maps");
/* Map to track ingress events */
@@ -45,7 +45,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct net_event); /* Key is the net_event struct */
__type(value, struct net_stats);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} egress_accumulator SEC(".maps");
/* Map to track retransmission events */
@@ -54,7 +54,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct net_event); /* Key is the net_event struct */
__type(value, struct net_stats);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} retrans_accumulator SEC(".maps");
/**
diff --git a/pkg/collector/bpf/vfs/bpf_vfs.h b/pkg/collector/bpf/vfs/bpf_vfs.h
index b86fad1a..7139a3ad 100644
--- a/pkg/collector/bpf/vfs/bpf_vfs.h
+++ b/pkg/collector/bpf/vfs/bpf_vfs.h
@@ -46,7 +46,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct vfs_event_key); /* Key is the vfs_event_key struct */
__type(value, struct vfs_rw_event);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} write_accumulator SEC(".maps");
/* Map to track vfs_read events */
@@ -55,7 +55,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, struct vfs_event_key); /* Key is the vfs_event_key struct */
__type(value, struct vfs_rw_event);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} read_accumulator SEC(".maps");
/* Map to track vfs_open events */
@@ -64,7 +64,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, __u32); /* Key is a plain __u32 id */
__type(value, struct vfs_inode_event);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} open_accumulator SEC(".maps");
/* Map to track vfs_create events */
@@ -73,7 +73,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, __u32); /* Key is a plain __u32 id */
__type(value, struct vfs_inode_event);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} create_accumulator SEC(".maps");
/* Map to track vfs_unlink events */
@@ -82,7 +82,7 @@ struct {
__uint(max_entries, MAX_MAP_ENTRIES);
__type(key, __u32); /* Key is a plain __u32 id */
__type(value, struct vfs_inode_event);
- __type(flags, BPF_F_NO_COMMON_LRU);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
} unlink_accumulator SEC(".maps");
/**