replace custom Go structs with cilium auto-generated ones
Signed-off-by: msherif1234 <mmahmoud@redhat.com>
msherif1234 committed Jan 24, 2023
1 parent 883b1a6 commit 30722c5
Showing 26 changed files with 455 additions and 413 deletions.
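Note on the change: the hand-written flow.RecordKey / flow.RecordMetrics Go structs are dropped in favor of the types that cilium's bpf2go tool generates from the C structs in bpf/flow.h. A rough sketch of what those generated types look like, built only from the names visible in this diff; the Go field types and any fields not shown here are assumptions, not the actual generated file:

```go
// Sketch of the bpf2go-generated types referenced in the diff below
// (package pkg/ebpf). Only fields that appear in this diff are listed.
package ebpf

type BpfFlowId struct {
	EthProtocol       uint16
	SrcIp             [16]uint8 // IPv4 stored as ::ffff:a.b.c.d (RFC 4038)
	DstIp             [16]uint8
	SrcPort           uint16
	DstPort           uint16
	TransportProtocol uint8
	IfIndex           uint32
	// remaining fields omitted
}

type BpfFlowMetrics struct {
	Packets         uint32
	Bytes           uint64
	StartMonoTimeTs uint64
	EndMonoTimeTs   uint64
	Errno           uint8
	// remaining fields omitted
}

type BpfFlowRecord struct {
	Id      BpfFlowId
	Metrics BpfFlowMetrics
}
```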
13 changes: 11 additions & 2 deletions bpf/flow.h
@@ -26,6 +26,9 @@ typedef struct flow_metrics_t {
u8 errno;
} __attribute__((packed)) flow_metrics;

+// Force emitting struct flow_metrics into the ELF.
+const struct flow_metrics_t *unused1 __attribute__((unused));

// Attributes that uniquely identify a flow
typedef struct flow_id_t {
u16 eth_protocol;
@@ -36,8 +39,8 @@ typedef struct flow_id_t {
// L3 network layer
// IPv4 addresses are encoded as IPv6 addresses with prefix ::ffff/96
// as described in https://datatracker.ietf.org/doc/html/rfc4038#section-4.2
-struct in6_addr src_ip;
-struct in6_addr dst_ip;
+u8 src_ip[16];
+u8 dst_ip[16];
// L4 transport layer
u16 src_port;
u16 dst_port;
@@ -46,11 +49,17 @@ typedef struct flow_id_t {
u32 if_index;
} __attribute__((packed)) flow_id;

+// Force emitting struct flow_id into the ELF.
+const struct flow_id_t *unused2 __attribute__((unused));

// Flow record is a tuple containing both flow identifier and metrics. It is used to send
// a complete flow via ring buffer only when the accounting hashmap is full.
// Contents in this struct must match byte-by-byte with Go's pkg/flow/Record struct
typedef struct flow_record_t {
flow_id id;
flow_metrics metrics;
} __attribute__((packed)) flow_record;

+// Force emitting struct flow_record into the ELF.
+const struct flow_record_t *unused3 __attribute__((unused));
#endif
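The three "Force emitting ... into the ELF" declarations added above follow the usual bpf2go pattern: an otherwise unused pointer to each struct makes the compiler keep that struct's BTF type information in the object file, which is what lets bpf2go emit matching Go declarations for it. A hedged sketch of the kind of generate directive that consumes these types; the actual command, flags and paths used by this repo may differ:

```go
// Hypothetical go:generate directive, not necessarily the one in this repo.
// The -type flags ask bpf2go to emit Go declarations for the C types whose
// BTF the unused pointers above force into the ELF.
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target bpfel -type flow_metrics -type flow_id -type flow_record Bpf ../../bpf/flows.c -- -I../../bpf/headers
```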
12 changes: 6 additions & 6 deletions bpf/flows.c
@@ -108,10 +108,10 @@ static inline int fill_iphdr(struct iphdr *ip, void *data_end, flow_id *id, u16
return DISCARD;
}

-__builtin_memcpy(id->src_ip.s6_addr, ip4in6, sizeof(ip4in6));
-__builtin_memcpy(id->dst_ip.s6_addr, ip4in6, sizeof(ip4in6));
-__builtin_memcpy(id->src_ip.s6_addr + sizeof(ip4in6), &ip->saddr, sizeof(ip->saddr));
-__builtin_memcpy(id->dst_ip.s6_addr + sizeof(ip4in6), &ip->daddr, sizeof(ip->daddr));
+__builtin_memcpy(id->src_ip, ip4in6, sizeof(ip4in6));
+__builtin_memcpy(id->dst_ip, ip4in6, sizeof(ip4in6));
+__builtin_memcpy(id->src_ip + sizeof(ip4in6), &ip->saddr, sizeof(ip->saddr));
+__builtin_memcpy(id->dst_ip + sizeof(ip4in6), &ip->daddr, sizeof(ip->daddr));
id->transport_protocol = ip->protocol;
id->src_port = 0;
id->dst_port = 0;
@@ -143,8 +143,8 @@ static inline int fill_ip6hdr(struct ipv6hdr *ip, void *data_end, flow_id *id, u
return DISCARD;
}

-id->src_ip = ip->saddr;
-id->dst_ip = ip->daddr;
+__builtin_memcpy(id->src_ip, ip->saddr.in6_u.u6_addr8, 16);
+__builtin_memcpy(id->dst_ip, ip->daddr.in6_u.u6_addr8, 16);
id->transport_protocol = ip->nexthdr;
id->src_port = 0;
id->dst_port = 0;
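With src_ip and dst_ip now plain 16-byte arrays, IPv4 addresses keep the ::ffff/96 mapping described above, and the Go side no longer needs a custom address type. A minimal sketch of how a consumer could turn the generated field back into a net.IP; the helper below is illustrative, not code from the repo:

```go
package main

import (
	"fmt"
	"net"
)

// ipOf is an illustrative helper, not part of the repo: it converts the
// generated 16-byte address field back into a net.IP. IPv4 flows carry the
// ::ffff:a.b.c.d mapping, so To4() recovers the 4-byte form when it applies.
func ipOf(raw [16]uint8) net.IP {
	ip := net.IP(raw[:])
	if v4 := ip.To4(); v4 != nil {
		return v4
	}
	return ip
}

func main() {
	var src [16]uint8
	copy(src[:], net.ParseIP("192.0.2.1").To16()) // same ::ffff-prefixed layout the BPF code writes
	fmt.Println(ipOf(src))                        // prints 192.0.2.1
}
```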
2 changes: 1 addition & 1 deletion pkg/agent/agent.go
@@ -78,7 +78,7 @@ type ebpfFlowFetcher interface {
io.Closer
Register(iface ifaces.Interface) error

-LookupAndDeleteMap() map[flow.RecordKey][]flow.RecordMetrics
+LookupAndDeleteMap() map[ebpf.BpfFlowId][]ebpf.BpfFlowMetrics
ReadRingBuf() (ringbuf.Record, error)
}

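LookupAndDeleteMap now hands callers the generated key/value types directly: each BpfFlowId key maps to the per-CPU slice of BpfFlowMetrics read from the eBPF map. A hedged sketch of how a caller might fold those per-CPU entries together; the helper and its summing rules are illustrative, not the agent's actual accumulation code:

```go
package main

import (
	"fmt"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
)

// sumFlows is an illustrative consumer of the new LookupAndDeleteMap return
// type; the aggregation rules here are an assumption, not the agent's code.
func sumFlows(flows map[ebpf.BpfFlowId][]ebpf.BpfFlowMetrics) {
	for id, perCPU := range flows {
		var total ebpf.BpfFlowMetrics
		for _, m := range perCPU {
			total.Packets += m.Packets // one entry per CPU that touched the flow
			total.Bytes += m.Bytes
		}
		fmt.Printf("ifindex=%d %d->%d: %d packets, %d bytes\n",
			id.IfIndex, id.SrcPort, id.DstPort, total.Packets, total.Bytes)
	}
}

func main() {
	sumFlows(map[ebpf.BpfFlowId][]ebpf.BpfFlowMetrics{
		{SrcPort: 8080, DstPort: 443, IfIndex: 1}: {{Packets: 2, Bytes: 120}, {Packets: 5, Bytes: 900}},
	})
}
```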
100 changes: 52 additions & 48 deletions pkg/agent/agent_test.go
@@ -8,6 +8,7 @@ import (

"github.com/gavv/monotime"
test2 "github.com/mariomac/guara/pkg/test"
"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
"github.com/netobserv/netobserv-ebpf-agent/pkg/flow"
"github.com/netobserv/netobserv-ebpf-agent/pkg/test"
"github.com/stretchr/testify/assert"
@@ -43,18 +44,21 @@ func TestFlowsAgent_InvalidConfigs(t *testing.T) {
}

var (
-key1 = flow.RecordKey{
-Transport: flow.Transport{SrcPort: 123, DstPort: 456},
-IFIndex: 3,
+key1 = ebpf.BpfFlowId{
+SrcPort: 123,
+DstPort: 456,
+IfIndex: 3,
}
-key1Dupe = flow.RecordKey{
-Transport: flow.Transport{SrcPort: 123, DstPort: 456},
-IFIndex: 4,
+key1Dupe = ebpf.BpfFlowId{
+SrcPort: 123,
+DstPort: 456,
+IfIndex: 4,
}

-key2 = flow.RecordKey{
-Transport: flow.Transport{SrcPort: 333, DstPort: 532},
-IFIndex: 3,
+key2 = ebpf.BpfFlowId{
+SrcPort: 333,
+DstPort: 532,
+IfIndex: 3,
}
)
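key1 and key1Dupe describe the same flow seen on two interfaces (IfIndex 3 vs 4), which is what the deduplication tests below exercise. A hedged sketch of that comparison; the helper is illustrative only and the agent's real dedup logic lives elsewhere in the repo:

```go
// Package dedupsketch is illustrative; it only shows the comparison the
// deduplication tests rely on, not the agent's actual implementation.
package dedupsketch

import "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"

// sameFlowIgnoringInterface treats two BpfFlowId values as duplicates of each
// other when they differ only by IfIndex. BpfFlowId contains only comparable
// fields, so == works on the zeroed copies.
func sameFlowIgnoringInterface(a, b ebpf.BpfFlowId) bool {
	a.IfIndex = 0 // a and b are passed by value, so zeroing the copies is safe
	b.IfIndex = 0
	return a == b
}
```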

@@ -69,28 +73,28 @@ func TestFlowsAgent_Deduplication(t *testing.T) {
exported := export.Get(t, timeout)
assert.Len(t, exported, 2)

-receivedKeys := map[flow.RecordKey]struct{}{}
+receivedKeys := map[ebpf.BpfFlowId]struct{}{}

var key1Flows []*flow.Record
for _, f := range exported {
-require.NotContains(t, receivedKeys, f.RecordKey)
-receivedKeys[f.RecordKey] = struct{}{}
-switch f.RecordKey {
+require.NotContains(t, receivedKeys, f.Id)
+receivedKeys[f.Id] = struct{}{}
+switch f.Id {
case key1:
-assert.EqualValues(t, 4, f.Packets)
-assert.EqualValues(t, 66, f.Bytes)
+assert.EqualValues(t, 4, f.Metrics.Packets)
+assert.EqualValues(t, 66, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
assert.Equal(t, "foo", f.Interface)
key1Flows = append(key1Flows, f)
case key1Dupe:
-assert.EqualValues(t, 4, f.Packets)
-assert.EqualValues(t, 66, f.Bytes)
+assert.EqualValues(t, 4, f.Metrics.Packets)
+assert.EqualValues(t, 66, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
assert.Equal(t, "bar", f.Interface)
key1Flows = append(key1Flows, f)
case key2:
-assert.EqualValues(t, 7, f.Packets)
-assert.EqualValues(t, 33, f.Bytes)
+assert.EqualValues(t, 7, f.Metrics.Packets)
+assert.EqualValues(t, 33, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
}
}
Expand All @@ -106,31 +110,31 @@ func TestFlowsAgent_DeduplicationJustMark(t *testing.T) {
})

exported := export.Get(t, timeout)
-receivedKeys := map[flow.RecordKey]struct{}{}
+receivedKeys := map[ebpf.BpfFlowId]struct{}{}

assert.Len(t, exported, 3)
duplicates := 0
for _, f := range exported {
-require.NotContains(t, receivedKeys, f.RecordKey)
-receivedKeys[f.RecordKey] = struct{}{}
-switch f.RecordKey {
+require.NotContains(t, receivedKeys, f.Id)
+receivedKeys[f.Id] = struct{}{}
+switch f.Id {
case key1:
-assert.EqualValues(t, 4, f.Packets)
-assert.EqualValues(t, 66, f.Bytes)
+assert.EqualValues(t, 4, f.Metrics.Packets)
+assert.EqualValues(t, 66, f.Metrics.Bytes)
if f.Duplicate {
duplicates++
}
assert.Equal(t, "foo", f.Interface)
case key1Dupe:
-assert.EqualValues(t, 4, f.Packets)
-assert.EqualValues(t, 66, f.Bytes)
+assert.EqualValues(t, 4, f.Metrics.Packets)
+assert.EqualValues(t, 66, f.Metrics.Bytes)
if f.Duplicate {
duplicates++
}
assert.Equal(t, "bar", f.Interface)
case key2:
-assert.EqualValues(t, 7, f.Packets)
-assert.EqualValues(t, 33, f.Bytes)
+assert.EqualValues(t, 7, f.Metrics.Packets)
+assert.EqualValues(t, 33, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
}
}
@@ -146,28 +150,28 @@ func TestFlowsAgent_Deduplication_None(t *testing.T) {

exported := export.Get(t, timeout)
assert.Len(t, exported, 3)
-receivedKeys := map[flow.RecordKey]struct{}{}
+receivedKeys := map[ebpf.BpfFlowId]struct{}{}

var key1Flows []*flow.Record
for _, f := range exported {
-require.NotContains(t, receivedKeys, f.RecordKey)
-receivedKeys[f.RecordKey] = struct{}{}
-switch f.RecordKey {
+require.NotContains(t, receivedKeys, f.Id)
+receivedKeys[f.Id] = struct{}{}
+switch f.Id {
case key1:
-assert.EqualValues(t, 4, f.Packets)
-assert.EqualValues(t, 66, f.Bytes)
+assert.EqualValues(t, 4, f.Metrics.Packets)
+assert.EqualValues(t, 66, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
assert.Equal(t, "foo", f.Interface)
key1Flows = append(key1Flows, f)
case key1Dupe:
-assert.EqualValues(t, 4, f.Packets)
-assert.EqualValues(t, 66, f.Bytes)
+assert.EqualValues(t, 4, f.Metrics.Packets)
+assert.EqualValues(t, 66, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
assert.Equal(t, "bar", f.Interface)
key1Flows = append(key1Flows, f)
case key2:
-assert.EqualValues(t, 7, f.Packets)
-assert.EqualValues(t, 33, f.Bytes)
+assert.EqualValues(t, 7, f.Metrics.Packets)
+assert.EqualValues(t, 33, f.Metrics.Bytes)
assert.False(t, f.Duplicate)
}
}
Expand All @@ -187,7 +191,7 @@ func TestFlowsAgent_Decoration(t *testing.T) {
// add the interface name and the agent IP
for _, f := range exported {
assert.Equal(t, agentIP, f.AgentIP.String())
-switch f.RecordKey {
+switch f.Id {
case key1, key2:
assert.Equal(t, "foo", f.Interface)
default:
@@ -197,13 +201,13 @@ }
}

func testAgent(t *testing.T, cfg *Config) *test.ExporterFake {
-ebpf := test.NewTracerFake()
+ebpfTracer := test.NewTracerFake()
export := test.NewExporterFake()
agent, err := flowsAgent(cfg,
test.SliceInformerFake{
{Name: "foo", Index: 3},
{Name: "bar", Index: 4},
-}, ebpf, export.Export,
+}, ebpfTracer, export.Export,
net.ParseIP(agentIP))
require.NoError(t, err)

@@ -215,15 +219,15 @@ func testAgent(t *testing.T, cfg *Config) *test.ExporterFake {
})

now := uint64(monotime.Now())
-key1Metrics := []flow.RecordMetrics{
-{Packets: 3, Bytes: 44, StartMonoTimeNs: now + 1000, EndMonoTimeNs: now + 1_000_000_000},
-{Packets: 1, Bytes: 22, StartMonoTimeNs: now, EndMonoTimeNs: now + 3000},
+key1Metrics := []ebpf.BpfFlowMetrics{
+{Packets: 3, Bytes: 44, StartMonoTimeTs: now + 1000, EndMonoTimeTs: now + 1_000_000_000},
+{Packets: 1, Bytes: 22, StartMonoTimeTs: now, EndMonoTimeTs: now + 3000},
}
-key2Metrics := []flow.RecordMetrics{
-{Packets: 7, Bytes: 33, StartMonoTimeNs: now, EndMonoTimeNs: now + 2_000_000_000},
+key2Metrics := []ebpf.BpfFlowMetrics{
+{Packets: 7, Bytes: 33, StartMonoTimeTs: now, EndMonoTimeTs: now + 2_000_000_000},
}

-ebpf.AppendLookupResults(map[flow.RecordKey][]flow.RecordMetrics{
+ebpfTracer.AppendLookupResults(map[ebpf.BpfFlowId][]ebpf.BpfFlowMetrics{
key1: key1Metrics,
key1Dupe: key1Metrics,
key2: key2Metrics,
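For reference, the expectations asserted in the tests above follow directly from these fake lookup results: the per-key BpfFlowMetrics entries are summed per flow, so key1 and key1Dupe each come out to 4 packets and 66 bytes, while key2 stays at 7 packets and 33 bytes. A tiny worked check of that arithmetic:

```go
package main

import "fmt"

func main() {
	// key1Metrics: {Packets: 3, Bytes: 44} + {Packets: 1, Bytes: 22}
	fmt.Println(3+1, 44+22) // 4 66 -> the values asserted for key1 and key1Dupe
	// key2Metrics has a single entry, so it is exported unchanged
	fmt.Println(7, 33) // 7 33 -> the values asserted for key2
}
```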