{ config
, pkgs
, notnft
, lib
, router-lib
, server-config
, ...
}:
let
cfg = config.router-settings;
hapdConfig = {
inherit (cfg) country_code wpa_passphrase;
he_su_beamformer = true;
he_su_beamformee = true;
he_mu_beamformer = true;
he_spr_sr_control = 3;
he_default_pe_duration = 4;
he_rts_threshold = 1023;
he_mu_edca_qos_info_param_count = 0;
he_mu_edca_qos_info_q_ack = 0;
he_mu_edca_qos_info_queue_request = 0;
he_mu_edca_qos_info_txop_request = 0;
he_mu_edca_ac_be_aifsn = 8;
he_mu_edca_ac_be_aci = 0;
he_mu_edca_ac_be_ecwmin = 9;
he_mu_edca_ac_be_ecwmax = 10;
he_mu_edca_ac_be_timer = 255;
he_mu_edca_ac_bk_aifsn = 15;
he_mu_edca_ac_bk_aci = 1;
he_mu_edca_ac_bk_ecwmin = 9;
he_mu_edca_ac_bk_ecwmax = 10;
he_mu_edca_ac_bk_timer = 255;
he_mu_edca_ac_vi_ecwmin = 5;
he_mu_edca_ac_vi_ecwmax = 7;
he_mu_edca_ac_vi_aifsn = 5;
he_mu_edca_ac_vi_aci = 2;
he_mu_edca_ac_vi_timer = 255;
he_mu_edca_ac_vo_aifsn = 5;
he_mu_edca_ac_vo_aci = 3;
he_mu_edca_ac_vo_ecwmin = 5;
he_mu_edca_ac_vo_ecwmax = 7;
he_mu_edca_ac_vo_timer = 255;
preamble = true;
country3 = "0x49"; # indoor
};
# routing tables
wan_table = 1;
# vpn table, assign an id but don't actually add a rule for it, so it is the default
vpn_table = 2;
vpn_iface =
if cfg.vpn.openvpn.enable && !cfg.vpn.wireguard.enable then "tun0"
else if cfg.vpn.wireguard.enable && !cfg.vpn.openvpn.enable then "wg0"
else throw "Exactly one of OpenVPN/Wireguard must be used";
vpn_mtu = config.networking.wireguard.interfaces.${vpn_iface}.mtu or 1320;
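# MSS = MTU minus IP + TCP headers: 20 (IPv4) + 20 (TCP) = 40, 40 (IPv6) + 20 (TCP) = 60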
vpn_ipv4_mss = vpn_mtu - 40;
vpn_ipv6_mss = vpn_mtu - 60;
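# how return traffic for a DNAT rule gets routed back out (see the consumers further below):
#   "rule" - via an ip rule keyed on the target's source address/port
#            (the default when the target is the router's own LAN address)
#   "mark" - via an fwmark set in the prerouting chain
#   "snat" - by masquerading in the wan namespace instead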
dnatRuleMode = rule:
if rule.mode != "" then rule.mode
else if rule.target4.address or null == netAddresses.lan4 || rule.target6.address or null == netAddresses.lan6 then "rule"
else "mark";
dnatRuleProtos = rule:
let
inherit (notnft.inetProtos) tcp udp;
in
if rule.tcp && rule.udp then notnft.dsl.set [ tcp udp ]
else if rule.tcp then tcp
else if rule.udp then udp
else throw "Invalid rule: either tcp or udp must be set";
setIfNeeded = arr:
if builtins.length arr == 1 then builtins.head arr
else notnft.dsl.set arr;
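# e.g. setIfNeeded [ 80 ] is just 80, while setIfNeeded [ 80 443 ] becomes an nftables anonymous set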
# nftables rules generator
# selfIp4/selfIp6 = the router's own addresses; packets arriving with these as source are dropped (anti-spoofing)
# extraInetEntries = stuff to add to inet table
# extraNetdevEntries = stuff to add to netdev table
# wans = external interfaces (internet)
# lans = internal interfaces (lan)
# netdevIngressWanRules = additional rules for ingress (netdev)
# inetInboundWanRules = additional rules for input from wan (inet)
# inetInboundLanRules = same for lan
# inetForwardRules = additional forward rules besides allow lan->wan forwarding
# inetSnatRules = snat rules (changing source address, usually just called nat)
# inetDnatRules = dnat rules (changing destination address, i.e. port forwarding)
# logPrefix = log prefix for drops
mkRules = {
selfIp4,
selfIp6,
extraInetEntries ? {},
extraNetdevEntries ? {},
wans,
lans,
netdevIngressWanRules ? [],
inetInboundWanRules ? [],
inetInboundLanRules ? [],
inetForwardRules ? [],
inetSnatRules ? [],
inetDnatRules ? [],
logPrefix ? "",
}:
let
# only log these drops on the wan side (logPrefix is e.g. "wan " or "lan ", hence hasPrefix)
logIfWan = prefix: lib.optional (lib.hasPrefix "wan" logPrefix) (notnft.dsl.log prefix);
in with notnft.dsl; with payload; ruleset {
filter = add table.netdev ({
ingress_common = add chain
([(is.eq (bit.and tcp.flags (f: bit.or f.fin f.syn)) (f: bit.or f.fin f.syn))] ++ logIfWan "${logPrefix}fin+syn drop " ++ [drop])
([(is.eq (bit.and tcp.flags (f: bit.or f.syn f.rst)) (f: bit.or f.syn f.rst))] ++ logIfWan "${logPrefix}syn+rst drop " ++ [drop])
[(is.eq (bit.and tcp.flags (f: with f; bit.or fin syn rst psh ack urg)) 0) (log "${logPrefix}null drop ") drop]
[(is tcp.flags (f: f.syn)) (is.eq tcpOpt.maxseg.size (range 0 500)) (log "${logPrefix}maxseg drop ") drop]
# reject requests with own saddr
# log if they are meant for us...
[(is.eq ip.saddr selfIp4) (is.eq (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: f.local)) (log "${logPrefix}self4 ") drop]
[(is.eq ip6.saddr selfIp6) (is.eq (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: f.local)) (log "${logPrefix}self6 ") drop]
# ...but ignore if they're multicast/broadcast
[return];
ingress_lan_common = add chain
# the reverse-path (oif) check below causes issues on the lan side, so it stays disabled here
# [(is.eq (fib (f: with f; [ saddr mark iif ]) (f: f.oif)) missing) (log "${logPrefix}oif missing ") drop]
inetInboundLanRules
[(jump "ingress_common")];
ingress_wan_common = add chain
netdevIngressWanRules
[(jump "ingress_common")]
# [(is.ne (fib (f: with f; [ daddr iif ]) (f: f.type)) (f: set [ f.local f.broadcast f.multicast ])) (log "${logPrefix}non-{local,broadcast,multicast} ") drop]
# separate limits for echo-request and all other icmp types
[(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) (limit { rate = 50; per = f: f.second; }) accept]
[(is.eq ip.protocol (f: f.icmp)) (is.ne icmp.type (f: f.echo-request)) (limit { rate = 100; per = f: f.second; }) accept]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: f.echo-request)) (limit { rate = 50; per = f: f.second; }) accept]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.ne icmpv6.type (f: f.echo-request)) (limit { rate = 100; per = f: f.second; }) accept]
# always accept destination unreachable, time-exceeded, parameter-problem, packet-too-big
[(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: with f; set [ destination-unreachable time-exceeded parameter-problem ])) accept]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded parameter-problem packet-too-big ])) accept]
# don't log echo-request drops
[(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) drop]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: f.echo-request)) drop]
[(is.eq ip.protocol (f: f.icmp)) (log "${logPrefix}icmp flood ") drop]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (log "${logPrefix}icmp6 flood ") drop];
}
// extraNetdevEntries
// builtins.listToAttrs (map (name: {
name = "ingress_${name}";
value = add chain { type = f: f.filter; hook = f: f.ingress; dev = name; prio = -500; policy = f: f.accept; }
[(jump "ingress_lan_common")];
}) lans)
// builtins.listToAttrs (map (name: {
name = "ingress_${name}";
value = add chain { type = f: f.filter; hook = f: f.ingress; dev = name; prio = -500; policy = f: f.accept; }
[(jump "ingress_wan_common")];
}) wans));
global = add table { family = f: f.inet; } ({
inbound_wan_common = add chain
[(vmap ct.state { established = accept; related = accept; invalid = drop; })]
[(is ct.status (f: f.dnat)) accept]
([(is.eq (bit.and tcp.flags (f: f.syn)) 0) (is.eq ct.state (f: f.new))] ++ logIfWan "${logPrefix}new non-syn " ++ [drop])
# icmp: only accept ping requests
[(is.eq ip.protocol (f: f.icmp)) (is.eq icmp.type (f: f.echo-request)) accept]
# icmpv6: accept no-route info from link-local addresses
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq ip6.saddr (cidr "fe80::/10")) (is.eq icmpv6.code (f: f.no-route))
(is.eq icmpv6.type (f: with f; set [ mld-listener-query mld-listener-report mld-listener-done mld2-listener-report ]))
accept]
# icmpv6: accept commonly useful stuff
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded echo-request echo-reply ])) accept]
# icmpv6: more common stuff
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.code (f: f.no-route))
(is.eq icmpv6.type (f: with f; set [ packet-too-big parameter-problem ])) accept]
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.code (f: f.admin-prohibited))
(is.eq icmpv6.type (f: f.parameter-problem)) accept]
inetInboundWanRules;
# trust the lan
inbound_lan_common = add chain
[accept];
inbound = add chain { type = f: f.filter; hook = f: f.input; prio = f: f.filter; policy = f: f.drop; }
[(vmap meta.iifname ({
lo = accept;
}
// lib.genAttrs lans (_: jump "inbound_lan_common")
// lib.genAttrs wans (_: jump "inbound_wan_common")
))];
#[(log "${logPrefix}inbound drop ")];
forward = add chain { type = f: f.filter; hook = f: f.forward; prio = f: f.filter; policy = f: f.drop; }
[(vmap ct.state { established = accept; related = accept; invalid = drop; })]
[(is ct.status (f: f.dnat)) accept]
# accept lan->wan fw
[(is.eq meta.iifname (setIfNeeded lans)) (is.eq meta.oifname (setIfNeeded wans)) accept]
# accept lan->lan fw
[(is.eq meta.iifname (setIfNeeded lans)) (is.eq meta.oifname (setIfNeeded lans)) accept]
# accept wan->lan icmpv6 forward
[(is.eq meta.iifname (setIfNeeded wans)) (is.eq icmpv6.type (f: with f; set [ destination-unreachable time-exceeded echo-request echo-reply ])) accept]
[(is.eq meta.iifname (setIfNeeded wans)) (is.eq icmpv6.code (f: f.no-route)) (is.eq icmpv6.type (f: with f; set [ packet-too-big parameter-problem ])) accept]
[(is.eq meta.iifname (setIfNeeded wans)) (is.eq icmpv6.code (f: f.admin-prohibited)) (is.eq icmpv6.type (f: f.parameter-problem)) accept]
inetForwardRules
[(log "${logPrefix}forward drop ")];
snat = add chain { type = f: f.nat; hook = f: f.postrouting; prio = f: f.srcnat; policy = f: f.accept; }
# masquerade IPv6 as well: my ISP doesn't provide IPv6, and the VPN only hands out a single IPv6 address
[(is.eq meta.protocol (f: set [ f.ip f.ip6 ])) (is.eq meta.iifname (setIfNeeded lans)) (is.eq meta.oifname (setIfNeeded wans)) masquerade]
inetSnatRules;
dnat = add chain { type = f: f.nat; hook = f: f.prerouting; prio = f: f.dstnat; policy = f: f.accept; }
inetDnatRules;
} // extraInetEntries);
};
netAddressesWithPrefixLen = {
lan4 = cfg.network;
lan6 = cfg.network6;
netns4 = cfg.netnsNet;
netns6 = cfg.netnsNet6;
wg4 = cfg.wgNetwork;
wg6 = cfg.wgNetwork6;
};
# parse a.b.c.d/x into { address, prefixLength }
netParsedCidrs = builtins.mapAttrs (_: router-lib.parseCidr) netAddressesWithPrefixLen;
# generate network cidr from device address
# (normalizeCidr applies network mask to the address)
netCidrs = builtins.mapAttrs (_: v: router-lib.serializeCidr (router-lib.normalizeCidr v)) netParsedCidrs;
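# e.g. (hypothetical values) "192.168.1.1/24" -> parseCidr -> { address = "192.168.1.1"; prefixLength = 24; },
# and normalizeCidr + serializeCidr turn that into the network cidr "192.168.1.0/24"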
netAddresses = builtins.mapAttrs (_: v: v.address) netParsedCidrs // {
netnsWan4 = cfg.wanNetnsAddr;
netnsWan6 = cfg.wanNetnsAddr6;
};
parsedGatewayAddr4 = router-lib.parseIp4 netAddresses.lan4;
parsedGatewayAddr6 = router-lib.parseIp6 netAddresses.lan6;
addToIp' = ip: n: lib.init ip ++ [ (lib.last ip + n) ];
addToIp = ip: n: router-lib.serializeIp (addToIp' ip n);
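# e.g. (assuming the gateway is 192.168.1.1) addToIp parsedGatewayAddr4 1 == "192.168.1.2"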
# server
serverAddress4 = addToIp parsedGatewayAddr4 1;
serverAddress6 = addToIp parsedGatewayAddr6 1;
# robot vacuum (valetudo)
vacuumAddress4 = addToIp parsedGatewayAddr4 2;
vacuumAddress6 = addToIp parsedGatewayAddr6 2;
# light bulb (tasmota)
lightBulbAddress4 = addToIp parsedGatewayAddr4 3;
lightBulbAddress6 = addToIp parsedGatewayAddr6 3;
# server in initrd
serverInitrdAddress4 = addToIp parsedGatewayAddr4 4;
serverInitrdAddress6 = addToIp parsedGatewayAddr6 4;
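# every domain nginx serves on the server (vhost names plus aliases), used for the local DNS
# overrides below; e.g. (hypothetically) a vhost "example.com" with serverAliases
# [ "www.example.com" ] contributes both names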
hosted-domains =
builtins.filter (domain: domain != "localhost")
(builtins.concatLists
(builtins.attrValues
(builtins.mapAttrs
(k: v: [ k ] ++ v.serverAliases)
server-config.services.nginx.virtualHosts)));
in {
imports = [ ./options.nix ./metrics.nix ];
system.stateVersion = "22.11";
boot.kernel.sysctl = {
"net.ipv4.conf.all.src_valid_mark" = true;
"net.ipv4.conf.default.src_valid_mark" = true;
"net.ipv4.conf.all.forwarding" = true;
"net.ipv6.conf.all.forwarding" = true;
};
services.openssh.enable = true;
services.fail2ban = {
enable = true;
ignoreIP = [ netCidrs.lan4 netCidrs.lan6 ];
maxretry = 10;
};
router-settings.dhcpReservations = [
{ ipAddress = serverAddress4;
macAddress = cfg.serverMac; }
{ ipAddress = vacuumAddress4;
macAddress = cfg.vacuumMac; }
{ ipAddress = lightBulbAddress4;
macAddress = cfg.lightBulbMac; }
{ ipAddress = serverInitrdAddress4;
macAddress = cfg.serverInitrdMac; }
];
router-settings.dhcp6Reservations = [
{ ipAddress = serverAddress6;
duid = cfg.serverDuid;
macAddress = cfg.serverMac; }
{ ipAddress = vacuumAddress6;
macAddress = cfg.vacuumMac; }
{ ipAddress = lightBulbAddress6;
macAddress = cfg.lightBulbMac; }
{ ipAddress = serverInitrdAddress6;
duid = cfg.serverInitrdDuid;
macAddress = cfg.serverInitrdMac; }
];
# dnat to server, take ports from its firewall config
router-settings.dnatRules = let
bannedPorts = [
631 9100 # printing
5353 # avahi
# pass it through to VPN rather than WAN
server-config.services.qbittorrent-nox.torrent.port
];
inherit (server-config.networking.firewall) allowedTCPPorts allowedTCPPortRanges allowedUDPPorts allowedUDPPortRanges;
tcpAndUdp = builtins.filter (x: !builtins.elem x bannedPorts && builtins.elem x allowedTCPPorts) allowedUDPPorts;
tcpOnly = builtins.filter (x: !builtins.elem x (bannedPorts ++ allowedUDPPorts)) allowedTCPPorts;
udpOnly = builtins.filter (x: !builtins.elem x (bannedPorts ++ allowedTCPPorts)) allowedUDPPorts;
rangesTcpAndUdp = builtins.filter (x: builtins.elem x allowedTCPPortRanges) allowedUDPPortRanges;
rangesTcpOnly = builtins.filter (x: !builtins.elem x allowedUDPPortRanges) allowedTCPPortRanges;
rangesUdpOnly = builtins.filter (x: !builtins.elem x allowedTCPPortRanges) allowedUDPPortRanges;
in lib.optional (tcpAndUdp != [ ]) {
port = setIfNeeded tcpAndUdp; tcp = true; udp = true;
target4.address = serverAddress4; target6.address = serverAddress6;
} ++ lib.optional (tcpOnly != [ ]) {
port = setIfNeeded tcpOnly; tcp = true; udp = false;
target4.address = serverAddress4; target6.address = serverAddress6;
} ++ lib.optional (udpOnly != [ ]) {
port = setIfNeeded udpOnly; tcp = false; udp = true;
target4.address = serverAddress4; target6.address = serverAddress6;
} ++ lib.flip map rangesTcpAndUdp (range: {
port = notnft.dsl.range range.from range.to; tcp = true; udp = true;
target4.address = serverAddress4; target6.address = serverAddress6;
}) ++ lib.flip map rangesTcpOnly (range: {
port = notnft.dsl.range range.from range.to; tcp = true; udp = false;
target4.address = serverAddress4; target6.address = serverAddress6;
}) ++ lib.flip map rangesUdpOnly (range: {
port = notnft.dsl.range range.from range.to; tcp = false; udp = true;
target4.address = serverAddress4; target6.address = serverAddress6;
}) ++ [ {
port = 24; tcp = true; udp = true; target4.port = 22; target6.port = 22;
target4.address = serverInitrdAddress4; target6.address = serverInitrdAddress6;
} {
inVpn = true; port = server-config.services.qbittorrent-nox.torrent.port; tcp = true; udp = true;
target4.address = serverAddress4; target6.address = serverAddress6;
} ];
router.enable = true;
# 2.4g ap
router.interfaces.wlan0 = {
bridge = "br0";
hostapd.enable = true;
hostapd.settings = {
inherit (cfg) ssid;
hw_mode = "g";
channel = 3;
chanlist = [ 3 ];
supported_rates = [ 60 90 120 180 240 360 480 540 ];
basic_rates = [ 60 120 240 ];
ht_capab = "[LDPC][SHORT-GI-20][SHORT-GI-40][TX-STBC][RX-STBC1][MAX-AMSDU-7935]";
} // hapdConfig;
};
# 5g ap
router.interfaces.wlan1 = {
bridge = "br0";
hostapd.enable = true;
hostapd.settings = {
ssid = "${cfg.ssid}_5G";
ieee80211h = true;
hw_mode = "a";
channel = 60;
chanlist = [ 60 ];
tx_queue_data2_burst = 2;
ht_capab = "[HT40+][LDPC][SHORT-GI-20][SHORT-GI-40][TX-STBC][RX-STBC1][MAX-AMSDU-7935]";
vht_oper_chwidth = 1; # 80mhz ch width
vht_oper_centr_freq_seg0_idx = 42;
vht_capab = "[RXLDPC][SHORT-GI-80][SHORT-GI-160][TX-STBC-2BY1][SU-BEAMFORMER][SU-BEAMFORMEE][MU-BEAMFORMER][MU-BEAMFORMEE][RX-ANTENNA-PATTERN][TX-ANTENNA-PATTERN][RX-STBC-1][SOUNDING-DIMENSION-4][BF-ANTENNA-4][VHT160][MAX-MPDU-11454][MAX-A-MPDU-LEN-EXP7]";
} // hapdConfig;
};
# Unfortunately, this router's networking hardware is highly prone to breakage
# Many people have reported their routers' TCP offloading being faulty, with an error
# like this being thrown at random (it can be shortly after boot, or in a few days):
# NETDEV WATCHDOG: eth0 (mtk_soc_eth): transmit queue 3 timed out 5388 ms
# My hardware broke after a few months of use as well, so here's a potential fix
router.interfaces.eth0.extraInitCommands = ''
${pkgs.ethtool}/bin/ethtool --offload eth0 tso off
'';
# ethernet lan0-3
router.interfaces.lan0 = {
bridge = "br0";
systemdLink.linkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan1 = {
bridge = "br0";
systemdLink.linkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan2 = {
bridge = "br0";
systemdLink.linkConfig.MACAddressPolicy = "persistent";
};
router.interfaces.lan3 = {
bridge = "br0";
systemdLink.linkConfig.MACAddressPolicy = "persistent";
};
# sfp lan4
router.interfaces.lan4 = {
bridge = "br0";
systemdLink.linkConfig.MACAddressPolicy = "persistent";
};
/*
# sfp lan5
router.interfaces.lan5 = {
bridge = "br0";
# I could try to figure out why this doesn't work... but I don't even have an SFP module to plug into it
systemdLink.matchConfig.OriginalName = "eth1";
systemdLink.linkConfig.MACAddressPolicy = "persistent";
};
*/
# ethernet wan
router.tunnels.sittun0 = lib.mkIf (cfg.vpn.tunnel.mode == "sit") {
mode = "sit";
remote = cfg.vpn.tunnel.ip;
local = cfg.vpn.tunnel.localIp;
ttl = 255;
};
router.interfaces.sittun0 = lib.mkIf (cfg.vpn.tunnel.mode == "sit") {
dependentServices = [
(lib.mkIf cfg.vpn.wireguard.enable { service = "wireguard-${vpn_iface}"; inNetns = false; })
(lib.mkIf cfg.vpn.openvpn.enable { service = "openvpn-client"; inNetns = false; })
];
ipv6.addresses = [ (router-lib.parseCidr cfg.vpn.tunnel.ifaceAddr) ];
ipv6.routes = [ { extraArgs = [ "::/0" "dev" "sittun0" ]; } ];
networkNamespace = "wan";
};
router.interfaces.wan = {
dependentServices = [
(lib.mkIf cfg.vpn.wireguard.enable { service = "wireguard-${vpn_iface}"; inNetns = false; })
(lib.mkIf cfg.vpn.openvpn.enable { service = "openvpn-client"; inNetns = false; })
{ service = "wireguard-wg1"; inNetns = false; }
];
systemdLink.linkConfig.MACAddressPolicy = "none";
systemdLink.linkConfig.MACAddress = cfg.routerMac;
dhcpcd = {
enable = true;
# technically this should be assigned to br0 instead of veth-wan-b
# however, br0 is in a different namespace!
# Considering this doesn't work at all because my ISP doesn't offer IPv6,
# I'd say this is "good enough" since it might still work in the wan
# namespace, though I can't test it.
extraConfig = ''
interface wan
ipv6rs
ia_na 0
ia_pd 1 veth-wan-b/0
'';
};
networkNamespace = "wan";
};
# disable default firewall as it uses iptables
# (and we have our own firewall)
networking.firewall.enable = false;
# br0, which bridges all lan devices
# this is "the" lan device
router.interfaces.br0 = {
dependentServices = [ { service = "unbound"; bindType = "wants"; } ];
ipv4.addresses = lib.toList (netParsedCidrs.lan4 // {
dns = [ netAddresses.lan4 ];
keaSettings.reservations = map (res: {
hw-address = res.macAddress;
ip-address = res.ipAddress;
}) cfg.dhcpReservations;
});
ipv6.addresses = lib.toList (netParsedCidrs.lan6 // {
dns = [ netAddresses.lan6 ];
gateways = [ netAddresses.lan6 ];
radvdSettings.AdvAutonomous = true;
coreradSettings.autonomous = true;
# # don't allocate addresses for most devices
# keaSettings.pools = [ ];
# just assign the reservations
keaSettings.reservations = map (res:
(if res.duid != null then { duid = res.duid; } else { hw-address = res.macAddress; }) // {
ip-addresses = [ res.ipAddress ];
}) cfg.dhcp6Reservations;
});
ipv4.routes = [
{ extraArgs = [ netCidrs.lan4 "dev" "br0" "proto" "kernel" "scope" "link" "src" netAddresses.lan4 "table" wan_table ]; }
];
ipv6.routes = [
{ extraArgs = [ netCidrs.lan6 "dev" "br0" "proto" "kernel" "metric" "256" "pref" "medium" "table" wan_table ]; }
];
ipv4.kea.enable = true;
ipv6.corerad.enable = true;
ipv6.kea.enable = true;
};
router.networkNamespaces.default = {
# set routing table depending on packet mark
rules = [
{ ipv6 = false; extraArgs = [ "fwmark" wan_table "table" wan_table ]; }
{ ipv6 = true; extraArgs = [ "fwmark" wan_table "table" wan_table ]; }
# below is dnat config
] ++ builtins.concatLists (map (rule: let
table = if rule.inVpn then vpn_table else wan_table;
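# forEachPort maps a port expression (a plain int, an nftables set, or a range in
# notnft's { min, max } or plain [ from to ] form) to one func application per entry,
# e.g. a range turns into a single "min-max" sport match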
forEachPort = func: port:
if builtins.isInt port then [ (func port) ]
else if port?set then builtins.concatLists (map (forEachPort func) port.set)
else if port?range.min then let inherit (port.range) min max; in [ (func "${toString min}-${toString max}") ]
else if port?range then let max = builtins.elemAt port.range 1; min = builtins.head port.range; in [ (func "${toString min}-${toString max}" ) ]
else throw "Unsupported expr: ${builtins.toJSON port}";
gen = len: proto: tgt:
forEachPort
(port: [ "from" "${tgt.address}/${toString len}" "ipproto" proto "sport" port "table" table ])
(if tgt.port == null then rule.port else tgt.port);
in lib.optionals (rule.tcp && rule.target4 != null) (map (x: { ipv6 = false; extraArgs = x; }) (gen 32 "tcp" rule.target4))
++ lib.optionals (rule.udp && rule.target4 != null) (map (x: { ipv6 = false; extraArgs = x; }) (gen 32 "udp" rule.target4))
++ lib.optionals (rule.tcp && rule.target6 != null) (map (x: { ipv6 = true; extraArgs = x; }) (gen 128 "tcp" rule.target6))
++ lib.optionals (rule.udp && rule.target6 != null) (map (x: { ipv6 = true; extraArgs = x; }) (gen 128 "udp" rule.target6))
) (builtins.filter (x: (x.tcp || x.udp) && dnatRuleMode x == "rule") cfg.dnatRules));
# nftables rules
# things to note: this has the code for switching between rtables
# otherwise, boring stuff
nftables.jsonRules = let
lanSet = notnft.dsl.set [ "br0" "wg1" ];
in mkRules {
selfIp4 = netAddresses.lan4;
selfIp6 = netAddresses.lan6;
lans = [ "br0" "wg1" ];
wans = [ vpn_iface "veth-wan-a" ];
logPrefix = "lan ";
# I'll leave this here in case I get weird IPv6 issues again
# inetSnatRules = with notnft.dsl; with payload; [
# [(is.eq meta.protocol (f: f.ip6)) (is.eq meta.iifname "br0") (is.eq meta.oifname "br0") masquerade]
# ];
netdevIngressWanRules = with notnft.dsl; with payload; [
# only run the reverse-path (oif) check on traffic from the vpn
# don't check it on veth-wan-a: dnat complicates things there, and packets coming from wan are already checked in the wan namespace
[(is.eq meta.iifname (set [ vpn_iface "wg1" ])) (is.eq (fib (f: with f; [ saddr mark iif ]) (f: f.oif)) missing) (log "lan oif missing ") drop]
];
inetDnatRules =
builtins.concatLists (map
(rule: let
protocols = dnatRuleProtos rule;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optional (rule4 != null)
[ (is.eq meta.iifname vpn_iface) (is.eq ip.protocol protocols) (is.eq th.dport rule.port)
(if rule4.port == null then dnat.ip rule4.address else dnat.ip rule4.address rule4.port) ]
++ lib.optional (rule6 != null)
[ (is.eq meta.iifname vpn_iface) (is.eq ip6.nexthdr protocols) (is.eq th.dport rule.port)
(if rule6.port == null then dnat.ip6 rule6.address else dnat.ip6 rule6.address rule6.port) ]
)
(builtins.filter (x: x.inVpn && (x.tcp || x.udp)) cfg.dnatRules))
++ (with notnft.dsl; with payload; [
# hijack Microsoft DNS server hosted on Cloudflare
[(is.eq meta.iifname lanSet) (is.eq ip.daddr "162.159.36.2") (is.eq ip.protocol (f: set [ f.tcp f.udp ])) (dnat.ip netAddresses.lan4)]
] ++ lib.optionals (cfg.naughtyMacs != []) [
[(is.eq meta.iifname lanSet) (is.eq ether.saddr (setIfNeeded cfg.naughtyMacs)) (is.eq ip.protocol (f: set [ f.tcp f.udp ]))
(is.eq th.dport (set [ 53 853 ])) (dnat.ip netAddresses.lan4)]
[(is.eq meta.iifname lanSet) (is.eq ether.saddr (setIfNeeded cfg.naughtyMacs)) (is.eq ip6.nexthdr (f: set [ f.tcp f.udp ]))
(is.eq th.dport (set [ 53 853 ])) (dnat.ip6 netAddresses.lan6)]
]);
inetForwardRules = with notnft.dsl; with payload; [
# allow access to lan from the wan namespace
[(is.eq meta.iifname "veth-wan-a") (is.eq meta.oifname lanSet) accept]
# allow dnat ("ct status dnat" doesn't work)
];
inetInboundWanRules = with notnft.dsl; with payload; [
[(is.eq ip.saddr (cidr netCidrs.netns4)) accept]
[(is.eq ip6.saddr (cidr netCidrs.netns6)) accept]
];
extraInetEntries = with notnft.dsl; with payload; {
block4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; } [
(cidr "194.190.137.0" 24)
(cidr "194.190.157.0" 24)
(cidr "194.190.21.0" 24)
(cidr "194.226.130.0" 23)
# no idea what this IP is, but it got a port 53 connection from one of the devices in this network - so off it goes
"84.1.213.156"
];
block6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
# these sets get populated by unbound
force_unvpn4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; };
force_unvpn6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
force_vpn4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; };
force_vpn6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
allow_iot4 = add set { type = f: f.ipv4_addr; flags = f: with f; [ interval ]; };
allow_iot6 = add set { type = f: f.ipv6_addr; flags = f: with f; [ interval ]; };
# TODO: is type=route hook=output better? It might help get rid of the routing inconsistency
# between router-originated and forwarded traffic. The problem is type=route isn't supported
# for family=inet, so I don't care enough to test it right now.
prerouting = add chain { type = f: f.filter; hook = f: f.prerouting; prio = f: f.filter; policy = f: f.accept; } ([
[(mangle meta.mark ct.mark)]
[(is.ne meta.mark 0) accept]
# ban requests to/from block4/block6
# (might as well do this in ingress, but I'm lazy)
[(is.eq ip.daddr "@block4") (log "block4 ") drop]
[(is.eq ip6.daddr "@block6") (log "block6 ") drop]
[(is.eq ip.saddr "@block4") (log "block4/s ") drop]
[(is.eq ip6.saddr "@block6") (log "block6/s ") drop]
# default to no vpn...
# [(mangle meta.mark wan_table)]
# default to vpn...
[(mangle meta.mark vpn_table)]
[(is.eq meta.mark 0)]
# ...but unvpn traffic to/from force_unvpn4/force_unvpn6
[(is.eq ip.daddr "@force_unvpn4") (mangle meta.mark wan_table)]
[(is.eq ip6.daddr "@force_unvpn6") (mangle meta.mark wan_table)]
[(is.eq ip.saddr "@force_unvpn4") (mangle meta.mark wan_table)]
[(is.eq ip6.saddr "@force_unvpn6") (mangle meta.mark wan_table)]
# ...force vpn to/from force_vpn4/force_vpn6
# (disable this if it breaks some sites)
[(is.eq ip.daddr "@force_vpn4") (mangle meta.mark vpn_table)]
[(is.eq ip6.daddr "@force_vpn6") (mangle meta.mark vpn_table)]
[(is.eq ip.saddr "@force_vpn4") (mangle meta.mark vpn_table)]
[(is.eq ip6.saddr "@force_vpn6") (mangle meta.mark vpn_table)]
# block requests to port 25 from hosts other than the server so they can't send mail pretending to originate from my domain
# only do this for lans since traffic from other interfaces isn't forwarded to wan
[(is.eq meta.iifname lanSet) (is.ne ether.saddr cfg.serverMac) (is.eq meta.l4proto (f: f.tcp)) (is.eq tcp.dport 25) (log "smtp ") drop]
# don't vpn smtp requests so spf works fine (and in case the vpn blocks requests over port 25, which it usually does)
[(is.eq meta.l4proto (f: f.tcp)) (is.eq tcp.dport 25) (mangle meta.mark wan_table)]
] ++ # 1. dnat non-vpn: change rttable to wan
builtins.concatLists (map
(rule: let
protocols = dnatRuleProtos rule;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null) [
[ (is.eq meta.iifname lanSet) (is.eq ip.protocol protocols) (is.eq ip.saddr rule4.address)
(is.eq th.sport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark wan_table) ]
[ (is.eq meta.iifname "veth-wan-a") (is.eq ip.protocol protocols) (is.eq ip.daddr rule4.address)
(is.eq th.dport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark wan_table) ]
] ++ lib.optionals (rule6 != null) [
[ (is.eq meta.iifname lanSet) (is.eq ip6.nexthdr protocols) (is.eq ip6.saddr rule6.address)
(is.eq th.sport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark wan_table) ]
[ (is.eq meta.iifname "veth-wan-a") (is.eq ip6.nexthdr protocols) (is.eq ip6.daddr rule6.address)
(is.eq th.dport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark wan_table) ]
])
(builtins.filter (x: !x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "mark") cfg.dnatRules))
++ # 2. dnat vpn: change rttable to vpn
builtins.concatLists (map
(rule: let
protocols = dnatRuleProtos rule;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null) [
[ (is.eq meta.iifname lanSet) (is.eq ip.protocol protocols) (is.eq ip.saddr rule4.address)
(is.eq th.sport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark vpn_table) ]
[ (is.eq meta.iifname vpn_iface) (is.eq ip.protocol protocols) (is.eq ip.daddr rule4.address)
(is.eq th.dport (if rule4.port != null then rule4.port else rule.port)) (mangle meta.mark vpn_table) ]
] ++ lib.optionals (rule6 != null) [
[ (is.eq meta.iifname lanSet) (is.eq ip6.nexthdr protocols) (is.eq ip6.saddr rule6.address)
(is.eq th.sport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark vpn_table) ]
[ (is.eq meta.iifname vpn_iface) (is.eq ip6.nexthdr protocols) (is.eq ip6.daddr rule6.address)
(is.eq th.dport (if rule6.port != null then rule6.port else rule.port)) (mangle meta.mark vpn_table) ]
])
(builtins.filter (x: x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "mark") cfg.dnatRules))
++ [
# for the robot vacuum, only allow traffic to/from allow_iot4/allow_iot6
[(is.eq ether.saddr cfg.vacuumMac) (is.ne ip.daddr (cidr netCidrs.lan4)) (is.ne ip.daddr "@allow_iot4") (log "iot4 ") drop]
[(is.eq ether.saddr cfg.vacuumMac) (is.ne ip6.daddr (cidr netCidrs.lan6)) (is.ne ip6.daddr "@allow_iot6") (log "iot6 ") drop]
[(is.eq ether.daddr cfg.vacuumMac) (is.ne ip.saddr (cidr netCidrs.lan4)) (is.ne ip.saddr "@allow_iot4") (log "iot4/d ") drop]
[(is.eq ether.daddr cfg.vacuumMac) (is.ne ip6.saddr (cidr netCidrs.lan6)) (is.ne ip6.saddr "@allow_iot6") (log "iot6/d ") drop]
# MSS clamping - since VPN reduces max MTU
# We only do this for the first packet in a connection, which should be enough
[(is.eq meta.nfproto (f: f.ipv4)) (is.eq meta.mark vpn_table) (is.gt tcpOpt.maxseg.size vpn_ipv4_mss)
(mangle tcpOpt.maxseg.size vpn_ipv4_mss)]
[(is.eq meta.nfproto (f: f.ipv6)) (is.eq meta.mark vpn_table) (is.gt tcpOpt.maxseg.size vpn_ipv6_mss)
(mangle tcpOpt.maxseg.size vpn_ipv6_mss)]
# warn about dns requests to foreign servers
# TODO: check back and see if I should forcefully redirect DNS requests from certain IPs to router
[(is.eq meta.iifname lanSet) (is.ne ip.daddr (netAddresses.lan4)) (is.eq ip.protocol (f: set [ f.tcp f.udp ]))
(is.eq th.dport (set [ 53 853 ])) (log "dns4 ")]
[(is.eq meta.iifname lanSet) (is.ne ip6.daddr (netAddresses.lan6)) (is.eq ip6.nexthdr (f: set [ f.tcp f.udp ]))
(is.eq th.dport (set [ 53 853 ])) (log "dns6 ")]
# finally, preserve the mark via conntrack
[(mangle ct.mark meta.mark)]
]);
};
};
};
# veths are virtual ethernet cables
# veth-wan-a - located in the default namespace
# veth-wan-b - located in the wan namespace
# this allows routing traffic to wan namespace from default namespace via veth-wan-a
# (and vice versa)
router.veths.veth-wan-a.peerName = "veth-wan-b";
router.interfaces.veth-wan-a = {
ipv4.addresses = [ netParsedCidrs.netns4 ];
ipv6.addresses = [ netParsedCidrs.netns6 ];
ipv4.routes = [
# default config duplicated for wan_table
{ extraArgs = [ netCidrs.netns4 "dev" "veth-wan-a" "proto" "kernel" "scope" "link" "src" netAddresses.netns4 "table" wan_table ]; }
# default all traffic to wan in wan_table
{ extraArgs = [ "default" "via" netAddresses.netnsWan4 "table" wan_table ]; }
];
ipv6.routes = [
# default config duplicated for wan_table
{ extraArgs = [ netCidrs.netns6 "dev" "veth-wan-a" "proto" "kernel" "metric" "256" "pref" "medium" "table" wan_table ]; }
# default all traffic to wan in wan_table
{ extraArgs = [ "default" "via" netAddresses.netnsWan6 "table" wan_table ]; }
];
};
router.interfaces.veth-wan-b = {
networkNamespace = "wan";
ipv4.addresses = [ {
address = netAddresses.netnsWan4;
inherit (netParsedCidrs.netns4) prefixLength;
} ];
ipv6.addresses = [ {
address = netAddresses.netnsWan6;
inherit (netParsedCidrs.netns6) prefixLength;
} ];
# allow wan->default namespace communication
ipv4.routes = [
{ extraArgs = [ netCidrs.lan4 "via" netAddresses.netns4 ]; }
];
ipv6.routes = [
{ extraArgs = [ netCidrs.lan6 "via" netAddresses.netns6 ]; }
];
};
router.networkNamespaces.wan = {
sysctl = {
"net.ipv4.conf.all.forwarding" = true;
"net.ipv6.conf.all.forwarding" = true;
};
# this is the even more boring nftables config
nftables.jsonRules = let
wans = [ "wan" ] ++ lib.optional (cfg.vpn.tunnel.mode == "sit") "sittun0";
in mkRules {
selfIp4 = netAddresses.netnsWan4;
selfIp6 = netAddresses.netnsWan6;
inherit wans;
lans = [ "veth-wan-b" ];
netdevIngressWanRules = with notnft.dsl; with payload; [
[(is.eq (fib (f: with f; [ saddr mark iif ]) (f: f.oif)) missing) (log "wan oif missing ") drop]
];
inetDnatRules =
builtins.concatLists (map
(rule: let
protocols = dnatRuleProtos rule;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null) [
[ (is.eq meta.iifname (setIfNeeded wans)) (is.eq ip.protocol protocols) (is.eq th.dport rule.port)
(if rule4.port == null then dnat.ip rule4.address else dnat.ip rule4.address rule4.port) ]
] ++ lib.optionals (rule6 != null) [
[ (is.eq meta.iifname (setIfNeeded wans)) (is.eq ip6.nexthdr protocols) (is.eq th.dport rule.port)
(if rule6.port == null then dnat.ip6 rule6.address else dnat.ip6 rule6.address rule6.port) ]
])
(builtins.filter (x: !x.inVpn && (x.tcp || x.udp)) cfg.dnatRules));
inetSnatRules =
# historically, I needed this; now I've switched to ip rules
# if I ever need it again, it's right here
builtins.concatLists (map
(rule: let
protocols = dnatRuleProtos rule;
rule4 = rule.target4; rule6 = rule.target6;
in with notnft.dsl; with payload;
lib.optionals (rule4 != null) [
[ (is.eq meta.iifname (setIfNeeded wans)) (is.eq meta.oifname "veth-wan-b") (is.eq ip.protocol protocols)
(is.eq th.dport (if rule4.port != null then rule4.port else rule.port)) (is.eq ip.daddr rule4.address) masquerade ]
] ++ lib.optionals (rule6 != null) [
[ (is.eq meta.iifname (setIfNeeded wans)) (is.eq meta.oifname "veth-wan-b") (is.eq ip6.nexthdr protocols)
(is.eq th.dport (if rule6.port != null then rule6.port else rule.port)) (is.eq ip6.daddr rule6.address) masquerade ]
])
(builtins.filter (x: !x.inVpn && (x.tcp || x.udp) && dnatRuleMode x == "snat") cfg.dnatRules));
logPrefix = "wan ";
inetInboundWanRules = with notnft.dsl; with payload; [
# DHCP
[(is.eq meta.nfproto (x: x.ipv4)) (is.eq udp.dport 68) accept]
[(is.eq meta.nfproto (x: x.ipv6)) (is.eq udp.dport 546) accept]
# igmp, used for setting up multicast groups
[(is.eq ip.protocol (f: f.igmp)) accept]
# accept router solicitation stuff
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.type (f: with f; set [ nd-router-solicit nd-router-advert ])) accept]
# accept neighbor solicitation stuff
[(is.eq ip6.nexthdr (f: f.ipv6-icmp)) (is.eq icmpv6.code (f: f.no-route))
(is.eq icmpv6.type (f: with f; set [ nd-neighbor-solicit nd-neighbor-advert ]))
accept]
# SSH (the separate wan-namespace sshd on port 23; 22 is forwarded to the server)
[(is.eq meta.l4proto (f: f.tcp)) (is.eq tcp.dport 23) accept]
# wg1
[(is.eq meta.l4proto (f: with f; set [ udp tcp ])) (is.eq th.dport (set [ 854 855 ])) accept]
];
};
};
# vpn socket is in wan namespace, meaning traffic gets sent through the wan namespace
# vpn interface is in default namespace, meaning it can be used in the default namespace
networking.wireguard.interfaces.${vpn_iface} = lib.mkIf cfg.vpn.wireguard.enable
(cfg.vpn.wireguard.config // {
socketNamespace = "wan";
interfaceNamespace = "init";
});
# see https://pavluk.org/blog/2022/01/26/nixos_router.html
# (ipv6 doesn't work without this, for whatever reason)
systemd.services.ping-ipv6 = {
after = [ "network.target" "netns-wan.service" ];
wants = [ "netns-wan.service" ];
wantedBy = [ "default.target" ];
serviceConfig = {
ExecStart = "${pkgs.iputils}/bin/ping -q ${netAddresses.netnsWan6}";
Restart = "on-failure";
RestartSec = "30s";
};
};
systemd.services.vpn-tunnel = lib.mkIf (cfg.vpn.tunnel.mode == "ssh") {
description = "VPN Tunnel";
wantedBy = [
"multi-user.target"
(lib.mkIf cfg.vpn.openvpn.enable "openvpn-client.service")
(lib.mkIf cfg.vpn.wireguard.enable "wireguard-${vpn_iface}.service")
];
after = [ "network.target" "netns-wan.service" ];
bindsTo = [ "netns-wan.service" ];
stopIfChanged = false;
path = [ config.programs.ssh.package ];
script = ''
${config.programs.ssh.package}/bin/ssh \
-i /secrets/vpn/sshtunnel.key \
-L ${netAddresses.netnsWan4}:${toString cfg.vpn.tunnel.localPort}:127.0.0.1:${toString cfg.vpn.tunnel.remotePort} \
-p ${toString cfg.vpn.tunnel.port} \
-N -T -v \
${cfg.vpn.tunnel.user}@${cfg.vpn.tunnel.ip}
'';
serviceConfig = {
Restart = "always";
RestartSec = "10s";
Type = "simple";
NetworkNamespacePath = "/var/run/netns/wan";
};
};
systemd.services.openvpn-client = lib.mkIf cfg.vpn.openvpn.enable {
wantedBy = [ "nftables-netns-default.service" ];
};
services.openvpn.servers = lib.mkIf cfg.vpn.openvpn.enable {
client.config = cfg.vpn.openvpn.config;
};
# use main netns's address instead of 127.0.0.1
# this ensures all network namespaces can access it
networking.resolvconf.extraConfig = ''
name_servers="${netAddresses.netns4} ${netAddresses.netns6}"
'';
users.users.${config.common.mainUsername}.extraGroups = [ config.services.unbound.group ];
services.unbound = {
enable = true;
package = pkgs.unbound-full;
localControlSocketPath = "/run/unbound/unbound.ctl";
# we override resolvconf above manually
resolveLocalQueries = false;
settings = {
server = rec {
interface = [ netAddresses.netns4 netAddresses.netns6 netAddresses.lan4 netAddresses.lan6 ];
access-control = [ "${netCidrs.netns4} allow" "${netCidrs.netns6} allow" "${netCidrs.lan4} allow" "${netCidrs.lan6} allow" ];
aggressive-nsec = true;
do-ip6 = true;
module-config = ''"validator dynlib python iterator"'';
local-zone = [
# incompatible with avahi resolver
# ''"local." static''
''"${server-config.server.domainName}." typetransparent''
];
local-data = builtins.concatLists (map (domain:
[
''"${domain}. A ${serverAddress4}"''
''"${domain}. AAAA ${serverAddress6}"''
]) hosted-domains);
# incompatible with avahi resolver
# ++ [
# ''"retracker.local. A ${netAddresses.lan4}"''
# ''"retracker.local. AAAA ${netAddresses.lan6}"''
# ];
# performance tuning
num-threads = 4; # cpu core count
msg-cache-slabs = 4; # nearest power of 2 to num-threads
rrset-cache-slabs = msg-cache-slabs;
infra-cache-slabs = msg-cache-slabs;
key-cache-slabs = msg-cache-slabs;
so-reuseport = true;
msg-cache-size = "50m"; # (default 4m)
rrset-cache-size = "100m"; # msg*2 (default 4m)
# timeouts
unknown-server-time-limit = 752; # default=376
};
# normally this would refer to the flake path, but then the service would be restarted on every flake update
# instead, copy the script to a standalone file in the nix store
python.python-script = builtins.toFile "avahi-resolver-v2.py" (builtins.readFile ./avahi-resolver-v2.py);
dynlib.dynlib-file = "${pkgs.unbound-mod}/lib/libunbound_mod.so";
remote-control.control-enable = true;
};
};
environment.etc."unbound/iot_ips.json".text = builtins.toJSON [
# local multicast
"224.0.0.0/24"
# local broadcast
"255.255.255.255"
];
environment.etc."unbound/iot_domains.json".text = builtins.toJSON [
# ntp time sync
"pool.ntp.org"
# valetudo update check
"api.github.com" "github.com" "*.githubusercontent.com"
];
networking.hosts."${serverAddress4}" = hosted-domains;
networking.hosts."${serverAddress6}" = hosted-domains;
systemd.services.unbound = lib.mkIf config.services.unbound.enable {
environment.PYTHONPATH = let
unbound-python = pkgs.python3.withPackages (ps: with ps; [ pydbus dnspython ]);
in
"${unbound-python}/${unbound-python.sitePackages}";
# see https://github.com/NixOS/nixpkgs/pull/310514
environment.GI_TYPELIB_PATH = "${lib.getLib pkgs.glib}/lib/girepository-1.0";
environment.MDNS_ACCEPT_NAMES = "^(.*\\.)?local\\.$";
# resolve retracker.local to whatever router.local resolves to
# we can't add a local zone while the avahi resolver is in use, so we resort to hacks like this
environment.DOMAIN_NAME_OVERRIDES = "retracker.local->router.local";
# load vpn_domains.json and vpn_ips.json, as well as unvpn_domains.json and unvpn_ips.json
# resolve the domains, append the results to the ip lists, and add everything to the nftables sets
# TODO: allow changing family/table name
environment.NFT_QUERIES = "vpn:force_vpn4,force_vpn6;unvpn!:force_unvpn4,force_unvpn6;iot:allow_iot4,allow_iot6";
serviceConfig.EnvironmentFile = "/secrets/unbound_env";
# unbound must start after nftables is set up, since the resolver module populates the nftables sets
after = [ "nftables-netns-default.service" "avahi-daemon.service" ];
wants = [ "nftables-netns-default.service" "avahi-daemon.service" ];
# allow it to call nft
serviceConfig.AmbientCapabilities = [ "CAP_NET_ADMIN" ];
serviceConfig.CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
};
systemd.services.update-rkn-blacklist = {
# fetch vpn_ips.json and vpn_domains.json for unbound
script = ''
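# download each list into a temp file, run it through jq (which fails on invalid JSON,
# so a bad download never replaces the existing file), then hand it over to unbound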
BLACKLIST=$(${pkgs.coreutils}/bin/mktemp) || exit 1
${pkgs.curl}/bin/curl "https://reestr.rublacklist.net/api/v3/ips/" -o "$BLACKLIST" || exit 1
${pkgs.jq}/bin/jq ".[0:0]" "$BLACKLIST" && chown unbound:unbound "$BLACKLIST" && mv "$BLACKLIST" /var/lib/unbound/vpn_ips.json
${pkgs.curl}/bin/curl "https://reestr.rublacklist.net/api/v3/domains/" -o "$BLACKLIST" || exit 1
${pkgs.jq}/bin/jq ".[0:0]" "$BLACKLIST" && chown unbound:unbound "$BLACKLIST" && mv "$BLACKLIST" /var/lib/unbound/vpn_domains.json
${pkgs.curl}/bin/curl "https://reestr.rublacklist.net/api/v3/dpi/" -o "$BLACKLIST" || exit 1
${pkgs.jq}/bin/jq ".[0:0]" "$BLACKLIST" && chown unbound:unbound "$BLACKLIST" && mv "$BLACKLIST" /var/lib/unbound/vpn_dpi.json
'';
serviceConfig = {
Type = "oneshot";
};
};
systemd.timers.update-rkn-blacklist = {
wantedBy = [ "timers.target" ];
partOf = [ "update-rkn-blacklist.service" ];
timerConfig.OnCalendar = [ "*-*-* 00:00:00" ]; # every day
timerConfig.RandomizedDelaySec = 43200; # execute at random time in the first 12 hours
};
# run an extra sshd so we can connect even if forwarding/routing between namespaces breaks
# (use port 23 because 22 is forwarded to the server)
systemd.services.sshd-wan = {
description = "SSH Daemon (WAN)";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "netns-wan.service" ];
bindsTo = [ "netns-wan.service" ];
stopIfChanged = false;
path = with pkgs; [ gawk config.programs.ssh.package ];
environment.LD_LIBRARY_PATH = config.system.nssModules.path;
restartTriggers = [ config.environment.etc."ssh/sshd_config".source ];
preStart = config.systemd.services.sshd.preStart;
serviceConfig = {
ExecStart = "${config.programs.ssh.package}/bin/sshd -D -f /etc/ssh/sshd_config -p 23";
KillMode = "process";
Restart = "always";
Type = "simple";
NetworkNamespacePath = "/var/run/netns/wan";
};
};
services.printing = {
enable = true;
allowFrom = [ "localhost" netCidrs.lan4 netCidrs.lan6 ];
browsing = true;
clientConf = ''
ServerName router.local
'';
defaultShared = true;
drivers = [ pkgs.hplip ];
startWhenNeeded = false;
};
# share printers (and allow unbound to resolve .local)
services.avahi = {
enable = true;
hostName = "router";
allowInterfaces = [ "br0" ];
publish = {
enable = true;
addresses = true;