tcp_cache.c
/*
* Copyright (c) 2015-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/* TCP-cache to store and retrieve TCP-related information */
#include <net/flowhash.h>
#include <net/route.h>
#include <net/necp.h>
#include <netinet/in_pcb.h>
#include <netinet/mptcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>
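/* Holds either an IPv4 or an IPv6 address */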
typedef union {
struct in_addr addr;
struct in6_addr addr6;
} in_4_6_addr;
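/*
* Source key for the heuristics and the cache: the interface's network
* signature when one is available, otherwise the connection's local
* address (see tcp_cache_hash_src()).
*/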
struct tcp_heuristic_key {
union {
uint8_t thk_net_signature[IFNET_SIGNATURELEN];
in_4_6_addr thk_ip;
};
sa_family_t thk_family;
};
struct tcp_heuristic {
SLIST_ENTRY(tcp_heuristic) list;
uint32_t th_last_access;
struct tcp_heuristic_key th_key;
char th_val_start[0]; /* Marker for memsetting to 0 */
uint8_t th_tfo_data_loss; /* The number of times a SYN+data has been lost */
uint8_t th_tfo_req_loss; /* The number of times a SYN+cookie-req has been lost */
uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */
uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */
uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */
uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */
uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */
uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */
uint8_t th_ecn_droprxmt; /* The number of times ECN connection is dropped after multiple retransmits */
uint8_t th_ecn_synrst; /* number of times RST was received in response to an ECN enabled SYN */
uint32_t th_tfo_enabled_time; /* The moment when we reenabled TFO after backing off */
uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */
uint32_t th_tfo_backoff; /* Current backoff timer */
uint32_t th_mptcp_backoff; /* Time until when we should not try out MPTCP */
uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */
uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */
th_mptcp_in_backoff:1; /* Are we avoiding MPTCP due to the backoff timer? */
char th_val_end[0]; /* Marker for memsetting to 0 */
};
struct tcp_heuristics_head {
SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;
/* Per-hashbucket lock to avoid lock-contention */
lck_mtx_t thh_mtx;
};
struct tcp_cache_key {
sa_family_t tck_family;
struct tcp_heuristic_key tck_src;
in_4_6_addr tck_dst;
};
struct tcp_cache {
SLIST_ENTRY(tcp_cache) list;
u_int32_t tc_last_access;
struct tcp_cache_key tc_key;
u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
u_int8_t tc_tfo_cookie_len;
};
struct tcp_cache_head {
SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;
/* Per-hashbucket lock to avoid lock-contention */
lck_mtx_t tch_mtx;
};
struct tcp_cache_key_src {
struct ifnet *ifp;
in_4_6_addr laddr;
in_4_6_addr faddr;
int af;
};
static u_int32_t tcp_cache_hash_seed;
size_t tcp_cache_size;
/*
* The maximum depth of the hash-bucket. This way we limit the tcp_cache to
* TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection
*/
#define TCP_CACHE_BUCKET_SIZE 5
static struct tcp_cache_head *tcp_cache;
decl_lck_mtx_data(, tcp_cache_mtx);
static lck_attr_t *tcp_cache_mtx_attr;
static lck_grp_t *tcp_cache_mtx_grp;
static lck_grp_attr_t *tcp_cache_mtx_grp_attr;
static struct tcp_heuristics_head *tcp_heuristics;
decl_lck_mtx_data(, tcp_heuristics_mtx);
static lck_attr_t *tcp_heuristic_mtx_attr;
static lck_grp_t *tcp_heuristic_mtx_grp;
static lck_grp_attr_t *tcp_heuristic_mtx_grp_attr;
static uint32_t tcp_backoff_maximum = 65536;
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
&tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
static int, disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)");
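/*
* Convert a timeout expressed in minutes to TCP_RETRANSHZ ticks, clamping
* the argument so the conversion does not overflow.
*/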
static uint32_t tcp_min_to_hz(uint32_t minutes)
{
if (minutes > 65536)
return ((uint32_t)65536 * 60 * TCP_RETRANSHZ);
return (minutes * 60 * TCP_RETRANSHZ);
}
/*
* This number is coupled with tcp_ecn_timeout, because we want to prevent
* integer overflow. Need to find an inexpensive way to prevent integer overflow
* while still allowing a dynamic sysctl.
*/
#define TCP_CACHE_OVERFLOW_PROTECT 9
/* Number of SYN-losses we accept */
#define TFO_MAX_COOKIE_LOSS 2
#define ECN_MAX_SYN_LOSS 2
#define MPTCP_MAX_SYN_LOSS 2
#define ECN_MAX_DROPRST 1
#define ECN_MAX_DROPRXMT 4
#define ECN_MAX_SYNRST 4
/* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */
#define TCPCACHE_F_TFO_REQ 0x01
#define TCPCACHE_F_TFO_DATA 0x02
#define TCPCACHE_F_ECN 0x04
#define TCPCACHE_F_MPTCP 0x08
#define TCPCACHE_F_ECN_DROPRST 0x10
#define TCPCACHE_F_ECN_DROPRXMT 0x20
#define TCPCACHE_F_TFO_REQ_RST 0x40
#define TCPCACHE_F_TFO_DATA_RST 0x80
#define TCPCACHE_F_ECN_SYNRST 0x100
/* Always retry ECN after backing off to this level for some heuristics */
#define ECN_RETRY_LIMIT 9
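/* Bump the per-interface stat matching the address family, if an ifp is known */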
#define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \
if ((_ifp_) != NULL) { \
if ((_af_) == AF_INET6) { \
(_ifp_)->if_ipv6_stat->_stat_++;\
} else { \
(_ifp_)->if_ipv4_stat->_stat_++;\
}\
}\
}
/*
* Round up to next higher power-of 2. See "Bit Twiddling Hacks".
*
* Might be worth moving this to a library so that others
* (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
*/
static u_int32_t tcp_cache_roundup2(u_int32_t a)
{
a--;
a |= a >> 1;
a |= a >> 2;
a |= a >> 4;
a |= a >> 8;
a |= a >> 16;
a++;
return a;
}
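/*
* Fill the source part of the key: prefer the interface's network signature;
* if none is available, fall back to the connection's local address.
*/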
static void tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
struct ifnet *ifp = tcks->ifp;
uint8_t len = sizeof(key->thk_net_signature);
uint16_t flags;
if (tcks->af == AF_INET6) {
int ret;
key->thk_family = AF_INET6;
ret = ifnet_get_netsignature(ifp, AF_INET6, &len, &flags,
key->thk_net_signature);
/*
* ifnet_get_netsignature only returns EINVAL if ifp is NULL
* (we made sure that in the other cases it does not). So,
* in this case we should take the connection's address.
*/
if (ret == ENOENT || ret == EINVAL)
memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr));
} else {
int ret;
key->thk_family = AF_INET;
ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags,
key->thk_net_signature);
/*
* ifnet_get_netsignature only returns EINVAL if ifp is NULL
* (we made sure that in the other cases it does not). So,
* in this case we should take the connection's address.
*/
if (ret == ENOENT || ret == EINVAL)
memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr));
}
}
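/*
* Build the full cache key (source part plus foreign address) and return the
* index of the hash bucket it falls into.
*/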
static u_int16_t tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
{
u_int32_t hash;
bzero(key, sizeof(struct tcp_cache_key));
tcp_cache_hash_src(tcks, &key->tck_src);
if (tcks->af == AF_INET6) {
key->tck_family = AF_INET6;
memcpy(&key->tck_dst.addr6, &tcks->faddr.addr6,
sizeof(struct in6_addr));
} else {
key->tck_family = AF_INET;
memcpy(&key->tck_dst.addr, &tcks->faddr.addr,
sizeof(struct in_addr));
}
hash = net_flowhash(key, sizeof(struct tcp_cache_key),
tcp_cache_hash_seed);
return (hash & (tcp_cache_size - 1));
}
static void tcp_cache_unlock(struct tcp_cache_head *head)
{
lck_mtx_unlock(&head->tch_mtx);
}
/*
* Make sure that everything that happens after tcp_getcache_with_lock()
* is short enough to justify that you hold the per-bucket lock!!!
*
* Otherwise, better build another lookup-function that does not hold the
* lock and you copy out the bits and bytes.
*
* That's why we provide the head as a "return"-pointer so that the caller
* can give it back to us for tcp_cache_unlock().
*/
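/*
* Typical caller pattern (sketch, mirroring the helpers below). On a NULL
* return the bucket lock has already been dropped:
*
*	struct tcp_cache_head *head;
*	struct tcp_cache *tpcache;
*
*	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
*	if (tpcache == NULL)
*		return;
*	... short critical section on tpcache ...
*	tcp_cache_unlock(head);
*/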
static struct tcp_cache *tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
int create, struct tcp_cache_head **headarg)
{
struct tcp_cache *tpcache = NULL;
struct tcp_cache_head *head;
struct tcp_cache_key key;
u_int16_t hash;
int i = 0;
hash = tcp_cache_hash(tcks, &key);
head = &tcp_cache[hash];
lck_mtx_lock(&head->tch_mtx);
/*** First step: Look for the tcp_cache in our bucket ***/
SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0)
break;
i++;
}
/*** Second step: If it's not there, create/recycle it ***/
if ((tpcache == NULL) && create) {
if (i >= TCP_CACHE_BUCKET_SIZE) {
struct tcp_cache *oldest_cache = NULL;
u_int32_t max_age = 0;
/* Look for the oldest tcp_cache in the bucket */
SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
u_int32_t age = tcp_now - tpcache->tc_last_access;
if (age > max_age) {
max_age = age;
oldest_cache = tpcache;
}
}
VERIFY(oldest_cache != NULL);
tpcache = oldest_cache;
/* We recycle, thus let's indicate that there is no cookie */
tpcache->tc_tfo_cookie_len = 0;
} else {
/* Create a new cache and add it to the list */
tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
M_NOWAIT | M_ZERO);
if (tpcache == NULL)
goto out_null;
SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
}
memcpy(&tpcache->tc_key, &key, sizeof(key));
}
if (tpcache == NULL)
goto out_null;
/* Update timestamp for garbage collection purposes */
tpcache->tc_last_access = tcp_now;
*headarg = head;
return (tpcache);
out_null:
tcp_cache_unlock(head);
return (NULL);
}
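/*
* Fill 'tcks' from the connection's inpcb: the outgoing interface plus the
* local and foreign addresses, IPv4 or IPv6 as appropriate.
*/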
static void tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks)
{
struct inpcb *inp = tp->t_inpcb;
memset(tcks, 0, sizeof(*tcks));
tcks->ifp = inp->inp_last_outifp;
if (inp->inp_vflag & INP_IPV6) {
memcpy(&tcks->laddr.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
memcpy(&tcks->faddr.addr6, &inp->in6p_faddr, sizeof(struct in6_addr));
tcks->af = AF_INET6;
} else {
memcpy(&tcks->laddr.addr, &inp->inp_laddr, sizeof(struct in_addr));
memcpy(&tcks->faddr.addr, &inp->inp_faddr, sizeof(struct in_addr));
tcks->af = AF_INET;
}
return;
}
static void tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len)
{
struct tcp_cache_head *head;
struct tcp_cache *tpcache;
/* Call lookup/create function */
tpcache = tcp_getcache_with_lock(tcks, 1, &head);
if (tpcache == NULL)
return;
tpcache->tc_tfo_cookie_len = len;
memcpy(tpcache->tc_tfo_cookie, cookie, len);
tcp_cache_unlock(head);
}
void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_cache_set_cookie_common(&tcks, cookie, len);
}
static int tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len)
{
struct tcp_cache_head *head;
struct tcp_cache *tpcache;
/* Call lookup/create function */
tpcache = tcp_getcache_with_lock(tcks, 1, &head);
if (tpcache == NULL) {
return (0);
}
if (tpcache->tc_tfo_cookie_len == 0) {
tcp_cache_unlock(head);
return (0);
}
/*
* Not enough space - this should never happen as it has been checked
* in tcp_tfo_check. So, fail here!
*/
VERIFY(tpcache->tc_tfo_cookie_len <= *len);
memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
*len = tpcache->tc_tfo_cookie_len;
tcp_cache_unlock(head);
return (1);
}
/*
* Get the cookie related to 'tp', and copy it into 'cookie', provided that len
* is big enough (len designates the available memory).
* Upon return, 'len' is set to the cookie's length.
*
* Returns 0 if we should request a cookie.
* Returns 1 if the cookie has been found and written.
*/
int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
return tcp_cache_get_cookie_common(&tcks, cookie, len);
}
static unsigned int tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks)
{
struct tcp_cache_head *head;
struct tcp_cache *tpcache;
unsigned int cookie_len;
/* Call lookup/create function */
tpcache = tcp_getcache_with_lock(tcks, 1, &head);
if (tpcache == NULL)
return (0);
cookie_len = tpcache->tc_tfo_cookie_len;
tcp_cache_unlock(head);
return cookie_len;
}
unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
return tcp_cache_get_cookie_len_common(&tcks);
}
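/*
* Heuristics are keyed on the source only (network signature or local
* address); return the bucket index derived from that key.
*/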
static u_int16_t tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
u_int32_t hash;
bzero(key, sizeof(struct tcp_heuristic_key));
tcp_cache_hash_src(tcks, key);
hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
tcp_cache_hash_seed);
return (hash & (tcp_cache_size - 1));
}
static void tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
lck_mtx_unlock(&head->thh_mtx);
}
/*
* Make sure that everything that happens after tcp_getheuristic_with_lock()
* is short enough to justify that you hold the per-bucket lock!!!
*
* Otherwise, better build another lookup-function that does not hold the
* lock and you copy out the bits and bytes.
*
* That's why we provide the head as a "return"-pointer so that the caller
* can give it back to us for tcp_heuristic_unlock().
*
*
* ToDo - way too much code-duplication. We should create an interface to handle
* bucketized hashtables with recycling of the oldest element.
*/
static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
int create, struct tcp_heuristics_head **headarg)
{
struct tcp_heuristic *tpheur = NULL;
struct tcp_heuristics_head *head;
struct tcp_heuristic_key key;
u_int16_t hash;
int i = 0;
hash = tcp_heuristics_hash(tcks, &key);
head = &tcp_heuristics[hash];
lck_mtx_lock(&head->thh_mtx);
/*** First step: Look for the tcp_heur in our bucket ***/
SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0)
break;
i++;
}
/*** Second step: If it's not there, create/recycle it ***/
if ((tpheur == NULL) && create) {
if (i >= TCP_CACHE_BUCKET_SIZE) {
struct tcp_heuristic *oldest_heur = NULL;
u_int32_t max_age = 0;
/* Look for the oldest tcp_heur in the bucket */
SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
u_int32_t age = tcp_now - tpheur->th_last_access;
if (age > max_age) {
max_age = age;
oldest_heur = tpheur;
}
}
VERIFY(oldest_heur != NULL);
tpheur = oldest_heur;
/* We recycle - set everything to 0 */
bzero(tpheur->th_val_start,
tpheur->th_val_end - tpheur->th_val_start);
} else {
/* Create a new heuristic and add it to the list */
tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
M_NOWAIT | M_ZERO);
if (tpheur == NULL)
goto out_null;
SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
}
/*
* Set to tcp_now, to make sure it won't be greater than tcp_now in the
* near future.
*/
tpheur->th_ecn_backoff = tcp_now;
tpheur->th_tfo_backoff_until = tcp_now;
tpheur->th_mptcp_backoff = tcp_now;
tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
memcpy(&tpheur->th_key, &key, sizeof(key));
}
if (tpheur == NULL)
goto out_null;
/* Update timestamp for garbage collection purposes */
tpheur->th_last_access = tcp_now;
*headarg = head;
return (tpheur);
out_null:
tcp_heuristic_unlock(head);
return (NULL);
}
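/*
* Clear the counters selected by 'flags' after a successful attempt.
* Only existing entries are touched; none are created.
*/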
static void tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_t flags)
{
struct tcp_heuristics_head *head;
struct tcp_heuristic *tpheur;
/*
* Don't attempt to create it! Keep the heuristics clean if the
* server does not support TFO. This reduces the lookup-cost on
* our side.
*/
tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
if (tpheur == NULL)
return;
if (flags & TCPCACHE_F_TFO_DATA) {
tpheur->th_tfo_data_loss = 0;
}
if (flags & TCPCACHE_F_TFO_REQ) {
tpheur->th_tfo_req_loss = 0;
}
if (flags & TCPCACHE_F_TFO_DATA_RST) {
tpheur->th_tfo_data_rst = 0;
}
if (flags & TCPCACHE_F_TFO_REQ_RST) {
tpheur->th_tfo_req_rst = 0;
}
if (flags & TCPCACHE_F_ECN) {
tpheur->th_ecn_loss = 0;
tpheur->th_ecn_synrst = 0;
}
if (flags & TCPCACHE_F_MPTCP)
tpheur->th_mptcp_loss = 0;
tcp_heuristic_unlock(head);
}
void tcp_heuristic_tfo_success(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
uint8_t flag = 0;
tcp_cache_key_src_create(tp, &tcks);
if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)
flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ |
TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
if (tp->t_tfo_stats & TFO_S_COOKIE_REQ)
flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
tcp_heuristic_reset_counters(&tcks, flag);
}
void tcp_heuristic_mptcp_success(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP);
}
void tcp_heuristic_ecn_success(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
}
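/*
* Put TFO into backoff: credit the time since TFO was last re-enabled against
* the current backoff, schedule the next attempt, then double the backoff,
* wrapping back to the initial value once it exceeds tcp_backoff_maximum.
*/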
static void __tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur)
{
if (tpheur->th_tfo_in_backoff)
return;
tpheur->th_tfo_in_backoff = 1;
if (tpheur->th_tfo_enabled_time) {
uint32_t old_backoff = tpheur->th_tfo_backoff;
tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time);
if (tpheur->th_tfo_backoff > old_backoff)
tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
}
tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff;
/* Then, increase the backoff time */
tpheur->th_tfo_backoff *= 2;
if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum))
tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
}
static void tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks)
{
struct tcp_heuristics_head *head;
struct tcp_heuristic *tpheur;
tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
if (tpheur == NULL)
return;
__tcp_heuristic_tfo_middlebox_common(tpheur);
tcp_heuristic_unlock(head);
}
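/*
* Record the loss/RST events named in 'flags'. Counters are capped at
* TCP_CACHE_OVERFLOW_PROTECT to bound the exponential backoff shifts;
* crossing a threshold arms the corresponding TFO/ECN/MPTCP backoff.
*/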
static void tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
u_int32_t flags)
{
struct tcp_heuristics_head *head;
struct tcp_heuristic *tpheur;
tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
if (tpheur == NULL)
return;
/* Limit to prevent integer-overflow during exponential backoff */
if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_tfo_data_loss++;
if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS)
__tcp_heuristic_tfo_middlebox_common(tpheur);
}
if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_tfo_req_loss++;
if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS)
__tcp_heuristic_tfo_middlebox_common(tpheur);
}
if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_tfo_data_rst++;
if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS)
__tcp_heuristic_tfo_middlebox_common(tpheur);
}
if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_tfo_req_rst++;
if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS)
__tcp_heuristic_tfo_middlebox_common(tpheur);
}
if ((flags & TCPCACHE_F_ECN) && tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_ecn_loss++;
if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
tcpstat.tcps_ecn_fallback_synloss++;
TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss);
tpheur->th_ecn_backoff = tcp_now +
(tcp_min_to_hz(tcp_ecn_timeout) <<
(tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));
}
}
if ((flags & TCPCACHE_F_MPTCP) &&
tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_mptcp_loss++;
if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) {
/*
* Yes, we take tcp_ecn_timeout, to avoid adding yet
* another sysctl that is just used for testing.
*/
tpheur->th_mptcp_backoff = tcp_now +
(tcp_min_to_hz(tcp_ecn_timeout) <<
(tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS));
}
}
if ((flags & TCPCACHE_F_ECN_DROPRST) &&
tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_ecn_droprst++;
if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) {
tcpstat.tcps_ecn_fallback_droprst++;
TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
ecn_fallback_droprst);
tpheur->th_ecn_backoff = tcp_now +
(tcp_min_to_hz(tcp_ecn_timeout) <<
(tpheur->th_ecn_droprst - ECN_MAX_DROPRST));
}
}
if ((flags & TCPCACHE_F_ECN_DROPRXMT) &&
tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_ecn_droprxmt++;
if (tpheur->th_ecn_droprxmt >= ECN_MAX_DROPRXMT) {
tcpstat.tcps_ecn_fallback_droprxmt++;
TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
ecn_fallback_droprxmt);
tpheur->th_ecn_backoff = tcp_now +
(tcp_min_to_hz(tcp_ecn_timeout) <<
(tpheur->th_ecn_droprxmt - ECN_MAX_DROPRXMT));
}
}
if ((flags & TCPCACHE_F_ECN_SYNRST) &&
tpheur->th_ecn_synrst < TCP_CACHE_OVERFLOW_PROTECT) {
tpheur->th_ecn_synrst++;
if (tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
tcpstat.tcps_ecn_fallback_synrst++;
TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
ecn_fallback_synrst);
tpheur->th_ecn_backoff = tcp_now +
(tcp_min_to_hz(tcp_ecn_timeout) <<
(tpheur->th_ecn_synrst - ECN_MAX_SYNRST));
}
}
tcp_heuristic_unlock(head);
}
void tcp_heuristic_tfo_loss(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
uint32_t flag = 0;
tcp_cache_key_src_create(tp, &tcks);
if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)
flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ);
if (tp->t_tfo_stats & TFO_S_COOKIE_REQ)
flag = TCPCACHE_F_TFO_REQ;
tcp_heuristic_inc_counters(&tcks, flag);
}
void tcp_heuristic_tfo_rst(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
uint32_t flag = 0;
tcp_cache_key_src_create(tp, &tcks);
if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)
flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
if (tp->t_tfo_stats & TFO_S_COOKIE_REQ)
flag = TCPCACHE_F_TFO_REQ_RST;
tcp_heuristic_inc_counters(&tcks, flag);
}
void tcp_heuristic_mptcp_loss(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP);
}
void tcp_heuristic_ecn_loss(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
}
void tcp_heuristic_ecn_droprst(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
}
void tcp_heuristic_ecn_droprxmt(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
}
void tcp_heuristic_ecn_synrst(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
}
void tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tp->t_tfo_flags |= TFO_F_HEURISTIC_DONE;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_tfo_middlebox_common(&tcks);
}
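/*
* Aggressive ECN fallback: push the ECN backoff out exponentially in the
* number of aggressive fallbacks seen so far for this entry.
*/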
static void tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks)
{
struct tcp_heuristics_head *head;
struct tcp_heuristic *tpheur;
tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
if (tpheur == NULL)
return;
/* Must be done before the increment below, otherwise we would start off with exponential backoff */
tpheur->th_ecn_backoff = tcp_now +
(tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive));
/*
* Cap the counter to prevent integer overflow in the exponential
* backoff computation above.
*/
if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT)
tpheur->th_ecn_aggressive++;
tcp_heuristic_unlock(head);
}
void tcp_heuristic_ecn_aggressive(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
tcp_heuristic_ecn_aggressive_common(&tcks);
}
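/*
* Decide whether to attempt TFO towards this peer: TRUE unless the entry is
* in TFO backoff; once the backoff period has elapsed, clear the backoff
* state and remember when TFO was re-enabled.
*/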
static boolean_t tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks)
{
struct tcp_heuristics_head *head;
struct tcp_heuristic *tpheur;
if (disable_tcp_heuristics)
return (TRUE);
/* Get the tcp-heuristic. */
tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
if (tpheur == NULL)
return (TRUE);
if (tpheur->th_tfo_in_backoff == 0)
goto tfo_ok;
if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) {
tpheur->th_tfo_in_backoff = 0;
tpheur->th_tfo_enabled_time = tcp_now;
goto tfo_ok;
}
tcp_heuristic_unlock(head);
return (FALSE);
tfo_ok:
tcp_heuristic_unlock(head);
return (TRUE);
}
boolean_t tcp_heuristic_do_tfo(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
tcp_cache_key_src_create(tp, &tcks);
if (tcp_heuristic_do_tfo_common(&tcks))
return (TRUE);
return (FALSE);
}
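/*
* Decide whether to attempt MPTCP: fall back as long as the MPTCP backoff
* timer lies in the future.
*/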
boolean_t tcp_heuristic_do_mptcp(struct tcpcb *tp)
{
struct tcp_cache_key_src tcks;
struct tcp_heuristics_head *head = NULL;
struct tcp_heuristic *tpheur;
if (disable_tcp_heuristics)
return (TRUE);
tcp_cache_key_src_create(tp, &tcks);
/* Get the tcp-heuristic. */
tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head);
if (tpheur == NULL)
return (TRUE);
if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now))
goto fallback;
tcp_heuristic_unlock(head);
return (TRUE);
fallback:
if (head)