# HG changeset patch
# Parent 059011cfc3790fc9630a771d36233391098c3bd8

diff -r 059011cfc379 sys/netinet/tcp_reass.c
--- a/sys/netinet/tcp_reass.c	Fri Sep 24 17:31:36 2010 +1000
+++ b/sys/netinet/tcp_reass.c	Sun Sep 26 23:26:01 2010 +1000
@@ -74,19 +74,22 @@
 #include <netinet/tcp_debug.h>
 #endif /* TCPDEBUG */
 
+static int tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS);
+static int tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS);
+
 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
     "TCP Segment Reassembly Queue");
 
 static VNET_DEFINE(int, tcp_reass_maxseg) = 0;
 #define	V_tcp_reass_maxseg		VNET(tcp_reass_maxseg)
-SYSCTL_VNET_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
-    &VNET_NAME(tcp_reass_maxseg), 0,
+SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
+    &VNET_NAME(tcp_reass_maxseg), 0, &tcp_reass_sysctl_maxseg, "I",
     "Global maximum number of TCP Segments in Reassembly Queue");
 
 static VNET_DEFINE(int, tcp_reass_qsize) = 0;
 #define	V_tcp_reass_qsize		VNET(tcp_reass_qsize)
-SYSCTL_VNET_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
-    &VNET_NAME(tcp_reass_qsize), 0,
+SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
+    &VNET_NAME(tcp_reass_qsize), 0, &tcp_reass_sysctl_qsize, "I",
     "Global number of TCP Segments currently in Reassembly Queue");
 
 static VNET_DEFINE(int, tcp_reass_maxqlen) = 48;
@@ -148,7 +151,6 @@
 		m_freem(qe->tqe_m);
 		uma_zfree(V_tcp_reass_zone, qe);
 		tp->t_segqlen--;
-		V_tcp_reass_qsize--;
 	}
 
 	KASSERT((tp->t_segqlen == 0),
@@ -156,6 +158,20 @@
 	    tp, tp->t_segqlen));
 }
 
+static int
+tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS)
+{
+	V_tcp_reass_maxseg = uma_zone_get_max(V_tcp_reass_zone);
+	return (sysctl_handle_int(oidp, arg1, arg2, req));
+}
+
+static int
+tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS)
+{
+	V_tcp_reass_qsize = uma_zone_get_cur(V_tcp_reass_zone);
+	return (sysctl_handle_int(oidp, arg1, arg2, req));
+}
+
 int
 tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
 {
@@ -184,12 +200,10 @@
 	 * Limit the number of segments in the reassembly queue to prevent
 	 * holding on to too many segments (and thus running out of mbufs).
 	 * Make sure to let the missing segment through which caused this
-	 * queue.  Always keep one global queue entry spare to be able to
-	 * process the missing segment.
+	 * queue.
 	 */
 	if (th->th_seq != tp->rcv_nxt &&
-	    (V_tcp_reass_qsize + 1 >= V_tcp_reass_maxseg ||
-	    tp->t_segqlen >= V_tcp_reass_maxqlen)) {
+	    tp->t_segqlen >= V_tcp_reass_maxqlen) {
 		V_tcp_reass_overflows++;
 		TCPSTAT_INC(tcps_rcvmemdrop);
 		m_freem(m);
@@ -209,7 +223,6 @@
 		return (0);
 	}
 	tp->t_segqlen++;
-	V_tcp_reass_qsize++;
 
 	/*
 	 * Find a segment which begins after this one does.
@@ -236,7 +249,6 @@
 			m_freem(m);
 			uma_zfree(V_tcp_reass_zone, te);
 			tp->t_segqlen--;
-			V_tcp_reass_qsize--;
 			/*
 			 * Try to present any queued data
 			 * at the left window edge to the user.
@@ -273,7 +285,6 @@
 		m_freem(q->tqe_m);
 		uma_zfree(V_tcp_reass_zone, q);
 		tp->t_segqlen--;
-		V_tcp_reass_qsize--;
 		q = nq;
 	}
 
@@ -310,7 +321,6 @@
 			sbappendstream_locked(&so->so_rcv, q->tqe_m);
 		uma_zfree(V_tcp_reass_zone, q);
 		tp->t_segqlen--;
-		V_tcp_reass_qsize--;
 		q = nq;
 	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
 	ND6_HINT(tp);
diff -r 059011cfc379 sys/vm/uma.h
--- a/sys/vm/uma.h	Fri Sep 24 17:31:36 2010 +1000
+++ b/sys/vm/uma.h	Sun Sep 26 23:26:01 2010 +1000
@@ -466,9 +466,20 @@
  *
  * Return:
  *	0  No limit
- *	int  The effective limit of the zone
+ *	uint64_t  The effective limit of the zone
  */
-int uma_zone_get_max(uma_zone_t zone);
+uint64_t uma_zone_get_max(uma_zone_t zone);
+
+/*
+ * Obtains the current number of items allocated from a zone
+ *
+ * Arguments:
+ *	zone  The zone to obtain the current allocation count from
+ *
+ * Return:
+ *	uint64_t  The current number of items allocated from the zone
+ */
+uint64_t uma_zone_get_cur(uma_zone_t zone);
 
 /*
  * The following two routines (uma_zone_set_init/fini)
diff -r 059011cfc379 sys/vm/uma_core.c
--- a/sys/vm/uma_core.c	Fri Sep 24 17:31:36 2010 +1000
+++ b/sys/vm/uma_core.c	Sun Sep 26 23:26:01 2010 +1000
@@ -2797,24 +2797,44 @@
 }
 
 /* See uma.h */
-int
+uint64_t
 uma_zone_get_max(uma_zone_t zone)
 {
-	int nitems;
 	uma_keg_t keg;
+	uint64_t nitems;
 
 	ZONE_LOCK(zone);
 	keg = zone_first_keg(zone);
-	if (keg->uk_maxpages)
-		nitems = keg->uk_maxpages * keg->uk_ipers;
-	else
-		nitems = 0;
+	nitems = keg->uk_maxpages * keg->uk_ipers;
 	ZONE_UNLOCK(zone);
 
 	return (nitems);
 }
 
 /* See uma.h */
+uint64_t
+uma_zone_get_cur(uma_zone_t zone)
+{
+	int64_t nitems;
+	u_int i;
+
+	ZONE_LOCK(zone);
+	nitems = zone->uz_allocs - zone->uz_frees;
+	CPU_FOREACH(i) {
+		/*
+		 * See the comment in sysctl_vm_zone_stats() regarding the
+		 * safety of accessing the per-cpu caches. With the zone lock
+		 * held, it is safe, but can potentially result in stale data.
+		 */
+		nitems += zone->uz_cpu[i].uc_allocs -
+		    zone->uz_cpu[i].uc_frees;
+	}
+	ZONE_UNLOCK(zone);
+
+	return (nitems < 0 ? 0 : nitems);
+}
+
+/* See uma.h */
 void
 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
 {
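
Usage note (illustrative, not part of the patch): with this change,
net.inet.tcp.reass.maxsegments and net.inet.tcp.reass.cursegments stop being
plain VNET integers and become procedure OIDs whose handlers refresh the
exported value from the UMA zone via uma_zone_get_max()/uma_zone_get_cur() on
each read, which is why every V_tcp_reass_qsize update could be dropped from
the segment hot path. Below is a minimal userland sketch that reads both OIDs
with sysctlbyname(3); the program itself is an assumption for illustration
and is not derived from this patch:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <err.h>
	#include <stdio.h>

	int
	main(void)
	{
		int maxseg, curseg;
		size_t len;

		/* Both OIDs are exported as "I" (int) by the new handlers. */
		len = sizeof(maxseg);
		if (sysctlbyname("net.inet.tcp.reass.maxsegments", &maxseg,
		    &len, NULL, 0) == -1)
			err(1, "maxsegments");

		len = sizeof(curseg);
		if (sysctlbyname("net.inet.tcp.reass.cursegments", &curseg,
		    &len, NULL, 0) == -1)
			err(1, "cursegments");

		printf("TCP reassembly: %d segments queued, limit %d\n",
		    curseg, maxseg);
		return (0);
	}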