Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 159798 Details for
Bug 249316
backport receive window mgmt code for SCTP to RHEL4
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly; please enable it.
[patch]
patch to properly manage association rcvbuf space
sctp-rwnd-mgmt-rhel4.patch (text/plain), 18.42 KB, created by
Neil Horman
on 2007-07-23 18:09:18 UTC
(
hide
)
Description:
patch to properly manage association rcvbuf space
Filename:
MIME Type:
Creator:
Neil Horman
Created:
2007-07-23 18:09:18 UTC
Size:
18.42 KB
patch
obsolete
>--- linux-2.6.9/include/linux/sysctl.h.orig 2007-07-20 09:44:34.000000000 -0400 >+++ linux-2.6.9/include/linux/sysctl.h 2007-07-20 09:46:18.000000000 -0400 >@@ -649,6 +649,10 @@ > NET_SCTP_PRSCTP_ENABLE = 14, > NET_SCTP_SNDBUF_POLICY = 15, > NET_SCTP_RCVBUF_POLICY = 16, >+ NET_SCTP_BUF_MEM = 17, >+ NET_SCTP_BUF_RMEM = 18, >+ NET_SCTP_BUF_WMEM = 19, >+ > }; > > /* /proc/sys/net/bridge */ >--- linux-2.6.9/include/net/sctp/ulpevent.h.orig 2004-10-18 17:53:21.000000000 -0400 >+++ linux-2.6.9/include/net/sctp/ulpevent.h 2007-07-20 09:48:47.000000000 -0400 >@@ -63,6 +63,7 @@ > __u32 cumtsn; > int msg_flags; > int iif; >+ unsigned int rmem_len; > }; > > /* Retrieve the skb this event sits inside of. */ >@@ -78,7 +79,7 @@ > } > > struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int gfp); >-void sctp_ulpevent_init(struct sctp_ulpevent *, int flags); >+void sctp_ulpevent_init(struct sctp_ulpevent *, int flags, unsigned int len); > void sctp_ulpevent_free(struct sctp_ulpevent *); > int sctp_ulpevent_is_notification(const struct sctp_ulpevent *); > void sctp_queue_purge_ulpevents(struct sk_buff_head *list); >--- linux-2.6.9/include/net/sctp/sctp.h.orig 2007-07-20 09:44:34.000000000 -0400 >+++ linux-2.6.9/include/net/sctp/sctp.h 2007-07-20 09:46:18.000000000 -0400 >@@ -137,6 +137,7 @@ > void sctp_write_space(struct sock *sk); > unsigned int sctp_poll(struct file *file, struct socket *sock, > poll_table *wait); >+void sctp_sock_rfree(struct sk_buff *skb); > > /* > * sctp/primitive.c >@@ -386,6 +387,24 @@ > return result; > } > >+/* SCTP version of skb_set_owner_r. We need this one because >+ * of the way we have to do receive buffer accounting on bundled >+ * chunks. 
>+ */ >+static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) >+{ >+ struct sctp_ulpevent *event = sctp_skb2event(skb); >+ >+ skb->sk = sk; >+ skb->destructor = sctp_sock_rfree; >+ atomic_add(event->rmem_len, &sk->sk_rmem_alloc); >+ /* >+ * This mimics the behavior of >+ * sk_stream_set_owner_r >+ */ >+ sk->sk_forward_alloc -= event->rmem_len; >+} >+ > /* Tests if the list has one and only one entry. */ > static inline int sctp_list_single_entry(struct list_head *head) > { >--- linux-2.6.9/net/sctp/socket.c.orig 2007-07-20 09:44:16.000000000 -0400 >+++ linux-2.6.9/net/sctp/socket.c 2007-07-20 09:46:18.000000000 -0400 >@@ -109,23 +109,42 @@ > > extern kmem_cache_t *sctp_bucket_cachep; > >+extern int sysctl_sctp_mem[3]; >+extern int sysctl_sctp_rmem[3]; >+extern int sysctl_sctp_wmem[3]; >+ >+int sctp_memory_pressure; >+atomic_t sctp_memory_allocated; >+atomic_t sctp_sockets_allocated; >+ >+static void sctp_enter_memory_pressure(void) >+{ >+ sctp_memory_pressure = 1; >+} >+ > /* Get the sndbuf space available at the time on the association. 
*/ > static inline int sctp_wspace(struct sctp_association *asoc) > { >+ int amt; > struct sock *sk = asoc->base.sk; >- int amt = 0; > >- if(asoc->ep->sndbuf_policy) { >- /* make sure that no association uses more than sk_sndbuf */ >- amt = sk->sk_sndbuf - asoc->sndbuf_used; >+ if (asoc->ep->sndbuf_policy) >+ amt = asoc->sndbuf_used; >+ else >+ amt = atomic_read(&sk->sk_wmem_alloc); >+ >+ if (amt >= sk->sk_sndbuf) { >+ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) >+ amt = 0; >+ else { >+ amt = sk_stream_wspace(sk); >+ if (amt < 0) >+ amt = 0; >+ } > } else { >- /* do socket level accounting */ >- amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); >+ amt = sk->sk_sndbuf - amt; > } > >- if (amt < 0) >- amt = 0; >- > return amt; > } > >@@ -161,6 +180,7 @@ > sizeof(struct sctp_chunk); > > atomic_add(sizeof(struct sctp_chunk),&sk->sk_wmem_alloc); >+ sk_charge_skb(sk, chunk->skb); > } > > /* Verify that this is a valid address. */ >@@ -2566,6 +2586,7 @@ > sp->hmac = NULL; > > SCTP_DBG_OBJCNT_INC(sock); >+ atomic_inc(&sctp_sockets_allocated); > return 0; > } > >@@ -2579,7 +2600,7 @@ > /* Release our hold on the endpoint. */ > ep = sctp_sk(sk)->ep; > sctp_endpoint_free(ep); >- >+ atomic_dec(&sctp_sockets_allocated); > return 0; > } > >@@ -4312,12 +4333,38 @@ > > atomic_sub(sizeof(struct sctp_chunk),&sk->sk_wmem_alloc); > >+ /* >+ * This undoes what is done via sk_charge_skb >+ */ >+ sk->sk_wmem_queued -= skb->truesize; >+ sk->sk_forward_alloc += skb->truesize; >+ > sock_wfree(skb); > __sctp_write_space(asoc); > > sctp_association_put(asoc); > } > >+/* Do accounting for the receive space on the socket. >+ * Accounting for the association is done in ulpevent.c >+ * We set this as a destructor for the cloned data skbs so that >+ * accounting is done at the correct time. 
>+ */ >+void sctp_sock_rfree(struct sk_buff *skb) >+{ >+ struct sock *sk = skb->sk; >+ struct sctp_ulpevent *event = sctp_skb2event(skb); >+ >+ atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); >+ >+ /* >+ * Mimic the behavior of sk_stream_rfree >+ */ >+ sk->sk_forward_alloc += event->rmem_len; >+ >+} >+ >+ > /* Helper function to wait for space in the sndbuf. */ > static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, > size_t msg_len) >@@ -4537,6 +4584,36 @@ > finish_wait(sk->sk_sleep, &wait); > } > >+static void sctp_sock_rfree_frag(struct sk_buff *skb) >+{ >+ struct sk_buff *frag; >+ >+ if (!skb->data_len) >+ goto done; >+ >+ /* Don't forget the fragments. */ >+ for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) >+ sctp_sock_rfree_frag(frag); >+ >+done: >+ sctp_sock_rfree(skb); >+} >+ >+static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) >+{ >+ struct sk_buff *frag; >+ >+ if (!skb->data_len) >+ goto done; >+ >+ /* Don't forget the fragments. */ >+ for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) >+ sctp_skb_set_owner_r_frag(frag, sk); >+ >+done: >+ sctp_skb_set_owner_r(skb, sk); >+} >+ > /* Populate the fields of the newsk from the oldsk and migrate the assoc > * and its messages to the newsk. 
> */ >@@ -4589,10 +4666,10 @@ > sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { > event = sctp_skb2event(skb); > if (event->asoc == assoc) { >- sock_rfree(skb); >+ sctp_sock_rfree_frag(skb); > __skb_unlink(skb, skb->list); > __skb_queue_tail(&newsk->sk_receive_queue, skb); >- skb_set_owner_r(skb, newsk); >+ sctp_skb_set_owner_r_frag(skb, newsk); > } > } > >@@ -4620,10 +4697,10 @@ > sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { > event = sctp_skb2event(skb); > if (event->asoc == assoc) { >- sock_rfree(skb); >+ sctp_sock_rfree_frag(skb); > __skb_unlink(skb, skb->list); > __skb_queue_tail(queue, skb); >- skb_set_owner_r(skb, newsk); >+ sctp_skb_set_owner_r_frag(skb, newsk); > } > } > >@@ -4660,6 +4737,7 @@ > sctp_release_sock(newsk); > } > >+ > /* This proto struct describes the ULP interface for SCTP. */ > struct proto sctp_prot = { > .name = "SCTP", >@@ -4681,6 +4759,12 @@ > .unhash = sctp_unhash, > .get_port = sctp_get_port, > .slab_obj_size = sizeof(struct sctp_sock), >+ .sysctl_mem = sysctl_sctp_mem, >+ .sysctl_rmem = sysctl_sctp_rmem, >+ .sysctl_wmem = sysctl_sctp_wmem, >+ .memory_pressure = &sctp_memory_pressure, >+ .enter_memory_pressure = sctp_enter_memory_pressure, >+ .memory_allocated = &sctp_memory_allocated, > }; > > #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) >@@ -4704,5 +4788,11 @@ > .unhash = sctp_unhash, > .get_port = sctp_get_port, > .slab_obj_size = sizeof(struct sctp6_sock), >+ .sysctl_mem = sysctl_sctp_mem, >+ .sysctl_rmem = sysctl_sctp_rmem, >+ .sysctl_wmem = sysctl_sctp_wmem, >+ .memory_pressure = &sctp_memory_pressure, >+ .enter_memory_pressure = sctp_enter_memory_pressure, >+ .memory_allocated = &sctp_memory_allocated, > }; > #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ >--- linux-2.6.9/net/sctp/sm_statefuns.c.orig 2007-07-20 09:44:16.000000000 -0400 >+++ linux-2.6.9/net/sctp/sm_statefuns.c 2007-07-20 09:46:51.000000000 -0400 >@@ -4646,7 +4646,6 @@ > __u16 num_gaps; > struct sctp_tsnmap *map = (struct 
sctp_tsnmap *)&asoc->peer.tsn_map; > struct sock *sk = asoc->base.sk; >- int rcvbuf_over = 0; > > data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; > skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); >@@ -4656,49 +4655,6 @@ > > /* ASSERT: Now skb->data is really the user data. */ > >- /* >- * if we are established, and we have used up our receive >- * buffer memory, think about droping the frame >- * Note that we have an opportunity to improve performance here >- * if we accept one chunk from an skbuf, we have to keep all the memory >- * of that skbuff around until the chunk is read into user space >- * therefore, once we accept 1 chunk we may as well accept all remaining >- * chunks in the skbuff. The data_accepted flag helps us do that >- */ >- if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) { >- /* >- * If the receive buffer policy is 1, then each >- * association can allocate up to sk_rcvbuf bytes >- * otherwise, all the associations in aggregate >- * may allocate up to sk_rcvbuf bytes >- */ >- if (asoc->ep->rcvbuf_policy) >- account_value = atomic_read(&asoc->rmem_alloc); >- else >- account_value = atomic_read(&sk->sk_rmem_alloc); >- if (account_value > sk->sk_rcvbuf) { >- >- /* >- * We need to make forward progress, even when we are >- * under memory pressure, so we always allow the >- * next tsn after the ctsn ack point to be accepted. >- * This lets us avoid deadlocks in which we have to >- * drop frames that would otherwise let us drain the >- * receive queue >- */ >- if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn) >- return SCTP_IERROR_IGNORE_TSN; >- >- /* >- * we're going to accept the frame >- * but we should renege to make space for it >- * this will send us down that path later >- * in this function >- */ >- rcvbuf_over = 1; >- } >- } >- > /* Process ECN based congestion. 
> * > * Since the chunk structure is reused for all chunks within >@@ -4759,24 +4715,11 @@ > * seems a bit troublesome in that frag_point varies based on > * PMTU. In cases, such as loopback, this might be a rather > * large spill over. >- * NOTE: If we have a full receive buffer here, we only renege if >- * our receiver can still make progress without the tsn being >- * received. We do this because in the event that the associations >- * receive queue is empty we are filling a leading gap, and since reneging >- * moves the gap to the end of the tsn stream, we are likely to stall again >- * very shortly. avoiding the renege when we fill a leading gap is a >- * good heuristic for avoiding such steady state stalls. >- */ >- if (!asoc->rwnd || asoc->rwnd_over || >- (datalen > asoc->rwnd + asoc->frag_point) || >- (rcvbuf_over && (!skb_queue_len(&sk->sk_receive_queue)))) { >- >- /* If this is the next TSN, consider reneging to make >- * room. Note: Playing nice with a confused sender. A >- * malicious sender can still eat up all our buffer >- * space and in the future we may want to detect and >- * do more drastic reneging. >- */ >+ */ >+ >+ if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || >+ (datalen > asoc->rwnd + asoc->frag_point))) { >+ > if (sctp_tsnmap_has_gap(map) && > (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { > SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); >@@ -4790,6 +4733,21 @@ > } > > /* >+ * Also try to renege to limit our memory usage in the event that >+ * we are under memory pressure >+ * If we can't renege, don't worry about it, the sk_stream_rmem_schedule >+ * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our >+ * memory usage too much >+ */ >+ if (*sk->sk_prot->memory_pressure) { >+ if (sctp_tsnmap_has_gap(map) && >+ (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { >+ SCTP_DEBUG_PRINTK("Under Pressure! 
Reneging for tsn:%u\n", tsn); >+ deliver = SCTP_CMD_RENEGE; >+ } >+ } >+ >+ /* > * Section 3.3.10.9 No User Data (9) > * > * Cause of error >--- linux-2.6.9/net/sctp/endpointola.c.orig 2007-07-20 09:44:15.000000000 -0400 >+++ linux-2.6.9/net/sctp/endpointola.c 2007-07-20 09:46:18.000000000 -0400 >@@ -149,6 +149,7 @@ > > /* Use SCTP specific send buffer space queues. */ > ep->sndbuf_policy = sctp_sndbuf_policy; >+ > sk->sk_write_space = sctp_write_space; > sk->sk_use_write_queue = 1; > >--- linux-2.6.9/net/sctp/protocol.c.orig 2007-07-20 09:44:16.000000000 -0400 >+++ linux-2.6.9/net/sctp/protocol.c 2007-07-20 09:46:18.000000000 -0400 >@@ -51,6 +51,7 @@ > #include <linux/netdevice.h> > #include <linux/inetdevice.h> > #include <linux/seq_file.h> >+#include <linux/bootmem.h> > #include <net/protocol.h> > #include <net/ip.h> > #include <net/ipv6.h> >@@ -88,6 +89,10 @@ > extern int sctp_assocs_proc_init(void); > extern int sctp_assocs_proc_exit(void); > >+extern int sysctl_sctp_mem[3]; >+extern int sysctl_sctp_rmem[3]; >+extern int sysctl_sctp_wmem[3]; >+ > /* Return the address of the control sock. */ > struct sock *sctp_get_ctl_sock(void) > { >@@ -967,6 +972,8 @@ > int i; > int status = -EINVAL; > unsigned long goal; >+ unsigned long limit; >+ int max_share; > int order; > > /* SCTP_DEBUG sanity check. */ >@@ -1068,6 +1075,31 @@ > /* Initialize handle used for association ids. */ > idr_init(&sctp_assocs_id); > >+ /* Set the pressure threshold to be a fraction of global memory that >+ * is up to 1/2 at 256 MB, decreasing toward zero with the amount of >+ * memory, with a floor of 128 pages. 
>+ * Note this initalizes the data in sctpv6_prot too >+ * Unabashedly stolen from tcp_init >+ */ >+ limit = min(num_physpages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); >+ limit = (limit * (num_physpages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); >+ limit = max(limit, 128UL); >+ sysctl_sctp_mem[0] = limit / 4 * 3; >+ sysctl_sctp_mem[1] = limit; >+ sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; >+ >+ /* Set per-socket limits to no more than 1/128 the pressure threshold*/ >+ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); >+ max_share = min(4UL*1024*1024, limit); >+ >+ sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */ >+ sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); >+ sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); >+ >+ sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM; >+ sysctl_sctp_wmem[1] = 16*1024; >+ sysctl_sctp_wmem[2] = max(64*1024, max_share); >+ > /* Size and allocate the association hash table. > * The methodology is similar to that of the tcp hash tables. > */ >--- linux-2.6.9/net/sctp/ulpqueue.c.orig 2007-07-20 09:44:16.000000000 -0400 >+++ linux-2.6.9/net/sctp/ulpqueue.c 2007-07-20 09:46:18.000000000 -0400 >@@ -322,7 +322,7 @@ > if (!new) > return NULL; /* try again later */ > >- new->sk = f_frag->sk; >+ sctp_skb_set_owner_r(new, f_frag->sk); > > skb_shinfo(new)->frag_list = pos; > } else >@@ -878,6 +878,7 @@ > sctp_ulpq_partial_delivery(ulpq, chunk, gfp); > } > >+ sk_stream_mem_reclaim(asoc->base.sk); > return; > } > >--- linux-2.6.9/net/sctp/ulpevent.c.orig 2007-07-20 09:44:15.000000000 -0400 >+++ linux-2.6.9/net/sctp/ulpevent.c 2007-07-20 09:49:24.000000000 -0400 >@@ -63,7 +63,7 @@ > goto fail; > > event = sctp_skb2event(skb); >- sctp_ulpevent_init(event, msg_flags); >+ sctp_ulpevent_init(event, msg_flags, skb->truesize); > > return event; > >@@ -72,10 +72,12 @@ > } > > /* Initialize an ULP event from an given skb. 
*/ >-void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) >+void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags, >+ unsigned int len) > { > memset(event, 0, sizeof(struct sctp_ulpevent)); > event->msg_flags = msg_flags; >+ event->rmem_len = len; > } > > /* Is this a MSG_NOTIFICATION? */ >@@ -98,17 +100,16 @@ > sctp_association_hold((struct sctp_association *)asoc); > skb = sctp_event2skb(event); > event->asoc = (struct sctp_association *)asoc; >- atomic_add(skb->truesize, &event->asoc->rmem_alloc); >- skb_set_owner_r(skb, asoc->base.sk); >+ atomic_add(event->rmem_len, &event->asoc->rmem_alloc); >+ sctp_skb_set_owner_r(skb, asoc->base.sk); > } > > /* A simple destructor to give up the reference to the association. */ > static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) > { > struct sctp_association *asoc = event->asoc; >- struct sk_buff *skb = sctp_event2skb(event); > >- atomic_sub(skb->truesize, &asoc->rmem_alloc); >+ atomic_sub(event->rmem_len, &asoc->rmem_alloc); > sctp_association_put(asoc); > } > >@@ -369,7 +370,7 @@ > > /* Embed the event fields inside the cloned skb. */ > event = sctp_skb2event(skb); >- sctp_ulpevent_init(event, MSG_NOTIFICATION); >+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); > > sre = (struct sctp_remote_error *) > skb_push(skb, sizeof(struct sctp_remote_error)); >@@ -461,7 +462,7 @@ > > /* Embed the event fields inside the cloned skb. 
*/ > event = sctp_skb2event(skb); >- sctp_ulpevent_init(event, MSG_NOTIFICATION); >+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); > > ssf = (struct sctp_send_failed *) > skb_push(skb, sizeof(struct sctp_send_failed)); >@@ -618,6 +619,24 @@ > struct sctp_ulpevent *event = NULL; > struct sk_buff *skb; > size_t padding, len; >+ int rx_count; >+ >+ /* >+ * check to see if we need to make space for this >+ * new skb, expand the rcvbuffer if needed, or drop >+ * the frame >+ */ >+ if (asoc->ep->rcvbuf_policy) >+ rx_count = atomic_read(&asoc->rmem_alloc); >+ else >+ rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); >+ >+ if (rx_count >= asoc->base.sk->sk_rcvbuf) { >+ >+ if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || >+ (!sk_stream_rmem_schedule(asoc->base.sk, chunk->skb))) >+ goto fail; >+ } > > /* Clone the original skb, sharing the data. */ > skb = skb_clone(chunk->skb, gfp); >@@ -646,7 +665,7 @@ > event = sctp_skb2event(skb); > > /* Initialize event with flags 0. 
*/ >- sctp_ulpevent_init(event, 0); >+ sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); > > sctp_ulpevent_receive_data(event, asoc); > >--- linux-2.6.9/net/sctp/sysctl.c.orig 2007-07-20 09:44:15.000000000 -0400 >+++ linux-2.6.9/net/sctp/sysctl.c 2007-07-20 09:46:18.000000000 -0400 >@@ -48,6 +48,15 @@ > static long rto_timer_min = 1; > static long rto_timer_max = 86400000; /* One day */ > >+int sysctl_sctp_mem[3]; >+int sysctl_sctp_rmem[3]; >+int sysctl_sctp_wmem[3]; >+ >+/* >+ * per assoc memory limitationf for sends >+ */ >+int sysctl_sctp_wmem[3]; >+ > static ctl_table sctp_table[] = { > { > .ctl_name = NET_SCTP_RTO_INITIAL, >@@ -205,6 +214,30 @@ > .mode = 0555, > .child = sctp_table > }, >+ { >+ .ctl_name = NET_SCTP_BUF_MEM, >+ .procname = "sctp_mem", >+ .data = &sysctl_sctp_mem, >+ .maxlen = sizeof(sysctl_sctp_mem), >+ .mode = 0644, >+ .proc_handler = &proc_dointvec, >+ }, >+ { >+ .ctl_name = NET_SCTP_BUF_RMEM, >+ .procname = "sctp_rmem", >+ .data = &sysctl_sctp_rmem, >+ .maxlen = sizeof(sysctl_sctp_rmem), >+ .mode = 0644, >+ .proc_handler = &proc_dointvec, >+ }, >+ { >+ .ctl_name = NET_SCTP_BUF_WMEM, >+ .procname = "sctp_wmem", >+ .data = &sysctl_sctp_wmem, >+ .maxlen = sizeof(sysctl_sctp_wmem), >+ .mode = 0644, >+ .proc_handler = &proc_dointvec, >+ }, > { .ctl_name = 0 } > }; >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 249316
: 159798