From d6e711448137ca3301512cec41a2c2ce852b3d0a Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Thu, 23 Jun 2005 00:09:43 -0700 Subject: [PATCH] setuid core dump Add a new `suid_dumpable' sysctl: This value can be used to query and set the core dump mode for setuid or otherwise protected/tainted binaries. The modes are 0 - (default) - traditional behaviour. Any process which has changed privilege levels or is execute only will not be dumped 1 - (debug) - all processes dump core when possible. The core dump is owned by the current user and no security is applied. This is intended for system debugging situations only. Ptrace is unchecked. 2 - (suidsafe) - any binary which normally would not be dumped is dumped readable by root only. This allows the end user to remove such a dump but not access it directly. For security reasons core dumps in this mode will not overwrite one another or other files. This mode is appropriate when adminstrators are attempting to debug problems in a normal environment. (akpm: > > +EXPORT_SYMBOL(suid_dumpable); > > EXPORT_SYMBOL_GPL? No problem to me. > > if (current->euid == current->uid && current->egid == current->gid) > > current->mm->dumpable = 1; > > Should this be SUID_DUMP_USER? Actually the feedback I had from last time was that the SUID_ defines should go because its clearer to follow the numbers. They can go everywhere (and there are lots of places where dumpable is tested/used as a bool in untouched code) > Maybe this should be renamed to `dump_policy' or something. Doing that > would help us catch any code which isn't using the #defines, too. Fair comment. The patch was designed to be easy to maintain for Red Hat rather than for merging. Changing that field would create a gigantic diff because it is used all over the place. ) Signed-off-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sysctl.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux/sysctl.h') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index a17745c80a91..614e939c78a4 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -136,6 +136,7 @@ enum KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */ KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */ KERN_RANDOMIZE=68, /* int: randomize virtual address space */ + KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ }; -- cgit v1.2.3 From 317a76f9a44b437d6301718f4e5d08bd93f98da7 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 23 Jun 2005 12:19:55 -0700 Subject: [TCP]: Add pluggable congestion control algorithm infrastructure. Allow TCP to have multiple pluggable congestion control algorithms. Algorithms are defined by a set of operations and can be built in or modules. The legacy "new RENO" algorithm is used as a starting point and fallback. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/sysctl.h | 9 +- include/linux/tcp.h | 49 +-- include/net/tcp.h | 237 +++++---------- net/ipv4/Makefile | 3 +- net/ipv4/sysctl_net_ipv4.c | 114 +++---- net/ipv4/tcp.c | 2 + net/ipv4/tcp_cong.c | 195 ++++++++++++ net/ipv4/tcp_diag.c | 20 +- net/ipv4/tcp_input.c | 737 ++++----------------------------------------- net/ipv4/tcp_ipv4.c | 3 + net/ipv4/tcp_minisocks.c | 4 +- net/ipv4/tcp_output.c | 23 +- net/ipv6/tcp_ipv6.c | 2 +- 13 files changed, 399 insertions(+), 999 deletions(-) create mode 100644 net/ipv4/tcp_cong.c (limited to 'include/linux/sysctl.h') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 614e939c78a4..72965bfe6cfb 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -333,21 +333,14 @@ enum NET_TCP_FRTO=92, NET_TCP_LOW_LATENCY=93, NET_IPV4_IPFRAG_SECRET_INTERVAL=94, - NET_TCP_WESTWOOD=95, NET_IPV4_IGMP_MAX_MSF=96, NET_TCP_NO_METRICS_SAVE=97, - NET_TCP_VEGAS=98, - NET_TCP_VEGAS_ALPHA=99, - NET_TCP_VEGAS_BETA=100, - NET_TCP_VEGAS_GAMMA=101, - NET_TCP_BIC=102, - NET_TCP_BIC_FAST_CONVERGENCE=103, - NET_TCP_BIC_LOW_WINDOW=104, NET_TCP_DEFAULT_WIN_SCALE=105, NET_TCP_MODERATE_RCVBUF=106, NET_TCP_TSO_WIN_DIVISOR=107, NET_TCP_BIC_BETA=108, NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109, + NET_TCP_CONG_CONTROL=110, }; enum { diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 97a7c9e03df5..3ea75dd6640a 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -203,13 +203,6 @@ struct tcp_sack_block { __u32 end_seq; }; -enum tcp_congestion_algo { - TCP_RENO=0, - TCP_VEGAS, - TCP_WESTWOOD, - TCP_BIC, -}; - struct tcp_options_received { /* PAWS/RTTM data */ long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ @@ -305,7 +298,7 @@ struct tcp_sock { __u8 reordering; /* Packet reordering metric. */ __u8 frto_counter; /* Number of new acks after RTO */ - __u8 adv_cong; /* Using Vegas, Westwood, or BIC */ + __u8 unused; __u8 defer_accept; /* User waits for some data after accept() */ /* RTT measurement */ @@ -401,37 +394,10 @@ struct tcp_sock { __u32 time; } rcvq_space; -/* TCP Westwood structure */ - struct { - __u32 bw_ns_est; /* first bandwidth estimation..not too smoothed 8) */ - __u32 bw_est; /* bandwidth estimate */ - __u32 rtt_win_sx; /* here starts a new evaluation... 
*/ - __u32 bk; - __u32 snd_una; /* used for evaluating the number of acked bytes */ - __u32 cumul_ack; - __u32 accounted; - __u32 rtt; - __u32 rtt_min; /* minimum observed RTT */ - } westwood; - -/* Vegas variables */ - struct { - __u32 beg_snd_nxt; /* right edge during last RTT */ - __u32 beg_snd_una; /* left edge during last RTT */ - __u32 beg_snd_cwnd; /* saves the size of the cwnd */ - __u8 doing_vegas_now;/* if true, do vegas for this RTT */ - __u16 cntRTT; /* # of RTTs measured within last RTT */ - __u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ - __u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */ - } vegas; - - /* BI TCP Parameters */ - struct { - __u32 cnt; /* increase cwnd by 1 after this number of ACKs */ - __u32 last_max_cwnd; /* last maximium snd_cwnd */ - __u32 last_cwnd; /* the last snd_cwnd */ - __u32 last_stamp; /* time when updated last_cwnd */ - } bictcp; + /* Pluggable TCP congestion control hook */ + struct tcp_congestion_ops *ca_ops; + u32 ca_priv[16]; +#define TCP_CA_PRIV_SIZE (16*sizeof(u32)) }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) @@ -439,6 +405,11 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) return (struct tcp_sock *)sk; } +static inline void *tcp_ca(const struct tcp_sock *tp) +{ + return (void *) tp->ca_priv; +} + #endif #endif /* _LINUX_TCP_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index f730935b824a..e427cf35915c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -505,25 +505,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) #else # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG) #endif - -#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation - * max_cwnd = snd_cwnd * beta - */ -#define BICTCP_MAX_INCREMENT 32 /* - * Limit on the amount of - * increment allowed during - * binary search. 
- */ -#define BICTCP_FUNC_OF_MIN_INCR 11 /* - * log(B/Smin)/log(B/(B-1))+1, - * Smin:min increment - * B:log factor - */ -#define BICTCP_B 4 /* - * In binary search, - * go to point (max+min)/N - */ - /* * TCP option */ @@ -596,16 +577,7 @@ extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; extern int sysctl_tcp_low_latency; -extern int sysctl_tcp_westwood; -extern int sysctl_tcp_vegas_cong_avoid; -extern int sysctl_tcp_vegas_alpha; -extern int sysctl_tcp_vegas_beta; -extern int sysctl_tcp_vegas_gamma; extern int sysctl_tcp_nometrics_save; -extern int sysctl_tcp_bic; -extern int sysctl_tcp_bic_fast_convergence; -extern int sysctl_tcp_bic_low_window; -extern int sysctl_tcp_bic_beta; extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_tso_win_divisor; @@ -1136,6 +1108,80 @@ static inline void tcp_packets_out_dec(struct tcp_sock *tp, tp->packets_out -= tcp_skb_pcount(skb); } +/* Events passed to congestion control interface */ +enum tcp_ca_event { + CA_EVENT_TX_START, /* first transmit when no packets in flight */ + CA_EVENT_CWND_RESTART, /* congestion window restart */ + CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ + CA_EVENT_FRTO, /* fast recovery timeout */ + CA_EVENT_LOSS, /* loss timeout */ + CA_EVENT_FAST_ACK, /* in sequence ack */ + CA_EVENT_SLOW_ACK, /* other ack */ +}; + +/* + * Interface for adding new TCP congestion control handlers + */ +#define TCP_CA_NAME_MAX 16 +struct tcp_congestion_ops { + struct list_head list; + + /* initialize private data (optional) */ + void (*init)(struct tcp_sock *tp); + /* cleanup private data (optional) */ + void (*release)(struct tcp_sock *tp); + + /* return slow start threshold (required) */ + u32 (*ssthresh)(struct tcp_sock *tp); + /* lower bound for congestion window (optional) */ + u32 (*min_cwnd)(struct tcp_sock *tp); + /* do new cwnd calculation (required) */ + void (*cong_avoid)(struct tcp_sock *tp, u32 ack, + u32 rtt, u32 in_flight, int good_ack); + /* round trip time sample per acked packet (optional) */ + void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt); + /* call before changing ca_state (optional) */ + void (*set_state)(struct tcp_sock *tp, u8 new_state); + /* call when cwnd event occurs (optional) */ + void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev); + /* new value of cwnd after loss (optional) */ + u32 (*undo_cwnd)(struct tcp_sock *tp); + /* hook for packet ack accounting (optional) */ + void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked); + /* get info for tcp_diag (optional) */ + void (*get_info)(struct tcp_sock *tp, u32 ext, struct sk_buff *skb); + + char name[TCP_CA_NAME_MAX]; + struct module *owner; +}; + +extern int tcp_register_congestion_control(struct tcp_congestion_ops *type); +extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); + +extern void tcp_init_congestion_control(struct tcp_sock *tp); +extern void tcp_cleanup_congestion_control(struct tcp_sock *tp); +extern int tcp_set_default_congestion_control(const char *name); +extern void tcp_get_default_congestion_control(char *name); + +extern struct tcp_congestion_ops tcp_reno; +extern u32 tcp_reno_ssthresh(struct tcp_sock *tp); +extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, + u32 rtt, u32 in_flight, int flag); +extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp); + +static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) +{ + if (tp->ca_ops->set_state) + tp->ca_ops->set_state(tp, ca_state); + tp->ca_state = ca_state; +} + +static inline 
void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event) +{ + if (tp->ca_ops->cwnd_event) + tp->ca_ops->cwnd_event(tp, event); +} + /* This determines how many packets are "in the network" to the best * of our knowledge. In many cases it is conservative, but where * detailed information is available from the receiver (via SACK @@ -1155,91 +1201,6 @@ static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) return (tp->packets_out - tp->left_out + tp->retrans_out); } -/* - * Which congestion algorithim is in use on the connection. - */ -#define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS) -#define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD) -#define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC) - -/* Recalculate snd_ssthresh, we want to set it to: - * - * Reno: - * one half the current congestion window, but no - * less than two segments - * - * BIC: - * behave like Reno until low_window is reached, - * then increase congestion window slowly - */ -static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp) -{ - if (tcp_is_bic(tp)) { - if (sysctl_tcp_bic_fast_convergence && - tp->snd_cwnd < tp->bictcp.last_max_cwnd) - tp->bictcp.last_max_cwnd = (tp->snd_cwnd * - (BICTCP_BETA_SCALE - + sysctl_tcp_bic_beta)) - / (2 * BICTCP_BETA_SCALE); - else - tp->bictcp.last_max_cwnd = tp->snd_cwnd; - - if (tp->snd_cwnd > sysctl_tcp_bic_low_window) - return max((tp->snd_cwnd * sysctl_tcp_bic_beta) - / BICTCP_BETA_SCALE, 2U); - } - - return max(tp->snd_cwnd >> 1U, 2U); -} - -/* Stop taking Vegas samples for now. */ -#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0) - -static inline void tcp_vegas_enable(struct tcp_sock *tp) -{ - /* There are several situations when we must "re-start" Vegas: - * - * o when a connection is established - * o after an RTO - * o after fast recovery - * o when we send a packet and there is no outstanding - * unacknowledged data (restarting an idle connection) - * - * In these circumstances we cannot do a Vegas calculation at the - * end of the first RTT, because any calculation we do is using - * stale info -- both the saved cwnd and congestion feedback are - * stale. - * - * Instead we must wait until the completion of an RTT during - * which we actually receive ACKs. - */ - - /* Begin taking Vegas samples next time we send something. */ - tp->vegas.doing_vegas_now = 1; - - /* Set the beginning of the next send window. */ - tp->vegas.beg_snd_nxt = tp->snd_nxt; - - tp->vegas.cntRTT = 0; - tp->vegas.minRTT = 0x7fffffff; -} - -/* Should we be taking Vegas samples right now? */ -#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now) - -extern void tcp_ca_init(struct tcp_sock *tp); - -static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) -{ - if (tcp_is_vegas(tp)) { - if (ca_state == TCP_CA_Open) - tcp_vegas_enable(tp); - else - tcp_vegas_disable(tp); - } - tp->ca_state = ca_state; -} - /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. * The exception is rate halving phase, when cwnd is decreasing towards * ssthresh. 
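As an illustration of the tcp_congestion_ops interface declared above: a minimal out-of-tree algorithm only has to supply the required ssthresh/cong_avoid/min_cwnd hooks, keep any per-connection state in the TCP_CA_PRIV_SIZE private area reached through tcp_ca(), and register itself from its module init. The sketch below is hypothetical: the "simple" name, the state layout and the choice to fall back on the exported Reno helpers are assumptions for illustration, not part of this patch.

/*
 * Hypothetical example module written against the interface added by this
 * patch.  It behaves like Reno but keeps a private ACK counter per connection.
 */
#include <linux/module.h>
#include <net/tcp.h>

struct simple_state {
	u32 acked;			/* must fit in TCP_CA_PRIV_SIZE (64 bytes) */
};

static void simple_init(struct tcp_sock *tp)
{
	struct simple_state *s = tcp_ca(tp);	/* points into tp->ca_priv[] */

	s->acked = 0;
}

/* Required: slow start threshold after loss (same policy as tcp_reno_ssthresh). */
static u32 simple_ssthresh(struct tcp_sock *tp)
{
	return max(tp->snd_cwnd >> 1U, 2U);
}

/* Also checked by tcp_register_congestion_control(): bound used by tcp_cwnd_down(). */
static u32 simple_min_cwnd(struct tcp_sock *tp)
{
	return tp->snd_ssthresh / 2;
}

/* Required: cwnd growth; here we count ACKs and defer to the Reno fallback. */
static void simple_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
			      u32 in_flight, int good_ack)
{
	struct simple_state *s = tcp_ca(tp);

	s->acked++;
	tcp_reno_cong_avoid(tp, ack, rtt, in_flight, good_ack);
}

static struct tcp_congestion_ops tcp_simple = {
	.name		= "simple",
	.owner		= THIS_MODULE,
	.init		= simple_init,
	.ssthresh	= simple_ssthresh,
	.min_cwnd	= simple_min_cwnd,
	.cong_avoid	= simple_cong_avoid,
};

static int __init simple_register(void)
{
	return tcp_register_congestion_control(&tcp_simple);
}

static void __exit simple_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_simple);
}

module_init(simple_register);
module_exit(simple_unregister);
MODULE_LICENSE("GPL");

Note that in this version both tcp_register_congestion_control() (via list_add_rcu) and tcp_set_default_congestion_control() (via list_move) place an algorithm at the head of tcp_cong_list, which is where tcp_init_congestion_control() starts looking when a new connection is set up, so loading or selecting an algorithm only affects sockets created afterwards.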
@@ -1288,7 +1249,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) static inline void __tcp_enter_cwr(struct tcp_sock *tp) { tp->undo_marker = 0; - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1U); tp->snd_cwnd_cnt = 0; @@ -1876,52 +1837,4 @@ struct tcp_iter_state { extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo); extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo); -/* TCP Westwood functions and constants */ - -#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */ -#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */ - -static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq) -{ - if (tcp_is_westwood(tp)) - tp->westwood.rtt = rtt_seq; -} - -static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp) -{ - return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) / - (__u32) (tp->mss_cache_std), - 2U); -} - -static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp) -{ - return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0; -} - -static inline int tcp_westwood_ssthresh(struct tcp_sock *tp) -{ - __u32 ssthresh = 0; - - if (tcp_is_westwood(tp)) { - ssthresh = __tcp_westwood_bw_rttmin(tp); - if (ssthresh) - tp->snd_ssthresh = ssthresh; - } - - return (ssthresh != 0); -} - -static inline int tcp_westwood_cwnd(struct tcp_sock *tp) -{ - __u32 cwnd = 0; - - if (tcp_is_westwood(tp)) { - cwnd = __tcp_westwood_bw_rttmin(tp); - if (cwnd) - tp->snd_cwnd = cwnd; - } - - return (cwnd != 0); -} #endif /* _TCP_H */ diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 65d57d8e1add..89c0b4cb470e 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -5,7 +5,8 @@ obj-y := utils.o route.o inetpeer.o protocol.o \ ip_input.o ip_fragment.o ip_forward.o ip_options.o \ ip_output.o ip_sockglue.o \ - tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o tcp_minisocks.o \ + tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ + tcp_minisocks.o tcp_cong.o \ datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \ sysctl_net_ipv4.o fib_frontend.o fib_semantics.o diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 23068bddbf0b..e32894532416 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -118,6 +118,45 @@ static int ipv4_sysctl_forward_strategy(ctl_table *table, return 1; } +static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file * filp, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + char val[TCP_CA_NAME_MAX]; + ctl_table tbl = { + .data = val, + .maxlen = TCP_CA_NAME_MAX, + }; + int ret; + + tcp_get_default_congestion_control(val); + + ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos); + if (write && ret == 0) + ret = tcp_set_default_congestion_control(val); + return ret; +} + +int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, int nlen, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen, + void **context) +{ + char val[TCP_CA_NAME_MAX]; + ctl_table tbl = { + .data = val, + .maxlen = TCP_CA_NAME_MAX, + }; + int ret; + + tcp_get_default_congestion_control(val); + ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen, + context); + if (ret == 0 && newval && newlen) + ret = tcp_set_default_congestion_control(val); + return ret; +} + + ctl_table ipv4_table[] = { { .ctl_name = NET_IPV4_TCP_TIMESTAMPS, @@ -611,70 +650,6 @@ ctl_table 
ipv4_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, - { - .ctl_name = NET_TCP_WESTWOOD, - .procname = "tcp_westwood", - .data = &sysctl_tcp_westwood, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS, - .procname = "tcp_vegas_cong_avoid", - .data = &sysctl_tcp_vegas_cong_avoid, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS_ALPHA, - .procname = "tcp_vegas_alpha", - .data = &sysctl_tcp_vegas_alpha, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS_BETA, - .procname = "tcp_vegas_beta", - .data = &sysctl_tcp_vegas_beta, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_VEGAS_GAMMA, - .procname = "tcp_vegas_gamma", - .data = &sysctl_tcp_vegas_gamma, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_BIC, - .procname = "tcp_bic", - .data = &sysctl_tcp_bic, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_BIC_FAST_CONVERGENCE, - .procname = "tcp_bic_fast_convergence", - .data = &sysctl_tcp_bic_fast_convergence, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_TCP_BIC_LOW_WINDOW, - .procname = "tcp_bic_low_window", - .data = &sysctl_tcp_bic_low_window, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, { .ctl_name = NET_TCP_MODERATE_RCVBUF, .procname = "tcp_moderate_rcvbuf", @@ -692,13 +667,14 @@ ctl_table ipv4_table[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_TCP_BIC_BETA, - .procname = "tcp_bic_beta", - .data = &sysctl_tcp_bic_beta, - .maxlen = sizeof(int), + .ctl_name = NET_TCP_CONG_CONTROL, + .procname = "tcp_congestion_control", .mode = 0644, - .proc_handler = &proc_dointvec, + .maxlen = TCP_CA_NAME_MAX, + .proc_handler = &proc_tcp_congestion_control, + .strategy = &sysctl_tcp_congestion_control, }, + { .ctl_name = 0 } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 674bbd8cfd36..f3dbc8dc1263 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2333,6 +2333,8 @@ void __init tcp_init(void) printk(KERN_INFO "TCP: Hash tables configured " "(established %d bind %d)\n", tcp_ehash_size << 1, tcp_bhash_size); + + tcp_register_congestion_control(&tcp_reno); } EXPORT_SYMBOL(tcp_accept); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c new file mode 100644 index 000000000000..665394a63ae4 --- /dev/null +++ b/net/ipv4/tcp_cong.c @@ -0,0 +1,195 @@ +/* + * Plugable TCP congestion control support and newReno + * congestion control. + * Based on ideas from I/O scheduler suport and Web100. + * + * Copyright (C) 2005 Stephen Hemminger + */ + +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(tcp_cong_list_lock); +static LIST_HEAD(tcp_cong_list); + +/* Simple linear search, don't expect many entries! */ +static struct tcp_congestion_ops *tcp_ca_find(const char *name) +{ + struct tcp_congestion_ops *e; + + list_for_each_entry(e, &tcp_cong_list, list) { + if (strcmp(e->name, name) == 0) + return e; + } + + return NULL; +} + +/* + * Attach new congestion control algorthim to the list + * of available options. 
+ */ +int tcp_register_congestion_control(struct tcp_congestion_ops *ca) +{ + int ret = 0; + + /* all algorithms must implement ssthresh and cong_avoid ops */ + if (!ca->ssthresh || !ca->cong_avoid || !ca->min_cwnd) { + printk(KERN_ERR "TCP %s does not implement required ops\n", + ca->name); + return -EINVAL; + } + + spin_lock(&tcp_cong_list_lock); + if (tcp_ca_find(ca->name)) { + printk(KERN_NOTICE "TCP %s already registered\n", ca->name); + ret = -EEXIST; + } else { + list_add_rcu(&ca->list, &tcp_cong_list); + printk(KERN_INFO "TCP %s registered\n", ca->name); + } + spin_unlock(&tcp_cong_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(tcp_register_congestion_control); + +/* + * Remove congestion control algorithm, called from + * the module's remove function. Module ref counts are used + * to ensure that this can't be done till all sockets using + * that method are closed. + */ +void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca) +{ + spin_lock(&tcp_cong_list_lock); + list_del_rcu(&ca->list); + spin_unlock(&tcp_cong_list_lock); +} +EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control); + +/* Assign choice of congestion control. */ +void tcp_init_congestion_control(struct tcp_sock *tp) +{ + struct tcp_congestion_ops *ca; + + rcu_read_lock(); + list_for_each_entry_rcu(ca, &tcp_cong_list, list) { + if (try_module_get(ca->owner)) { + tp->ca_ops = ca; + break; + } + + } + rcu_read_unlock(); + + if (tp->ca_ops->init) + tp->ca_ops->init(tp); +} + +/* Manage refcounts on socket close. */ +void tcp_cleanup_congestion_control(struct tcp_sock *tp) +{ + if (tp->ca_ops->release) + tp->ca_ops->release(tp); + module_put(tp->ca_ops->owner); +} + +/* Used by sysctl to change default congestion control */ +int tcp_set_default_congestion_control(const char *name) +{ + struct tcp_congestion_ops *ca; + int ret = -ENOENT; + + spin_lock(&tcp_cong_list_lock); + ca = tcp_ca_find(name); +#ifdef CONFIG_KMOD + if (!ca) { + spin_unlock(&tcp_cong_list_lock); + + request_module("tcp_%s", name); + spin_lock(&tcp_cong_list_lock); + ca = tcp_ca_find(name); + } +#endif + + if (ca) { + list_move(&ca->list, &tcp_cong_list); + ret = 0; + } + spin_unlock(&tcp_cong_list_lock); + + return ret; +} + +/* Get current default congestion control */ +void tcp_get_default_congestion_control(char *name) +{ + struct tcp_congestion_ops *ca; + /* We will always have reno... */ + BUG_ON(list_empty(&tcp_cong_list)); + + rcu_read_lock(); + ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list); + strncpy(name, ca->name, TCP_CA_NAME_MAX); + rcu_read_unlock(); +} + +/* + * TCP Reno congestion control + * This is special case used for fallback as well. + */ +/* This is Jacobson's slow start and congestion avoidance. + * SIGCOMM '88, p. 328. + */ +void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight, + int flag) +{ + if (in_flight < tp->snd_cwnd) + return; + + if (tp->snd_cwnd <= tp->snd_ssthresh) { + /* In "safe" area, increase. */ + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + } else { + /* In dangerous area, increase slowly. 
+ * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd + */ + if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + tp->snd_cwnd_cnt = 0; + } else + tp->snd_cwnd_cnt++; + } +} +EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); + +/* Slow start threshold is half the congestion window (min 2) */ +u32 tcp_reno_ssthresh(struct tcp_sock *tp) +{ + return max(tp->snd_cwnd >> 1U, 2U); +} +EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); + +/* Lower bound on congestion window. */ +u32 tcp_reno_min_cwnd(struct tcp_sock *tp) +{ + return tp->snd_ssthresh/2; +} +EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd); + +struct tcp_congestion_ops tcp_reno = { + .name = "reno", + .owner = THIS_MODULE, + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .min_cwnd = tcp_reno_min_cwnd, +}; + +EXPORT_SYMBOL_GPL(tcp_reno); diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 634befc07921..867acc0f79d8 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -42,7 +42,6 @@ struct tcpdiag_entry static struct sock *tcpnl; - #define TCPDIAG_PUT(skb, attrtype, attrlen) \ ({ int rtalen = RTA_LENGTH(attrlen); \ struct rtattr *rta; \ @@ -61,7 +60,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, struct nlmsghdr *nlh; struct tcp_info *info = NULL; struct tcpdiag_meminfo *minfo = NULL; - struct tcpvegas_info *vinfo = NULL; unsigned char *b = skb->tail; nlh = NLMSG_PUT(skb, pid, seq, TCPDIAG_GETSOCK, sizeof(*r)); @@ -73,9 +71,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, if (ext & (1<<(TCPDIAG_INFO-1))) info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info)); - if ((tcp_is_westwood(tp) || tcp_is_vegas(tp)) - && (ext & (1<<(TCPDIAG_VEGASINFO-1)))) - vinfo = TCPDIAG_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*vinfo)); } r->tcpdiag_family = sk->sk_family; r->tcpdiag_state = sk->sk_state; @@ -166,19 +161,8 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, if (info) tcp_get_info(sk, info); - if (vinfo) { - if (tcp_is_vegas(tp)) { - vinfo->tcpv_enabled = tp->vegas.doing_vegas_now; - vinfo->tcpv_rttcnt = tp->vegas.cntRTT; - vinfo->tcpv_rtt = jiffies_to_usecs(tp->vegas.baseRTT); - vinfo->tcpv_minrtt = jiffies_to_usecs(tp->vegas.minRTT); - } else { - vinfo->tcpv_enabled = 0; - vinfo->tcpv_rttcnt = 0; - vinfo->tcpv_rtt = jiffies_to_usecs(tp->westwood.rtt); - vinfo->tcpv_minrtt = jiffies_to_usecs(tp->westwood.rtt_min); - } - } + if (sk->sk_state < TCP_TIME_WAIT && tp->ca_ops->get_info) + tp->ca_ops->get_info(tp, ext, skb); nlh->nlmsg_len = skb->tail - b; return skb->len; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5bad504630a3..7bbbbc33eb4b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -61,7 +61,6 @@ * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. * Pasi Sarolahti: F-RTO for dealing with spurious RTOs - * Angelo Dell'Aera: TCP Westwood+ support */ #include @@ -88,23 +87,9 @@ int sysctl_tcp_rfc1337; int sysctl_tcp_max_orphans = NR_FILE; int sysctl_tcp_frto; int sysctl_tcp_nometrics_save; -int sysctl_tcp_westwood; -int sysctl_tcp_vegas_cong_avoid; int sysctl_tcp_moderate_rcvbuf = 1; -/* Default values of the Vegas variables, in fixed-point representation - * with V_PARAM_SHIFT bits to the right of the binary point. 
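The default algorithm is switched at run time through the tcp_congestion_control entry added to ipv4_table above: writing a name reaches tcp_set_default_congestion_control(), which may request_module("tcp_<name>") and then moves that algorithm to the front of tcp_cong_list. A small user-space sketch follows; the /proc path is implied by the ipv4_table registration, and the program itself is only illustrative.

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/ipv4/tcp_congestion_control";
	char cur[16];			/* TCP_CA_NAME_MAX in the kernel */
	FILE *f;

	/* Read the current default (the head of tcp_cong_list). */
	f = fopen(path, "r");
	if (f) {
		if (fgets(cur, sizeof(cur), f))
			printf("current default: %s", cur);
		fclose(f);
	}

	/* Make "reno" (registered unconditionally in tcp_init()) the default. */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "reno\n");
	return fclose(f) ? 1 : 0;
}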
- */ -#define V_PARAM_SHIFT 1 -int sysctl_tcp_vegas_alpha = 1<snd_cwnd_stamp = tcp_time_stamp; } -static void init_bictcp(struct tcp_sock *tp) -{ - tp->bictcp.cnt = 0; - - tp->bictcp.last_max_cwnd = 0; - tp->bictcp.last_cwnd = 0; - tp->bictcp.last_stamp = 0; -} - /* 5. Recalculate window clamp after socket hit its memory bounds. */ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) { @@ -558,45 +534,6 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_ tcp_grow_window(sk, tp, skb); } -/* When starting a new connection, pin down the current choice of - * congestion algorithm. - */ -void tcp_ca_init(struct tcp_sock *tp) -{ - if (sysctl_tcp_westwood) - tp->adv_cong = TCP_WESTWOOD; - else if (sysctl_tcp_bic) - tp->adv_cong = TCP_BIC; - else if (sysctl_tcp_vegas_cong_avoid) { - tp->adv_cong = TCP_VEGAS; - tp->vegas.baseRTT = 0x7fffffff; - tcp_vegas_enable(tp); - } -} - -/* Do RTT sampling needed for Vegas. - * Basically we: - * o min-filter RTT samples from within an RTT to get the current - * propagation delay + queuing delay (we are min-filtering to try to - * avoid the effects of delayed ACKs) - * o min-filter RTT samples from a much longer window (forever for now) - * to find the propagation delay (baseRTT) - */ -static inline void vegas_rtt_calc(struct tcp_sock *tp, __u32 rtt) -{ - __u32 vrtt = rtt + 1; /* Never allow zero rtt or baseRTT */ - - /* Filter to find propagation delay: */ - if (vrtt < tp->vegas.baseRTT) - tp->vegas.baseRTT = vrtt; - - /* Find the min RTT during the last RTT to find - * the current prop. delay + queuing delay: - */ - tp->vegas.minRTT = min(tp->vegas.minRTT, vrtt); - tp->vegas.cntRTT++; -} - /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge @@ -606,13 +543,10 @@ static inline void vegas_rtt_calc(struct tcp_sock *tp, __u32 rtt) * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ -static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt) +static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt) { long m = mrtt; /* RTT */ - if (tcp_vegas_enabled(tp)) - vegas_rtt_calc(tp, mrtt); - /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. @@ -670,7 +604,8 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt) tp->rtt_seq = tp->snd_nxt; } - tcp_westwood_update_rtt(tp, tp->srtt >> 3); + if (tp->ca_ops->rtt_sample) + tp->ca_ops->rtt_sample(tp, *usrtt); } /* Calculate rto without backoff. 
This is the second half of Van Jacobson's @@ -1185,8 +1120,8 @@ void tcp_enter_frto(struct sock *sk) tp->snd_una == tp->high_seq || (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(tp); - if (!tcp_westwood_ssthresh(tp)) - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); + tcp_ca_event(tp, CA_EVENT_FRTO); } /* Have to clear retransmission markers here to keep the bookkeeping @@ -1252,8 +1187,6 @@ static void tcp_enter_frto_loss(struct sock *sk) tcp_set_ca_state(tp, TCP_CA_Loss); tp->high_seq = tp->frto_highmark; TCP_ECN_queue_cwr(tp); - - init_bictcp(tp); } void tcp_clear_retrans(struct tcp_sock *tp) @@ -1283,7 +1216,8 @@ void tcp_enter_loss(struct sock *sk, int how) if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(tp); - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); + tcp_ca_event(tp, CA_EVENT_LOSS); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; @@ -1596,28 +1530,14 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp) } /* Decrease cwnd each second ack. */ - static void tcp_cwnd_down(struct tcp_sock *tp) { int decr = tp->snd_cwnd_cnt + 1; - __u32 limit; - - /* - * TCP Westwood - * Here limit is evaluated as BWestimation*RTTmin (for obtaining it - * in packets we use mss_cache). If sysctl_tcp_westwood is off - * tcp_westwood_bw_rttmin() returns 0. In such case snd_ssthresh is - * still used as usual. It prevents other strange cases in which - * BWE*RTTmin could assume value 0. It should not happen but... - */ - - if (!(limit = tcp_westwood_bw_rttmin(tp))) - limit = tp->snd_ssthresh/2; tp->snd_cwnd_cnt = decr&1; decr >>= 1; - if (decr && tp->snd_cwnd > limit) + if (decr && tp->snd_cwnd > tp->ca_ops->min_cwnd(tp)) tp->snd_cwnd -= decr; tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); @@ -1654,8 +1574,8 @@ static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg) static void tcp_undo_cwr(struct tcp_sock *tp, int undo) { if (tp->prior_ssthresh) { - if (tcp_is_bic(tp)) - tp->snd_cwnd = max(tp->snd_cwnd, tp->bictcp.last_max_cwnd); + if (tp->ca_ops->undo_cwnd) + tp->snd_cwnd = tp->ca_ops->undo_cwnd(tp); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1); @@ -1767,11 +1687,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) static inline void tcp_complete_cwr(struct tcp_sock *tp) { - if (tcp_westwood_cwnd(tp)) - tp->snd_ssthresh = tp->snd_cwnd; - else - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); tp->snd_cwnd_stamp = tcp_time_stamp; + tcp_ca_event(tp, CA_EVENT_COMPLETE_CWR); } static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) @@ -1946,7 +1864,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, if (tp->ca_state < TCP_CA_CWR) { if (!(flag&FLAG_ECE)) tp->prior_ssthresh = tcp_current_ssthresh(tp); - tp->snd_ssthresh = tcp_recalc_ssthresh(tp); + tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); TCP_ECN_queue_cwr(tp); } @@ -1963,7 +1881,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, /* Read draft-ietf-tcplw-high-performance before mucking * with this code. 
(Superceeds RFC1323) */ -static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag) +static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) { __u32 seq_rtt; @@ -1983,13 +1901,13 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag) * in window is lost... Voila. --ANK (010210) */ seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; - tcp_rtt_estimator(tp, seq_rtt); + tcp_rtt_estimator(tp, seq_rtt, usrtt); tcp_set_rto(tp); tp->backoff = 0; tcp_bound_rto(tp); } -static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, int flag) +static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) { /* We don't have a timestamp. Can only use * packets that are not retransmitted to determine @@ -2003,338 +1921,29 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, int flag) if (flag & FLAG_RETRANS_DATA_ACKED) return; - tcp_rtt_estimator(tp, seq_rtt); + tcp_rtt_estimator(tp, seq_rtt, usrtt); tcp_set_rto(tp); tp->backoff = 0; tcp_bound_rto(tp); } static inline void tcp_ack_update_rtt(struct tcp_sock *tp, - int flag, s32 seq_rtt) + int flag, s32 seq_rtt, u32 *usrtt) { /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) - tcp_ack_saw_tstamp(tp, flag); + tcp_ack_saw_tstamp(tp, usrtt, flag); else if (seq_rtt >= 0) - tcp_ack_no_tstamp(tp, seq_rtt, flag); + tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); } -/* - * Compute congestion window to use. - * - * This is from the implementation of BICTCP in - * Lison-Xu, Kahaled Harfoush, and Injog Rhee. - * "Binary Increase Congestion Control for Fast, Long Distance - * Networks" in InfoComm 2004 - * Available from: - * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf - * - * Unless BIC is enabled and congestion window is large - * this behaves the same as the original Reno. - */ -static inline __u32 bictcp_cwnd(struct tcp_sock *tp) -{ - /* orignal Reno behaviour */ - if (!tcp_is_bic(tp)) - return tp->snd_cwnd; - - if (tp->bictcp.last_cwnd == tp->snd_cwnd && - (s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5)) - return tp->bictcp.cnt; - - tp->bictcp.last_cwnd = tp->snd_cwnd; - tp->bictcp.last_stamp = tcp_time_stamp; - - /* start off normal */ - if (tp->snd_cwnd <= sysctl_tcp_bic_low_window) - tp->bictcp.cnt = tp->snd_cwnd; - - /* binary increase */ - else if (tp->snd_cwnd < tp->bictcp.last_max_cwnd) { - __u32 dist = (tp->bictcp.last_max_cwnd - tp->snd_cwnd) - / BICTCP_B; - - if (dist > BICTCP_MAX_INCREMENT) - /* linear increase */ - tp->bictcp.cnt = tp->snd_cwnd / BICTCP_MAX_INCREMENT; - else if (dist <= 1U) - /* binary search increase */ - tp->bictcp.cnt = tp->snd_cwnd * BICTCP_FUNC_OF_MIN_INCR - / BICTCP_B; - else - /* binary search increase */ - tp->bictcp.cnt = tp->snd_cwnd / dist; - } else { - /* slow start amd linear increase */ - if (tp->snd_cwnd < tp->bictcp.last_max_cwnd + BICTCP_B) - /* slow start */ - tp->bictcp.cnt = tp->snd_cwnd * BICTCP_FUNC_OF_MIN_INCR - / BICTCP_B; - else if (tp->snd_cwnd < tp->bictcp.last_max_cwnd - + BICTCP_MAX_INCREMENT*(BICTCP_B-1)) - /* slow start */ - tp->bictcp.cnt = tp->snd_cwnd * (BICTCP_B-1) - / (tp->snd_cwnd-tp->bictcp.last_max_cwnd); - else - /* linear increase */ - tp->bictcp.cnt = tp->snd_cwnd / BICTCP_MAX_INCREMENT; - } - return tp->bictcp.cnt; -} - -/* This is Jacobson's slow start and congestion avoidance. - * SIGCOMM '88, p. 328. 
- */ -static inline void reno_cong_avoid(struct tcp_sock *tp) +static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, + u32 in_flight, int good) { - if (tp->snd_cwnd <= tp->snd_ssthresh) { - /* In "safe" area, increase. */ - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - } else { - /* In dangerous area, increase slowly. - * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd - */ - if (tp->snd_cwnd_cnt >= bictcp_cwnd(tp)) { - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - tp->snd_cwnd_cnt=0; - } else - tp->snd_cwnd_cnt++; - } + tp->ca_ops->cong_avoid(tp, ack, rtt, in_flight, good); tp->snd_cwnd_stamp = tcp_time_stamp; } -/* This is based on the congestion detection/avoidance scheme described in - * Lawrence S. Brakmo and Larry L. Peterson. - * "TCP Vegas: End to end congestion avoidance on a global internet." - * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, - * October 1995. Available from: - * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps - * - * See http://www.cs.arizona.edu/xkernel/ for their implementation. - * The main aspects that distinguish this implementation from the - * Arizona Vegas implementation are: - * o We do not change the loss detection or recovery mechanisms of - * Linux in any way. Linux already recovers from losses quite well, - * using fine-grained timers, NewReno, and FACK. - * o To avoid the performance penalty imposed by increasing cwnd - * only every-other RTT during slow start, we increase during - * every RTT during slow start, just like Reno. - * o Largely to allow continuous cwnd growth during slow start, - * we use the rate at which ACKs come back as the "actual" - * rate, rather than the rate at which data is sent. - * o To speed convergence to the right rate, we set the cwnd - * to achieve the right ("actual") rate when we exit slow start. - * o To filter out the noise caused by delayed ACKs, we use the - * minimum RTT sample observed during the last RTT to calculate - * the actual rate. - * o When the sender re-starts from idle, it waits until it has - * received ACKs for an entire flight of new data before making - * a cwnd adjustment decision. The original Vegas implementation - * assumed senders never went idle. - */ -static void vegas_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt) -{ - /* The key players are v_beg_snd_una and v_beg_snd_nxt. - * - * These are so named because they represent the approximate values - * of snd_una and snd_nxt at the beginning of the current RTT. More - * precisely, they represent the amount of data sent during the RTT. - * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt, - * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding - * bytes of data have been ACKed during the course of the RTT, giving - * an "actual" rate of: - * - * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration) - * - * Unfortunately, v_beg_snd_una is not exactly equal to snd_una, - * because delayed ACKs can cover more than one segment, so they - * don't line up nicely with the boundaries of RTTs. - * - * Another unfortunate fact of life is that delayed ACKs delay the - * advance of the left edge of our send window, so that the number - * of bytes we send in an RTT is often less than our cwnd will allow. - * So we keep track of our cwnd separately, in v_beg_snd_cwnd. - */ - - if (after(ack, tp->vegas.beg_snd_nxt)) { - /* Do the Vegas once-per-RTT cwnd adjustment. 
*/ - u32 old_wnd, old_snd_cwnd; - - - /* Here old_wnd is essentially the window of data that was - * sent during the previous RTT, and has all - * been acknowledged in the course of the RTT that ended - * with the ACK we just received. Likewise, old_snd_cwnd - * is the cwnd during the previous RTT. - */ - old_wnd = (tp->vegas.beg_snd_nxt - tp->vegas.beg_snd_una) / - tp->mss_cache_std; - old_snd_cwnd = tp->vegas.beg_snd_cwnd; - - /* Save the extent of the current window so we can use this - * at the end of the next RTT. - */ - tp->vegas.beg_snd_una = tp->vegas.beg_snd_nxt; - tp->vegas.beg_snd_nxt = tp->snd_nxt; - tp->vegas.beg_snd_cwnd = tp->snd_cwnd; - - /* Take into account the current RTT sample too, to - * decrease the impact of delayed acks. This double counts - * this sample since we count it for the next window as well, - * but that's not too awful, since we're taking the min, - * rather than averaging. - */ - vegas_rtt_calc(tp, seq_rtt); - - /* We do the Vegas calculations only if we got enough RTT - * samples that we can be reasonably sure that we got - * at least one RTT sample that wasn't from a delayed ACK. - * If we only had 2 samples total, - * then that means we're getting only 1 ACK per RTT, which - * means they're almost certainly delayed ACKs. - * If we have 3 samples, we should be OK. - */ - - if (tp->vegas.cntRTT <= 2) { - /* We don't have enough RTT samples to do the Vegas - * calculation, so we'll behave like Reno. - */ - if (tp->snd_cwnd > tp->snd_ssthresh) - tp->snd_cwnd++; - } else { - u32 rtt, target_cwnd, diff; - - /* We have enough RTT samples, so, using the Vegas - * algorithm, we determine if we should increase or - * decrease cwnd, and by how much. - */ - - /* Pluck out the RTT we are using for the Vegas - * calculations. This is the min RTT seen during the - * last RTT. Taking the min filters out the effects - * of delayed ACKs, at the cost of noticing congestion - * a bit later. - */ - rtt = tp->vegas.minRTT; - - /* Calculate the cwnd we should have, if we weren't - * going too fast. - * - * This is: - * (actual rate in segments) * baseRTT - * We keep it as a fixed point number with - * V_PARAM_SHIFT bits to the right of the binary point. - */ - target_cwnd = ((old_wnd * tp->vegas.baseRTT) - << V_PARAM_SHIFT) / rtt; - - /* Calculate the difference between the window we had, - * and the window we would like to have. This quantity - * is the "Diff" from the Arizona Vegas papers. - * - * Again, this is a fixed point number with - * V_PARAM_SHIFT bits to the right of the binary - * point. - */ - diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; - - if (tp->snd_cwnd < tp->snd_ssthresh) { - /* Slow start. */ - if (diff > sysctl_tcp_vegas_gamma) { - /* Going too fast. Time to slow down - * and switch to congestion avoidance. - */ - tp->snd_ssthresh = 2; - - /* Set cwnd to match the actual rate - * exactly: - * cwnd = (actual rate) * baseRTT - * Then we add 1 because the integer - * truncation robs us of full link - * utilization. - */ - tp->snd_cwnd = min(tp->snd_cwnd, - (target_cwnd >> - V_PARAM_SHIFT)+1); - - } - } else { - /* Congestion avoidance. */ - u32 next_snd_cwnd; - - /* Figure out where we would like cwnd - * to be. - */ - if (diff > sysctl_tcp_vegas_beta) { - /* The old window was too fast, so - * we slow down. - */ - next_snd_cwnd = old_snd_cwnd - 1; - } else if (diff < sysctl_tcp_vegas_alpha) { - /* We don't have enough extra packets - * in the network, so speed up. 
- */ - next_snd_cwnd = old_snd_cwnd + 1; - } else { - /* Sending just as fast as we - * should be. - */ - next_snd_cwnd = old_snd_cwnd; - } - - /* Adjust cwnd upward or downward, toward the - * desired value. - */ - if (next_snd_cwnd > tp->snd_cwnd) - tp->snd_cwnd++; - else if (next_snd_cwnd < tp->snd_cwnd) - tp->snd_cwnd--; - } - } - - /* Wipe the slate clean for the next RTT. */ - tp->vegas.cntRTT = 0; - tp->vegas.minRTT = 0x7fffffff; - } - - /* The following code is executed for every ack we receive, - * except for conditions checked in should_advance_cwnd() - * before the call to tcp_cong_avoid(). Mainly this means that - * we only execute this code if the ack actually acked some - * data. - */ - - /* If we are in slow start, increase our cwnd in response to this ACK. - * (If we are not in slow start then we are in congestion avoidance, - * and adjust our congestion window only once per RTT. See the code - * above.) - */ - if (tp->snd_cwnd <= tp->snd_ssthresh) - tp->snd_cwnd++; - - /* to keep cwnd from growing without bound */ - tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); - - /* Make sure that we are never so timid as to reduce our cwnd below - * 2 MSS. - * - * Going below 2 MSS would risk huge delayed ACKs from our receiver. - */ - tp->snd_cwnd = max(tp->snd_cwnd, 2U); - - tp->snd_cwnd_stamp = tcp_time_stamp; -} - -static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt) -{ - if (tcp_vegas_enabled(tp)) - vegas_cong_avoid(tp, ack, seq_rtt); - else - reno_cong_avoid(tp); -} - /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ @@ -2415,13 +2024,18 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, /* Remove acknowledged frames from the retransmission queue. */ -static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) +static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; __u32 now = tcp_time_stamp; int acked = 0; __s32 seq_rtt = -1; + struct timeval usnow; + u32 pkts_acked = 0; + + if (seq_usrtt) + do_gettimeofday(&usnow); while ((skb = skb_peek(&sk->sk_write_queue)) && skb != sk->sk_send_head) { @@ -2448,6 +2062,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) */ if (!(scb->flags & TCPCB_FLAG_SYN)) { acked |= FLAG_DATA_ACKED; + ++pkts_acked; } else { acked |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; @@ -2461,6 +2076,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) seq_rtt = -1; } else if (seq_rtt < 0) seq_rtt = now - scb->when; + if (seq_usrtt) + *seq_usrtt = (usnow.tv_sec - skb->stamp.tv_sec) * 1000000 + + (usnow.tv_usec - skb->stamp.tv_usec); + if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= tcp_skb_pcount(skb); if (sacked & TCPCB_LOST) @@ -2479,8 +2098,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } if (acked&FLAG_ACKED) { - tcp_ack_update_rtt(tp, acked, seq_rtt); + tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); tcp_ack_packets_out(sk, tp); + + if (tp->ca_ops->pkts_acked) + tp->ca_ops->pkts_acked(tp, pkts_acked); } #if FASTRETRANS_DEBUG > 0 @@ -2624,257 +2246,6 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) tp->frto_counter = (tp->frto_counter + 1) % 3; } -/* - * TCP Westwood+ - */ - -/* - * @init_westwood - * This function initializes fields used in TCP Westwood+. We can't - * get no information about RTTmin at this time so we simply set it to - * TCP_WESTWOOD_INIT_RTT. 
This value was chosen to be too conservative - * since in this way we're sure it will be updated in a consistent - * way as soon as possible. It will reasonably happen within the first - * RTT period of the connection lifetime. - */ - -static void init_westwood(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.bw_ns_est = 0; - tp->westwood.bw_est = 0; - tp->westwood.accounted = 0; - tp->westwood.cumul_ack = 0; - tp->westwood.rtt_win_sx = tcp_time_stamp; - tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT; - tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT; - tp->westwood.snd_una = tp->snd_una; -} - -/* - * @westwood_do_filter - * Low-pass filter. Implemented using constant coeffients. - */ - -static inline __u32 westwood_do_filter(__u32 a, __u32 b) -{ - return (((7 * a) + b) >> 3); -} - -static void westwood_filter(struct sock *sk, __u32 delta) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.bw_ns_est = - westwood_do_filter(tp->westwood.bw_ns_est, - tp->westwood.bk / delta); - tp->westwood.bw_est = - westwood_do_filter(tp->westwood.bw_est, - tp->westwood.bw_ns_est); -} - -/* - * @westwood_update_rttmin - * It is used to update RTTmin. In this case we MUST NOT use - * WESTWOOD_RTT_MIN minimum bound since we could be on a LAN! - */ - -static inline __u32 westwood_update_rttmin(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - __u32 rttmin = tp->westwood.rtt_min; - - if (tp->westwood.rtt != 0 && - (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)) - rttmin = tp->westwood.rtt; - - return rttmin; -} - -/* - * @westwood_acked - * Evaluate increases for dk. - */ - -static inline __u32 westwood_acked(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - - return tp->snd_una - tp->westwood.snd_una; -} - -/* - * @westwood_new_window - * It evaluates if we are receiving data inside the same RTT window as - * when we started. - * Return value: - * It returns 0 if we are still evaluating samples in the same RTT - * window, 1 if the sample has to be considered in the next window. - */ - -static int westwood_new_window(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - __u32 left_bound; - __u32 rtt; - int ret = 0; - - left_bound = tp->westwood.rtt_win_sx; - rtt = max(tp->westwood.rtt, (u32) TCP_WESTWOOD_RTT_MIN); - - /* - * A RTT-window has passed. Be careful since if RTT is less than - * 50ms we don't filter but we continue 'building the sample'. - * This minimum limit was choosen since an estimation on small - * time intervals is better to avoid... - * Obvioulsy on a LAN we reasonably will always have - * right_bound = left_bound + WESTWOOD_RTT_MIN - */ - - if ((left_bound + rtt) < tcp_time_stamp) - ret = 1; - - return ret; -} - -/* - * @westwood_update_window - * It updates RTT evaluation window if it is the right moment to do - * it. If so it calls filter for evaluating bandwidth. - */ - -static void __westwood_update_window(struct sock *sk, __u32 now) -{ - struct tcp_sock *tp = tcp_sk(sk); - __u32 delta = now - tp->westwood.rtt_win_sx; - - if (delta) { - if (tp->westwood.rtt) - westwood_filter(sk, delta); - - tp->westwood.bk = 0; - tp->westwood.rtt_win_sx = tcp_time_stamp; - } -} - - -static void westwood_update_window(struct sock *sk, __u32 now) -{ - if (westwood_new_window(sk)) - __westwood_update_window(sk, now); -} - -/* - * @__tcp_westwood_fast_bw - * It is called when we are in fast path. In particular it is called when - * header prediction is successfull. 
In such case infact update is - * straight forward and doesn't need any particular care. - */ - -static void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - westwood_update_window(sk, tcp_time_stamp); - - tp->westwood.bk += westwood_acked(sk); - tp->westwood.snd_una = tp->snd_una; - tp->westwood.rtt_min = westwood_update_rttmin(sk); -} - -static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) -{ - if (tcp_is_westwood(tcp_sk(sk))) - __tcp_westwood_fast_bw(sk, skb); -} - - -/* - * @westwood_dupack_update - * It updates accounted and cumul_ack when receiving a dupack. - */ - -static void westwood_dupack_update(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.accounted += tp->mss_cache_std; - tp->westwood.cumul_ack = tp->mss_cache_std; -} - -static inline int westwood_may_change_cumul(struct tcp_sock *tp) -{ - return (tp->westwood.cumul_ack > tp->mss_cache_std); -} - -static inline void westwood_partial_update(struct tcp_sock *tp) -{ - tp->westwood.accounted -= tp->westwood.cumul_ack; - tp->westwood.cumul_ack = tp->mss_cache_std; -} - -static inline void westwood_complete_update(struct tcp_sock *tp) -{ - tp->westwood.cumul_ack -= tp->westwood.accounted; - tp->westwood.accounted = 0; -} - -/* - * @westwood_acked_count - * This function evaluates cumul_ack for evaluating dk in case of - * delayed or partial acks. - */ - -static inline __u32 westwood_acked_count(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->westwood.cumul_ack = westwood_acked(sk); - - /* If cumul_ack is 0 this is a dupack since it's not moving - * tp->snd_una. - */ - if (!(tp->westwood.cumul_ack)) - westwood_dupack_update(sk); - - if (westwood_may_change_cumul(tp)) { - /* Partial or delayed ack */ - if (tp->westwood.accounted >= tp->westwood.cumul_ack) - westwood_partial_update(tp); - else - westwood_complete_update(tp); - } - - tp->westwood.snd_una = tp->snd_una; - - return tp->westwood.cumul_ack; -} - - -/* - * @__tcp_westwood_slow_bw - * It is called when something is going wrong..even if there could - * be no problems! Infact a simple delayed packet may trigger a - * dupack. But we need to be careful in such case. - */ - -static void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - westwood_update_window(sk, tcp_time_stamp); - - tp->westwood.bk += westwood_acked_count(sk); - tp->westwood.rtt_min = westwood_update_rttmin(sk); -} - -static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) -{ - if (tcp_is_westwood(tcp_sk(sk))) - __tcp_westwood_slow_bw(sk, skb); -} - /* This routine deals with incoming acks, but not outgoing ones. 
*/ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) { @@ -2884,6 +2255,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) u32 ack = TCP_SKB_CB(skb)->ack_seq; u32 prior_in_flight; s32 seq_rtt; + s32 seq_usrtt = 0; int prior_packets; /* If the ack is newer than sent or older than previous acks @@ -2902,9 +2274,10 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) */ tcp_update_wl(tp, ack, ack_seq); tp->snd_una = ack; - tcp_westwood_fast_bw(sk, skb); flag |= FLAG_WIN_UPDATE; + tcp_ca_event(tp, CA_EVENT_FAST_ACK); + NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); } else { if (ack_seq != TCP_SKB_CB(skb)->end_seq) @@ -2920,7 +2293,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) flag |= FLAG_ECE; - tcp_westwood_slow_bw(sk,skb); + tcp_ca_event(tp, CA_EVENT_SLOW_ACK); } /* We passed data and got it acked, remove any soft error @@ -2935,22 +2308,20 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) prior_in_flight = tcp_packets_in_flight(tp); /* See if we can take anything off of the retransmit queue. */ - flag |= tcp_clean_rtx_queue(sk, &seq_rtt); + flag |= tcp_clean_rtx_queue(sk, &seq_rtt, + tp->ca_ops->rtt_sample ? &seq_usrtt : NULL); if (tp->frto_counter) tcp_process_frto(sk, prior_snd_una); if (tcp_ack_is_dubious(tp, flag)) { /* Advanve CWND, if state allows this. */ - if ((flag & FLAG_DATA_ACKED) && - (tcp_vegas_enabled(tp) || prior_in_flight >= tp->snd_cwnd) && - tcp_may_raise_cwnd(tp, flag)) - tcp_cong_avoid(tp, ack, seq_rtt); + if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(tp, flag)) + tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 0); tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); } else { - if ((flag & FLAG_DATA_ACKED) && - (tcp_vegas_enabled(tp) || prior_in_flight >= tp->snd_cwnd)) - tcp_cong_avoid(tp, ack, seq_rtt); + if ((flag & FLAG_DATA_ACKED)) + tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 1); } if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) @@ -4552,6 +3923,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_init_metrics(sk); + tcp_init_congestion_control(tp); + /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ @@ -4708,9 +4081,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, if(tp->af_specific->conn_request(sk, skb) < 0) return 1; - init_westwood(sk); - init_bictcp(tp); - /* Now we have several options: In theory there is * nothing else in the frame. KA9Q has an option to * send data with the syn, BSD accepts data with the @@ -4732,9 +4102,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, goto discard; case TCP_SYN_SENT: - init_westwood(sk); - init_bictcp(tp); - queued = tcp_rcv_synsent_state_process(sk, skb, th, len); if (queued >= 0) return queued; @@ -4816,7 +4183,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, */ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !tp->srtt) - tcp_ack_saw_tstamp(tp, 0); + tcp_ack_saw_tstamp(tp, 0, 0); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; @@ -4828,6 +4195,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, tcp_init_metrics(sk); + tcp_init_congestion_control(tp); + /* Prevent spurious tcp_cwnd_restart() on * first data packet. 
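The reworked ACK path above is what feeds the optional hooks: tcp_ca_event() relays CA_EVENT_FAST_ACK and CA_EVENT_SLOW_ACK to cwnd_event(), tcp_clean_rtx_queue() reports how many packets each ACK covered to pkts_acked(), and, only when an algorithm supplies rtt_sample(), a microsecond RTT measured against the skb->stamp taken at transmit time. A hypothetical consumer of those callbacks (same assumed module context as the earlier sketch; the stats layout is invented, and the init hook is assumed to have zeroed the private area) might look like:

/* Per-connection counters kept in tp->ca_priv[]; 16 bytes, well under TCP_CA_PRIV_SIZE. */
struct ack_stats {
	u32 fast_acks;
	u32 slow_acks;
	u32 pkts;
	u32 min_usrtt;
};

static void example_cwnd_event(struct tcp_sock *tp, enum tcp_ca_event ev)
{
	struct ack_stats *st = tcp_ca(tp);

	if (ev == CA_EVENT_FAST_ACK)
		st->fast_acks++;
	else if (ev == CA_EVENT_SLOW_ACK)
		st->slow_acks++;
}

static void example_pkts_acked(struct tcp_sock *tp, u32 num_acked)
{
	struct ack_stats *st = tcp_ca(tp);

	st->pkts += num_acked;
}

static void example_rtt_sample(struct tcp_sock *tp, u32 usrtt)
{
	struct ack_stats *st = tcp_ca(tp);

	if (!st->min_usrtt || usrtt < st->min_usrtt)
		st->min_usrtt = usrtt;	/* usrtt is in microseconds */
}

These would be wired up as the .cwnd_event, .pkts_acked and .rtt_sample members of a tcp_congestion_ops. Note from the tcp_output.c hunk further down that merely providing rtt_sample makes tcp_transmit_skb() call do_gettimeofday() on every transmitted skb.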
*/ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2d41d5d6ad19..9122814c13ad 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2048,6 +2048,7 @@ static int tcp_v4_init_sock(struct sock *sk) tp->mss_cache_std = tp->mss_cache = 536; tp->reordering = sysctl_tcp_reordering; + tp->ca_ops = &tcp_reno; sk->sk_state = TCP_CLOSE; @@ -2070,6 +2071,8 @@ int tcp_v4_destroy_sock(struct sock *sk) tcp_clear_xmit_timers(sk); + tcp_cleanup_congestion_control(tp); + /* Cleanup up the write buffer. */ sk_stream_writequeue_purge(sk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index b3943e7562f3..f42a284164b7 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -774,6 +774,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->frto_counter = 0; newtp->frto_highmark = 0; + newtp->ca_ops = &tcp_reno; + tcp_set_ca_state(newtp, TCP_CA_Open); tcp_init_xmit_timers(newsk); skb_queue_head_init(&newtp->out_of_order_queue); @@ -842,8 +844,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, if (newtp->ecn_flags&TCP_ECN_OK) sock_set_flag(newsk, SOCK_NO_LARGESEND); - tcp_ca_init(newtp); - TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); } return newsk; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f17c6577e337..0e17c244875c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -111,8 +111,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) u32 restart_cwnd = tcp_init_cwnd(tp, dst); u32 cwnd = tp->snd_cwnd; - if (tcp_is_vegas(tp)) - tcp_vegas_enable(tp); + tcp_ca_event(tp, CA_EVENT_CWND_RESTART); tp->snd_ssthresh = tcp_current_ssthresh(tp); restart_cwnd = min(restart_cwnd, cwnd); @@ -280,6 +279,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) #define SYSCTL_FLAG_WSCALE 0x2 #define SYSCTL_FLAG_SACK 0x4 + /* If congestion control is doing timestamping */ + if (tp->ca_ops->rtt_sample) + do_gettimeofday(&skb->stamp); + sysctl_flags = 0; if (tcb->flags & TCPCB_FLAG_SYN) { tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS; @@ -304,17 +307,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); } - /* - * If the connection is idle and we are restarting, - * then we don't want to do any Vegas calculations - * until we get fresh RTT samples. So when we - * restart, we reset our Vegas state to a clean - * slate. After we get acks for this flight of - * packets, _then_ we can make Vegas calculations - * again. - */ - if (tcp_is_vegas(tp) && tcp_packets_in_flight(tp) == 0) - tcp_vegas_enable(tp); + if (tcp_packets_in_flight(tp) == 0) + tcp_ca_event(tp, CA_EVENT_TX_START); th = (struct tcphdr *) skb_push(skb, tcp_header_size); skb->h.th = th; @@ -521,6 +515,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len) * skbs, which it never sent before. --ANK */ TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; + buff->stamp = skb->stamp; if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) { tp->lost_out -= tcp_skb_pcount(skb); @@ -1449,7 +1444,6 @@ static inline void tcp_connect_init(struct sock *sk) tp->window_clamp = dst_metric(dst, RTAX_WINDOW); tp->advmss = dst_metric(dst, RTAX_ADVMSS); tcp_initialize_rcv_mss(sk); - tcp_ca_init(tp); tcp_select_initial_window(tcp_full_space(sk), tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), @@ -1503,7 +1497,6 @@ int tcp_connect(struct sock *sk) TCP_SKB_CB(buff)->end_seq = tp->write_seq; tp->snd_nxt = tp->write_seq; tp->pushed_seq = tp->write_seq; - tcp_ca_init(tp); /* Send it off. */ TCP_SKB_CB(buff)->when = tcp_time_stamp; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2414937f2a83..fce56039b0e9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2025,7 +2025,7 @@ static int tcp_v6_init_sock(struct sock *sk) sk->sk_state = TCP_CLOSE; tp->af_specific = &ipv6_specific; - + tp->ca_ops = &tcp_reno; sk->sk_write_space = sk_stream_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); -- cgit v1.2.3 From 51b0bdedb8e784d0d969a6b77151911130812400 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 23 Jun 2005 20:14:40 -0700 Subject: [NET]: Separate two usages of netdev_max_backlog. Separate out the two uses of netdev_max_backlog. One controls the upper bound on packets processed per softirq, the new name for this is netdev_budget; the other controls the limit on packets queued via netif_rx. Increase the max_backlog default to account for faster processors. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/linux/sysctl.h | 1 + net/core/dev.c | 6 +++--- net/core/sysctl_net_core.c | 9 +++++++++ 3 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include/linux/sysctl.h') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 72965bfe6cfb..ebfe1250f0a4 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -243,6 +243,7 @@ enum NET_CORE_MOD_CONG=16, NET_CORE_DEV_WEIGHT=17, NET_CORE_SOMAXCONN=18, + NET_CORE_BUDGET=19, }; /* /proc/sys/net/ethernet */ diff --git a/net/core/dev.c b/net/core/dev.c index 1a64508e527f..7016e0c36b3d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1346,7 +1346,8 @@ out: Receiver routines =======================================================================*/ -int netdev_max_backlog = 300; +int netdev_max_backlog = 1000; +int netdev_budget = 300; int weight_p = 64; /* old backlog weight */ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; @@ -1695,8 +1696,7 @@ static void net_rx_action(struct softirq_action *h) { struct softnet_data *queue = &__get_cpu_var(softnet_data); unsigned long start_time = jiffies; - int budget = netdev_max_backlog; - + int budget = netdev_budget; local_irq_disable(); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index fff63643a35c..8f817ad9f546 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -13,6 +13,7 @@ #ifdef CONFIG_SYSCTL extern int netdev_max_backlog; +extern int netdev_budget; extern int weight_p; extern int net_msg_cost; extern int net_msg_burst; @@ -124,6 +125,14 @@ ctl_table core_table[] = { .mode = 0644, .proc_handler = &proc_dointvec }, + { + .ctl_name = NET_CORE_BUDGET, + .procname = "netdev_budget", + .data = &netdev_budget, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec + }, { .ctl_name = 0 } }; -- cgit v1.2.3 From 2f85a42964dd43fed3a339701db046bee5a8b903 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Tue, 28 Jun 2005 13:24:23 -0700 Subject: [SCTP] Make init & delayed sack timeouts configurable by user. Signed-off-by: Vlad Yasevich Signed-off-by: Sridhar Samudrala Signed-off-by: David S. 
Miller --- include/linux/sysctl.h | 1 + include/net/sctp/constants.h | 18 +++--------------- include/net/sctp/structs.h | 4 ++++ net/sctp/endpointola.c | 13 +++++-------- net/sctp/protocol.c | 5 ++++- net/sctp/sysctl.c | 13 +++++++++++++ net/sctp/transport.c | 1 - 7 files changed, 30 insertions(+), 25 deletions(-) (limited to 'include/linux/sysctl.h') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index ebfe1250f0a4..5b5f434ac9a0 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -641,6 +641,7 @@ enum { NET_SCTP_ADDIP_ENABLE = 13, NET_SCTP_PRSCTP_ENABLE = 14, NET_SCTP_SNDBUF_POLICY = 15, + NET_SCTP_SACK_TIMEOUT = 16, }; /* /proc/sys/net/bridge */ diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 4868c7f7749d..5999e5684bbf 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -263,23 +263,11 @@ enum { SCTP_MIN_PMTU = 576 }; enum { SCTP_MAX_DUP_TSNS = 16 }; enum { SCTP_MAX_GABS = 16 }; -/* Here we define the default timers. */ +/* Heartbeat interval - 30 secs */ +#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (30 * HZ) -/* cookie timer def = ? seconds */ -#define SCTP_DEFAULT_TIMEOUT_T1_COOKIE (3 * HZ) - -/* init timer def = 3 seconds */ -#define SCTP_DEFAULT_TIMEOUT_T1_INIT (3 * HZ) - -/* shutdown timer def = 300 ms */ -#define SCTP_DEFAULT_TIMEOUT_T2_SHUTDOWN ((300 * HZ) / 1000) - -/* 0 seconds + RTO */ -#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (10 * HZ) - -/* recv timer def = 200ms (in usec) */ +/* Delayed sack timer - 200ms */ #define SCTP_DEFAULT_TIMEOUT_SACK ((200 * HZ) / 1000) -#define SCTP_DEFAULT_TIMEOUT_SACK_MAX ((500 * HZ) / 1000) /* 500 ms */ /* RTO.Initial - 3 seconds * RTO.Min - 1 second diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index dfad4d3c581c..47727c7cc628 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -161,6 +161,9 @@ extern struct sctp_globals { */ int sndbuf_policy; + /* Delayed SACK timeout 200ms default*/ + int sack_timeout; + /* HB.interval - 30 seconds */ int hb_interval; @@ -217,6 +220,7 @@ extern struct sctp_globals { #define sctp_sndbuf_policy (sctp_globals.sndbuf_policy) #define sctp_max_retrans_path (sctp_globals.max_retrans_path) #define sctp_max_retrans_init (sctp_globals.max_retrans_init) +#define sctp_sack_timeout (sctp_globals.sack_timeout) #define sctp_hb_interval (sctp_globals.hb_interval) #define sctp_max_instreams (sctp_globals.max_instreams) #define sctp_max_outstreams (sctp_globals.max_outstreams) diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 2ec0320fac3b..c44bf4165c6e 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -102,9 +102,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Set up the base timeout information. 
*/ ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = - SCTP_DEFAULT_TIMEOUT_T1_COOKIE; + msecs_to_jiffies(sp->rtoinfo.srto_initial); ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = - SCTP_DEFAULT_TIMEOUT_T1_INIT; + msecs_to_jiffies(sp->rtoinfo.srto_initial); ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = msecs_to_jiffies(sp->rtoinfo.srto_initial); ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0; @@ -117,12 +117,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] = 5 * msecs_to_jiffies(sp->rtoinfo.srto_max); - ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = - SCTP_DEFAULT_TIMEOUT_HEARTBEAT; - ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = - SCTP_DEFAULT_TIMEOUT_SACK; - ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = - sp->autoclose * HZ; + ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; + ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = sctp_sack_timeout; + ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; /* Use SCTP specific send buffer space queues. */ ep->sndbuf_policy = sctp_sndbuf_policy; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 5135e1a25d25..e7f37faba7c0 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1050,7 +1050,10 @@ SCTP_STATIC __init int sctp_init(void) sctp_sndbuf_policy = 0; /* HB.interval - 30 seconds */ - sctp_hb_interval = 30 * HZ; + sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; + + /* delayed SACK timeout */ + sctp_sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK; /* Implementation specific variables. */ diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 7fc31849312b..dc4893474f18 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -47,6 +47,8 @@ static ctl_handler sctp_sysctl_jiffies_ms; static long rto_timer_min = 1; static long rto_timer_max = 86400000; /* One day */ +static long sack_timer_min = 1; +static long sack_timer_max = 500; static ctl_table sctp_table[] = { { @@ -187,6 +189,17 @@ static ctl_table sctp_table[] = { .mode = 0644, .proc_handler = &proc_dointvec }, + { + .ctl_name = NET_SCTP_SACK_TIMEOUT, + .procname = "sack_timeout", + .data = &sctp_sack_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_doulongvec_ms_jiffies_minmax, + .strategy = &sctp_sysctl_jiffies_ms, + .extra1 = &sack_timer_min, + .extra2 = &sack_timer_max, + }, { .ctl_name = 0 } }; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 0ec0fde6e6c5..a63b69179607 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -103,7 +103,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, /* Set up the heartbeat timer. */ init_timer(&peer->hb_timer); - peer->hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; peer->hb_timer.function = sctp_generate_heartbeat_event; peer->hb_timer.data = (unsigned long)peer; -- cgit v1.2.3 From 0eeca28300df110bd6ed54b31193c83b87921443 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 12 Jul 2005 17:06:03 -0400 Subject: [PATCH] inotify inotify is intended to correct the deficiencies of dnotify, particularly its inability to scale and its terrible user interface: * dnotify requires the opening of one fd per each directory that you intend to watch. This quickly results in too many open files and pins removable media, preventing unmount. * dnotify is directory-based. You only learn about changes to directories. Sure, a change to a file in a directory affects the directory, but you are then forced to keep a cache of stat structures. 
* dnotify's interface to user-space is awful. Signals? inotify provides a more usable, simple, powerful solution to file change notification: * inotify's interface is a system call that returns a fd, not SIGIO. You get a single fd, which is select()-able. * inotify has an event that says "the filesystem that the item you were watching is on was unmounted." * inotify can watch directories or files. Inotify is currently used by Beagle (a desktop search infrastructure), Gamin (a FAM replacement), and other projects. See Documentation/filesystems/inotify.txt. Signed-off-by: Robert Love Cc: John McCutchan Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/inotify.txt | 138 +++++ arch/i386/kernel/syscall_table.S | 3 + fs/Kconfig | 13 + fs/Makefile | 1 + fs/attr.c | 33 +- fs/compat.c | 12 +- fs/file_table.c | 3 + fs/inode.c | 6 + fs/inotify.c | 999 ++++++++++++++++++++++++++++++++++ fs/namei.c | 30 +- fs/nfsd/vfs.c | 6 +- fs/open.c | 3 +- fs/read_write.c | 15 +- fs/sysfs/file.c | 7 +- fs/xattr.c | 5 +- include/asm-i386/unistd.h | 5 +- include/linux/fs.h | 6 +- include/linux/fsnotify.h | 248 +++++++++ include/linux/inotify.h | 108 ++++ include/linux/sched.h | 4 + include/linux/sysctl.h | 11 +- kernel/sys_ni.c | 3 + kernel/sysctl.c | 43 +- kernel/user.c | 4 + 24 files changed, 1639 insertions(+), 67 deletions(-) create mode 100644 Documentation/filesystems/inotify.txt create mode 100644 fs/inotify.c create mode 100644 include/linux/fsnotify.h create mode 100644 include/linux/inotify.h (limited to 'include/linux/sysctl.h') diff --git a/Documentation/filesystems/inotify.txt b/Documentation/filesystems/inotify.txt new file mode 100644 index 000000000000..2c716041f578 --- /dev/null +++ b/Documentation/filesystems/inotify.txt @@ -0,0 +1,138 @@ + inotify + a powerful yet simple file change notification system + + + +Document started 15 Mar 2005 by Robert Love + +(i) User Interface + +Inotify is controlled by a set of three sys calls + +First step in using inotify is to initialise an inotify instance + + int fd = inotify_init (); + +Change events are managed by "watches". A watch is an (object,mask) pair where +the object is a file or directory and the mask is a bit mask of one or more +inotify events that the application wishes to receive. See +for valid events. A watch is referenced by a watch descriptor, or wd. + +Watches are added via a path to the file. + +Watches on a directory will return events on any files inside of the directory. + +Adding a watch is simple, + + int wd = inotify_add_watch (fd, path, mask); + +You can add a large number of files via something like + + for each file to watch { + int wd = inotify_add_watch (fd, file, mask); + } + +You can update an existing watch in the same manner, by passing in a new mask. + +An existing watch is removed via the INOTIFY_IGNORE ioctl, for example + + inotify_rm_watch (fd, wd); + +Events are provided in the form of an inotify_event structure that is read(2) +from a inotify instance fd. The filename is of dynamic length and follows the +struct. It is of size len. The filename is padded with null bytes to ensure +proper alignment. This padding is reflected in len. + +You can slurp multiple events by passing a large buffer, for example + + size_t len = read (fd, buf, BUF_LEN); + +Will return as many events as are available and fit in BUF_LEN. + +each inotify instance fd is also select()- and poll()-able. + +You can find the size of the current event queue via the FIONREAD ioctl. 
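
Putting the calls described above together, a minimal watcher might look like the sketch below. This is an illustration only, not part of the patch: it assumes user-space entry points for inotify_init, inotify_add_watch and inotify_rm_watch (at the time of this patch these are raw system calls, so early adopters typically supplied their own syscall(2) stubs) and that struct inotify_event and the IN_* masks come from the new <linux/inotify.h>.

	/* Sketch: watch /tmp and print one batch of events.
	 * Assumes wrappers for the three inotify system calls exist.
	 */
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/inotify.h>

	int main(void)
	{
		char buf[4096];
		ssize_t len, i = 0;
		int fd, wd;

		fd = inotify_init();
		wd = inotify_add_watch(fd, "/tmp",
				       IN_CREATE | IN_DELETE | IN_MODIFY);

		/* read() returns as many whole events as fit in buf */
		len = read(fd, buf, sizeof(buf));

		while (i + (ssize_t) sizeof(struct inotify_event) <= len) {
			struct inotify_event *ev =
				(struct inotify_event *) &buf[i];

			/* ev->len bytes of null-padded filename follow */
			printf("wd=%d mask=0x%x name=%s\n", ev->wd, ev->mask,
			       ev->len ? ev->name : "");
			i += sizeof(struct inotify_event) + ev->len;
		}

		inotify_rm_watch(fd, wd);
		return close(fd);
	}

Because the filename is padded to a multiple of the structure size, stepping by sizeof(struct inotify_event) + ev->len keeps the walk aligned without any extra bookkeeping.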
+ +All watches are destroyed and cleaned up on close. + + +(ii) Internal Kernel Implementation + +Each open inotify instance is associated with an inotify_device structure. + +Each watch is associated with an inotify_watch structure. Watches are chained +off of each associated device and each associated inode. + +See fs/inotify.c for the locking and lifetime rules. + + +(iii) Rationale + +Q: What is the design decision behind not tying the watch to the open fd of + the watched object? + +A: Watches are associated with an open inotify device, not an open file. + This solves the primary problem with dnotify: keeping the file open pins + the file and thus, worse, pins the mount. Dnotify is therefore infeasible + for use on a desktop system with removable media as the media cannot be + unmounted. + +Q: What is the design decision behind using an-fd-per-device as opposed to + an fd-per-watch? + +A: An fd-per-watch quickly consumes more file descriptors than are allowed, + more fd's than are feasible to manage, and more fd's than are optimally + select()-able. Yes, root can bump the per-process fd limit and yes, users + can use epoll, but requiring both is a silly and extraneous requirement. + A watch consumes less memory than an open file, separating the number + spaces is thus sensible. The current design is what user-space developers + want: Users initialize inotify, once, and add n watches, requiring but one fd + and no twiddling with fd limits. Initializing an inotify instance two + thousand times is silly. If we can implement user-space's preferences + cleanly--and we can, the idr layer makes stuff like this trivial--then we + should. + + There are other good arguments. With a single fd, there is a single + item to block on, which is mapped to a single queue of events. The single + fd returns all watch events and also any potential out-of-band data. If + every fd was a separate watch, + + - There would be no way to get event ordering. Events on file foo and + file bar would pop poll() on both fd's, but there would be no way to tell + which happened first. A single queue trivially gives you ordering. Such + ordering is crucial to existing applications such as Beagle. Imagine + "mv a b ; mv b a" events without ordering. + + - We'd have to maintain n fd's and n internal queues with state, + versus just one. It is a lot messier in the kernel. A single, linear + queue is the data structure that makes sense. + + - User-space developers prefer the current API. The Beagle guys, for + example, love it. Trust me, I asked. It is not a surprise: Who'd want + to manage and block on 1000 fd's via select? + + - You'd have to manage the fd's, as an example: Call close() when you + received a delete event. + + - No way to get out of band data. + + - 1024 is still too low. ;-) + + When you talk about designing a file change notification system that + scales to 1000s of directories, juggling 1000s of fd's just does not seem + the right interface. It is too heavy. + +Q: Why the system call approach? + +A: The poor user-space interface is the second biggest problem with dnotify. + Signals are a terrible, terrible interface for file notification. Or for + anything, for that matter. The ideal solution, from all perspectives, is a + file descriptor-based one that allows basic file I/O and poll/select. + Obtaining the fd and managing the watches could have been done either via a + device file or a family of new system calls. 
We decided to implement a + family of system calls because that is the preffered approach for new kernel + features and it means our user interface requirements. + + Additionally, it _is_ possible to more than one instance and + juggle more than one queue and thus more than one associated fd. + diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 3db9a04aec6e..468500a7e894 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S @@ -291,3 +291,6 @@ ENTRY(sys_call_table) .long sys_keyctl .long sys_ioprio_set .long sys_ioprio_get /* 290 */ + .long sys_inotify_init + .long sys_inotify_add_watch + .long sys_inotify_rm_watch diff --git a/fs/Kconfig b/fs/Kconfig index f93fd41b025d..5d0c4be43dba 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -359,6 +359,19 @@ config ROMFS_FS If you don't know whether you need it, then you don't need it: answer N. +config INOTIFY + bool "Inotify file change notification support" + default y + ---help--- + Say Y here to enable inotify support and the /dev/inotify character + device. Inotify is a file change notification system and a + replacement for dnotify. Inotify fixes numerous shortcomings in + dnotify and introduces several new features. It allows monitoring + of both files and directories via a single open fd. Multiple file + events are supported. + + If unsure, say Y. + config QUOTA bool "Quota support" help diff --git a/fs/Makefile b/fs/Makefile index 20edcf28bfd2..cf95eb894fd5 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -12,6 +12,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ ioprio.o +obj-$(CONFIG_INOTIFY) += inotify.o obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_COMPAT) += compat.o diff --git a/fs/attr.c b/fs/attr.c index c3c76fe78346..b1796fb9e524 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -107,31 +107,8 @@ int inode_setattr(struct inode * inode, struct iattr * attr) out: return error; } - EXPORT_SYMBOL(inode_setattr); -int setattr_mask(unsigned int ia_valid) -{ - unsigned long dn_mask = 0; - - if (ia_valid & ATTR_UID) - dn_mask |= DN_ATTRIB; - if (ia_valid & ATTR_GID) - dn_mask |= DN_ATTRIB; - if (ia_valid & ATTR_SIZE) - dn_mask |= DN_MODIFY; - /* both times implies a utime(s) call */ - if ((ia_valid & (ATTR_ATIME|ATTR_MTIME)) == (ATTR_ATIME|ATTR_MTIME)) - dn_mask |= DN_ATTRIB; - else if (ia_valid & ATTR_ATIME) - dn_mask |= DN_ACCESS; - else if (ia_valid & ATTR_MTIME) - dn_mask |= DN_MODIFY; - if (ia_valid & ATTR_MODE) - dn_mask |= DN_ATTRIB; - return dn_mask; -} - int notify_change(struct dentry * dentry, struct iattr * attr) { struct inode *inode = dentry->d_inode; @@ -197,11 +174,9 @@ int notify_change(struct dentry * dentry, struct iattr * attr) if (ia_valid & ATTR_SIZE) up_write(&dentry->d_inode->i_alloc_sem); - if (!error) { - unsigned long dn_mask = setattr_mask(ia_valid); - if (dn_mask) - dnotify_parent(dentry, dn_mask); - } + if (!error) + fsnotify_change(dentry, ia_valid); + return error; } diff --git a/fs/compat.c b/fs/compat.c index 728cd8365384..6b06b6bae35e 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include @@ -1307,9 +1307,13 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, out: if (iov != iovstack) kfree(iov); - if ((ret + (type == READ)) > 0) - dnotify_parent(file->f_dentry, - (type == READ) 
? DN_ACCESS : DN_MODIFY); + if ((ret + (type == READ)) > 0) { + struct dentry *dentry = file->f_dentry; + if (type == READ) + fsnotify_access(dentry); + else + fsnotify_modify(dentry); + } return ret; } diff --git a/fs/file_table.c b/fs/file_table.c index fa7849fae134..1d3de78e6bc9 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -16,6 +16,7 @@ #include #include #include +#include /* sysctl tunables... */ struct files_stat_struct files_stat = { @@ -126,6 +127,8 @@ void fastcall __fput(struct file *file) struct inode *inode = dentry->d_inode; might_sleep(); + + fsnotify_close(file); /* * The function eventpoll_release() should be the first called * in the file cleanup chain. diff --git a/fs/inode.c b/fs/inode.c index 5bc97507eeaa..96364fae0844 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -21,6 +21,7 @@ #include #include #include +#include /* * This is needed for the following functions: @@ -202,6 +203,10 @@ void inode_init_once(struct inode *inode) INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); spin_lock_init(&inode->i_lock); i_size_ordered_init(inode); +#ifdef CONFIG_INOTIFY + INIT_LIST_HEAD(&inode->inotify_watches); + sema_init(&inode->inotify_sem, 1); +#endif } EXPORT_SYMBOL(inode_init_once); @@ -351,6 +356,7 @@ int invalidate_inodes(struct super_block * sb) down(&iprune_sem); spin_lock(&inode_lock); + inotify_unmount_inodes(&sb->s_inodes); busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); diff --git a/fs/inotify.c b/fs/inotify.c new file mode 100644 index 000000000000..e423bfe0c86f --- /dev/null +++ b/fs/inotify.c @@ -0,0 +1,999 @@ +/* + * fs/inotify.c - inode-based file event notifications + * + * Authors: + * John McCutchan + * Robert Love + * + * Copyright (C) 2005 John McCutchan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static atomic_t inotify_cookie; + +static kmem_cache_t *watch_cachep; +static kmem_cache_t *event_cachep; + +static struct vfsmount *inotify_mnt; + +/* These are configurable via /proc/sys/inotify */ +int inotify_max_user_devices; +int inotify_max_user_watches; +int inotify_max_queued_events; + +/* + * Lock ordering: + * + * dentry->d_lock (used to keep d_move() away from dentry->d_parent) + * iprune_sem (synchronize shrink_icache_memory()) + * inode_lock (protects the super_block->s_inodes list) + * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) + * inotify_dev->sem (protects inotify_device and watches->d_list) + */ + +/* + * Lifetimes of the three main data structures--inotify_device, inode, and + * inotify_watch--are managed by reference count. + * + * inotify_device: Lifetime is from open until release. Additional references + * can bump the count via get_inotify_dev() and drop the count via + * put_inotify_dev(). + * + * inotify_watch: Lifetime is from create_watch() to destory_watch(). 
+ * Additional references can bump the count via get_inotify_watch() and drop + * the count via put_inotify_watch(). + * + * inode: Pinned so long as the inode is associated with a watch, from + * create_watch() to put_inotify_watch(). + */ + +/* + * struct inotify_device - represents an open instance of an inotify device + * + * This structure is protected by the semaphore 'sem'. + */ +struct inotify_device { + wait_queue_head_t wq; /* wait queue for i/o */ + struct idr idr; /* idr mapping wd -> watch */ + struct semaphore sem; /* protects this bad boy */ + struct list_head events; /* list of queued events */ + struct list_head watches; /* list of watches */ + atomic_t count; /* reference count */ + struct user_struct *user; /* user who opened this dev */ + unsigned int queue_size; /* size of the queue (bytes) */ + unsigned int event_count; /* number of pending events */ + unsigned int max_events; /* maximum number of events */ +}; + +/* + * struct inotify_kernel_event - An inotify event, originating from a watch and + * queued for user-space. A list of these is attached to each instance of the + * device. In read(), this list is walked and all events that can fit in the + * buffer are returned. + * + * Protected by dev->sem of the device in which we are queued. + */ +struct inotify_kernel_event { + struct inotify_event event; /* the user-space event */ + struct list_head list; /* entry in inotify_device's list */ + char *name; /* filename, if any */ +}; + +/* + * struct inotify_watch - represents a watch request on a specific inode + * + * d_list is protected by dev->sem of the associated watch->dev. + * i_list and mask are protected by inode->inotify_sem of the associated inode. + * dev, inode, and wd are never written to once the watch is created. + */ +struct inotify_watch { + struct list_head d_list; /* entry in inotify_device's list */ + struct list_head i_list; /* entry in inode's list */ + atomic_t count; /* reference count */ + struct inotify_device *dev; /* associated device */ + struct inode *inode; /* associated inode */ + s32 wd; /* watch descriptor */ + u32 mask; /* event mask for this watch */ +}; + +static inline void get_inotify_dev(struct inotify_device *dev) +{ + atomic_inc(&dev->count); +} + +static inline void put_inotify_dev(struct inotify_device *dev) +{ + if (atomic_dec_and_test(&dev->count)) { + atomic_dec(&dev->user->inotify_devs); + free_uid(dev->user); + kfree(dev); + } +} + +static inline void get_inotify_watch(struct inotify_watch *watch) +{ + atomic_inc(&watch->count); +} + +/* + * put_inotify_watch - decrements the ref count on a given watch. cleans up + * the watch and its references if the count reaches zero. + */ +static inline void put_inotify_watch(struct inotify_watch *watch) +{ + if (atomic_dec_and_test(&watch->count)) { + put_inotify_dev(watch->dev); + iput(watch->inode); + kmem_cache_free(watch_cachep, watch); + } +} + +/* + * kernel_event - create a new kernel event with the given parameters + * + * This function can sleep. 
+ */ +static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, + const char *name) +{ + struct inotify_kernel_event *kevent; + + kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); + if (unlikely(!kevent)) + return NULL; + + /* we hand this out to user-space, so zero it just in case */ + memset(&kevent->event, 0, sizeof(struct inotify_event)); + + kevent->event.wd = wd; + kevent->event.mask = mask; + kevent->event.cookie = cookie; + + INIT_LIST_HEAD(&kevent->list); + + if (name) { + size_t len, rem, event_size = sizeof(struct inotify_event); + + /* + * We need to pad the filename so as to properly align an + * array of inotify_event structures. Because the structure is + * small and the common case is a small filename, we just round + * up to the next multiple of the structure's sizeof. This is + * simple and safe for all architectures. + */ + len = strlen(name) + 1; + rem = event_size - len; + if (len > event_size) { + rem = event_size - (len % event_size); + if (len % event_size == 0) + rem = 0; + } + + kevent->name = kmalloc(len + rem, GFP_KERNEL); + if (unlikely(!kevent->name)) { + kmem_cache_free(event_cachep, kevent); + return NULL; + } + memcpy(kevent->name, name, len); + if (rem) + memset(kevent->name + len, 0, rem); + kevent->event.len = len + rem; + } else { + kevent->event.len = 0; + kevent->name = NULL; + } + + return kevent; +} + +/* + * inotify_dev_get_event - return the next event in the given dev's queue + * + * Caller must hold dev->sem. + */ +static inline struct inotify_kernel_event * +inotify_dev_get_event(struct inotify_device *dev) +{ + return list_entry(dev->events.next, struct inotify_kernel_event, list); +} + +/* + * inotify_dev_queue_event - add a new event to the given device + * + * Caller must hold dev->sem. Can sleep (calls kernel_event()). + */ +static void inotify_dev_queue_event(struct inotify_device *dev, + struct inotify_watch *watch, u32 mask, + u32 cookie, const char *name) +{ + struct inotify_kernel_event *kevent, *last; + + /* coalescing: drop this event if it is a dupe of the previous */ + last = inotify_dev_get_event(dev); + if (last && last->event.mask == mask && last->event.wd == watch->wd && + last->event.cookie == cookie) { + const char *lastname = last->name; + + if (!name && !lastname) + return; + if (name && lastname && !strcmp(lastname, name)) + return; + } + + /* the queue overflowed and we already sent the Q_OVERFLOW event */ + if (unlikely(dev->event_count > dev->max_events)) + return; + + /* if the queue overflows, we need to notify user space */ + if (unlikely(dev->event_count == dev->max_events)) + kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); + else + kevent = kernel_event(watch->wd, mask, cookie, name); + + if (unlikely(!kevent)) + return; + + /* queue the event and wake up anyone waiting */ + dev->event_count++; + dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; + list_add_tail(&kevent->list, &dev->events); + wake_up_interruptible(&dev->wq); +} + +/* + * remove_kevent - cleans up and ultimately frees the given kevent + * + * Caller must hold dev->sem. + */ +static void remove_kevent(struct inotify_device *dev, + struct inotify_kernel_event *kevent) +{ + list_del(&kevent->list); + + dev->event_count--; + dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; + + kfree(kevent->name); + kmem_cache_free(event_cachep, kevent); +} + +/* + * inotify_dev_event_dequeue - destroy an event on the given device + * + * Caller must hold dev->sem. 
+ */ +static void inotify_dev_event_dequeue(struct inotify_device *dev) +{ + if (!list_empty(&dev->events)) { + struct inotify_kernel_event *kevent; + kevent = inotify_dev_get_event(dev); + remove_kevent(dev, kevent); + } +} + +/* + * inotify_dev_get_wd - returns the next WD for use by the given dev + * + * Callers must hold dev->sem. This function can sleep. + */ +static int inotify_dev_get_wd(struct inotify_device *dev, + struct inotify_watch *watch) +{ + int ret; + + do { + if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL))) + return -ENOSPC; + ret = idr_get_new(&dev->idr, watch, &watch->wd); + } while (ret == -EAGAIN); + + return ret; +} + +/* + * find_inode - resolve a user-given path to a specific inode and return a nd + */ +static int find_inode(const char __user *dirname, struct nameidata *nd) +{ + int error; + + error = __user_walk(dirname, LOOKUP_FOLLOW, nd); + if (error) + return error; + /* you can only watch an inode if you have read permissions on it */ + error = permission(nd->dentry->d_inode, MAY_READ, NULL); + if (error) + path_release (nd); + return error; +} + +/* + * create_watch - creates a watch on the given device. + * + * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. + * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. + */ +static struct inotify_watch *create_watch(struct inotify_device *dev, + u32 mask, struct inode *inode) +{ + struct inotify_watch *watch; + int ret; + + if (atomic_read(&dev->user->inotify_watches) >= inotify_max_user_watches) + return ERR_PTR(-ENOSPC); + + watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); + if (unlikely(!watch)) + return ERR_PTR(-ENOMEM); + + ret = inotify_dev_get_wd(dev, watch); + if (unlikely(ret)) { + kmem_cache_free(watch_cachep, watch); + return ERR_PTR(ret); + } + + watch->mask = mask; + atomic_set(&watch->count, 0); + INIT_LIST_HEAD(&watch->d_list); + INIT_LIST_HEAD(&watch->i_list); + + /* save a reference to device and bump the count to make it official */ + get_inotify_dev(dev); + watch->dev = dev; + + /* + * Save a reference to the inode and bump the ref count to make it + * official. We hold a reference to nameidata, which makes this safe. + */ + watch->inode = igrab(inode); + + /* bump our own count, corresponding to our entry in dev->watches */ + get_inotify_watch(watch); + + atomic_inc(&dev->user->inotify_watches); + + return watch; +} + +/* + * inotify_find_dev - find the watch associated with the given inode and dev + * + * Callers must hold inode->inotify_sem. + */ +static struct inotify_watch *inode_find_dev(struct inode *inode, + struct inotify_device *dev) +{ + struct inotify_watch *watch; + + list_for_each_entry(watch, &inode->inotify_watches, i_list) { + if (watch->dev == dev) + return watch; + } + + return NULL; +} + +/* + * remove_watch_no_event - remove_watch() without the IN_IGNORED event. + */ +static void remove_watch_no_event(struct inotify_watch *watch, + struct inotify_device *dev) +{ + list_del(&watch->i_list); + list_del(&watch->d_list); + + atomic_dec(&dev->user->inotify_watches); + idr_remove(&dev->idr, watch->wd); + put_inotify_watch(watch); +} + +/* + * remove_watch - Remove a watch from both the device and the inode. Sends + * the IN_IGNORED event to the given device signifying that the inode is no + * longer watched. + * + * Callers must hold both inode->inotify_sem and dev->sem. We drop a + * reference to the inode before returning. + * + * The inode is not iput() so as to remain atomic. If the inode needs to be + * iput(), the call returns one. 
Otherwise, it returns zero. + */ +static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev) +{ + inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL); + remove_watch_no_event(watch, dev); +} + +/* + * inotify_inode_watched - returns nonzero if there are watches on this inode + * and zero otherwise. We call this lockless, we do not care if we race. + */ +static inline int inotify_inode_watched(struct inode *inode) +{ + return !list_empty(&inode->inotify_watches); +} + +/* Kernel API */ + +/** + * inotify_inode_queue_event - queue an event to all watches on this inode + * @inode: inode event is originating from + * @mask: event mask describing this event + * @cookie: cookie for synchronization, or zero + * @name: filename, if any + */ +void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, + const char *name) +{ + struct inotify_watch *watch, *next; + + if (!inotify_inode_watched(inode)) + return; + + down(&inode->inotify_sem); + list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { + u32 watch_mask = watch->mask; + if (watch_mask & mask) { + struct inotify_device *dev = watch->dev; + get_inotify_watch(watch); + down(&dev->sem); + inotify_dev_queue_event(dev, watch, mask, cookie, name); + if (watch_mask & IN_ONESHOT) + remove_watch_no_event(watch, dev); + up(&dev->sem); + put_inotify_watch(watch); + } + } + up(&inode->inotify_sem); +} +EXPORT_SYMBOL_GPL(inotify_inode_queue_event); + +/** + * inotify_dentry_parent_queue_event - queue an event to a dentry's parent + * @dentry: the dentry in question, we queue against this dentry's parent + * @mask: event mask describing this event + * @cookie: cookie for synchronization, or zero + * @name: filename, if any + */ +void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, + u32 cookie, const char *name) +{ + struct dentry *parent; + struct inode *inode; + + spin_lock(&dentry->d_lock); + parent = dentry->d_parent; + inode = parent->d_inode; + + if (inotify_inode_watched(inode)) { + dget(parent); + spin_unlock(&dentry->d_lock); + inotify_inode_queue_event(inode, mask, cookie, name); + dput(parent); + } else + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); + +/** + * inotify_get_cookie - return a unique cookie for use in synchronizing events. + */ +u32 inotify_get_cookie(void) +{ + return atomic_inc_return(&inotify_cookie); +} +EXPORT_SYMBOL_GPL(inotify_get_cookie); + +/** + * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes. + * @list: list of inodes being unmounted (sb->s_inodes) + * + * Called with inode_lock held, protecting the unmounting super block's list + * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. + * We temporarily drop inode_lock, however, and CAN block. + */ +void inotify_unmount_inodes(struct list_head *list) +{ + struct inode *inode, *next_i, *need_iput = NULL; + + list_for_each_entry_safe(inode, next_i, list, i_sb_list) { + struct inotify_watch *watch, *next_w; + struct inode *need_iput_tmp; + struct list_head *watches; + + /* + * If i_count is zero, the inode cannot have any watches and + * doing an __iget/iput with MS_ACTIVE clear would actually + * evict all inodes with zero i_count from icache which is + * unnecessarily violent and may in fact be illegal to do. 
+ */ + if (!atomic_read(&inode->i_count)) + continue; + + /* + * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or + * I_WILL_FREE which is fine because by that point the inode + * cannot have any associated watches. + */ + if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE)) + continue; + + need_iput_tmp = need_iput; + need_iput = NULL; + /* In case the remove_watch() drops a reference. */ + if (inode != need_iput_tmp) + __iget(inode); + else + need_iput_tmp = NULL; + /* In case the dropping of a reference would nuke next_i. */ + if ((&next_i->i_sb_list != list) && + atomic_read(&next_i->i_count) && + !(next_i->i_state & (I_CLEAR | I_FREEING | + I_WILL_FREE))) { + __iget(next_i); + need_iput = next_i; + } + + /* + * We can safely drop inode_lock here because we hold + * references on both inode and next_i. Also no new inodes + * will be added since the umount has begun. Finally, + * iprune_sem keeps shrink_icache_memory() away. + */ + spin_unlock(&inode_lock); + + if (need_iput_tmp) + iput(need_iput_tmp); + + /* for each watch, send IN_UNMOUNT and then remove it */ + down(&inode->inotify_sem); + watches = &inode->inotify_watches; + list_for_each_entry_safe(watch, next_w, watches, i_list) { + struct inotify_device *dev = watch->dev; + down(&dev->sem); + inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); + remove_watch(watch, dev); + up(&dev->sem); + } + up(&inode->inotify_sem); + iput(inode); + + spin_lock(&inode_lock); + } +} +EXPORT_SYMBOL_GPL(inotify_unmount_inodes); + +/** + * inotify_inode_is_dead - an inode has been deleted, cleanup any watches + * @inode: inode that is about to be removed + */ +void inotify_inode_is_dead(struct inode *inode) +{ + struct inotify_watch *watch, *next; + + down(&inode->inotify_sem); + list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { + struct inotify_device *dev = watch->dev; + down(&dev->sem); + remove_watch(watch, dev); + up(&dev->sem); + } + up(&inode->inotify_sem); +} +EXPORT_SYMBOL_GPL(inotify_inode_is_dead); + +/* Device Interface */ + +static unsigned int inotify_poll(struct file *file, poll_table *wait) +{ + struct inotify_device *dev = file->private_data; + int ret = 0; + + poll_wait(file, &dev->wq, wait); + down(&dev->sem); + if (!list_empty(&dev->events)) + ret = POLLIN | POLLRDNORM; + up(&dev->sem); + + return ret; +} + +static ssize_t inotify_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + size_t event_size = sizeof (struct inotify_event); + struct inotify_device *dev; + char __user *start; + int ret; + DEFINE_WAIT(wait); + + start = buf; + dev = file->private_data; + + while (1) { + int events; + + prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); + + down(&dev->sem); + events = !list_empty(&dev->events); + up(&dev->sem); + if (events) { + ret = 0; + break; + } + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + + schedule(); + } + + finish_wait(&dev->wq, &wait); + if (ret) + return ret; + + down(&dev->sem); + while (1) { + struct inotify_kernel_event *kevent; + + ret = buf - start; + if (list_empty(&dev->events)) + break; + + kevent = inotify_dev_get_event(dev); + if (event_size + kevent->event.len > count) + break; + + if (copy_to_user(buf, &kevent->event, event_size)) { + ret = -EFAULT; + break; + } + buf += event_size; + count -= event_size; + + if (kevent->name) { + if (copy_to_user(buf, kevent->name, kevent->event.len)){ + ret = -EFAULT; + break; + } + buf += kevent->event.len; + 
count -= kevent->event.len; + } + + remove_kevent(dev, kevent); + } + up(&dev->sem); + + return ret; +} + +static int inotify_release(struct inode *ignored, struct file *file) +{ + struct inotify_device *dev = file->private_data; + + /* + * Destroy all of the watches on this device. Unfortunately, not very + * pretty. We cannot do a simple iteration over the list, because we + * do not know the inode until we iterate to the watch. But we need to + * hold inode->inotify_sem before dev->sem. The following works. + */ + while (1) { + struct inotify_watch *watch; + struct list_head *watches; + struct inode *inode; + + down(&dev->sem); + watches = &dev->watches; + if (list_empty(watches)) { + up(&dev->sem); + break; + } + watch = list_entry(watches->next, struct inotify_watch, d_list); + get_inotify_watch(watch); + up(&dev->sem); + + inode = watch->inode; + down(&inode->inotify_sem); + down(&dev->sem); + remove_watch_no_event(watch, dev); + up(&dev->sem); + up(&inode->inotify_sem); + put_inotify_watch(watch); + } + + /* destroy all of the events on this device */ + down(&dev->sem); + while (!list_empty(&dev->events)) + inotify_dev_event_dequeue(dev); + up(&dev->sem); + + /* free this device: the put matching the get in inotify_open() */ + put_inotify_dev(dev); + + return 0; +} + +/* + * inotify_ignore - handle the INOTIFY_IGNORE ioctl, asking that a given wd be + * removed from the device. + * + * Can sleep. + */ +static int inotify_ignore(struct inotify_device *dev, s32 wd) +{ + struct inotify_watch *watch; + struct inode *inode; + + down(&dev->sem); + watch = idr_find(&dev->idr, wd); + if (unlikely(!watch)) { + up(&dev->sem); + return -EINVAL; + } + get_inotify_watch(watch); + inode = watch->inode; + up(&dev->sem); + + down(&inode->inotify_sem); + down(&dev->sem); + + /* make sure that we did not race */ + watch = idr_find(&dev->idr, wd); + if (likely(watch)) + remove_watch(watch, dev); + + up(&dev->sem); + up(&inode->inotify_sem); + put_inotify_watch(watch); + + return 0; +} + +static long inotify_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct inotify_device *dev; + void __user *p; + int ret = -ENOTTY; + + dev = file->private_data; + p = (void __user *) arg; + + switch (cmd) { + case FIONREAD: + ret = put_user(dev->queue_size, (int __user *) p); + break; + } + + return ret; +} + +static struct file_operations inotify_fops = { + .poll = inotify_poll, + .read = inotify_read, + .release = inotify_release, + .unlocked_ioctl = inotify_ioctl, + .compat_ioctl = inotify_ioctl, +}; + +asmlinkage long sys_inotify_init(void) +{ + struct inotify_device *dev; + struct user_struct *user; + int ret = -ENOTTY; + int fd; + struct file *filp; + + fd = get_unused_fd(); + if (fd < 0) { + ret = fd; + goto out; + } + + filp = get_empty_filp(); + if (!filp) { + put_unused_fd(fd); + ret = -ENFILE; + goto out; + } + filp->f_op = &inotify_fops; + filp->f_vfsmnt = mntget(inotify_mnt); + filp->f_dentry = dget(inotify_mnt->mnt_root); + filp->f_mapping = filp->f_dentry->d_inode->i_mapping; + filp->f_mode = FMODE_READ; + filp->f_flags = O_RDONLY; + + user = get_uid(current->user); + + if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_devices)) { + ret = -EMFILE; + goto out_err; + } + + dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); + if (unlikely(!dev)) { + ret = -ENOMEM; + goto out_err; + } + + idr_init(&dev->idr); + INIT_LIST_HEAD(&dev->events); + INIT_LIST_HEAD(&dev->watches); + init_waitqueue_head(&dev->wq); + sema_init(&dev->sem, 1); + dev->event_count = 0; + 
dev->queue_size = 0; + dev->max_events = inotify_max_queued_events; + dev->user = user; + atomic_set(&dev->count, 0); + + get_inotify_dev(dev); + atomic_inc(&user->inotify_devs); + + filp->private_data = dev; + fd_install (fd, filp); + return fd; +out_err: + put_unused_fd (fd); + put_filp (filp); + free_uid(user); +out: + return ret; +} + +asmlinkage long sys_inotify_add_watch(int fd, const char *path, u32 mask) +{ + struct inotify_watch *watch, *old; + struct inode *inode; + struct inotify_device *dev; + struct nameidata nd; + struct file *filp; + int ret; + + filp = fget(fd); + if (!filp) + return -EBADF; + + dev = filp->private_data; + + ret = find_inode ((const char __user*)path, &nd); + if (ret) + goto fput_and_out; + + /* Held in place by reference in nd */ + inode = nd.dentry->d_inode; + + down(&inode->inotify_sem); + down(&dev->sem); + + /* don't let user-space set invalid bits: we don't want flags set */ + mask &= IN_ALL_EVENTS; + if (!mask) { + ret = -EINVAL; + goto out; + } + + /* + * Handle the case of re-adding a watch on an (inode,dev) pair that we + * are already watching. We just update the mask and return its wd. + */ + old = inode_find_dev(inode, dev); + if (unlikely(old)) { + old->mask = mask; + ret = old->wd; + goto out; + } + + watch = create_watch(dev, mask, inode); + if (unlikely(IS_ERR(watch))) { + ret = PTR_ERR(watch); + goto out; + } + + /* Add the watch to the device's and the inode's list */ + list_add(&watch->d_list, &dev->watches); + list_add(&watch->i_list, &inode->inotify_watches); + ret = watch->wd; +out: + path_release (&nd); + up(&dev->sem); + up(&inode->inotify_sem); +fput_and_out: + fput(filp); + return ret; +} + +asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) +{ + struct file *filp; + struct inotify_device *dev; + int ret; + + filp = fget(fd); + if (!filp) + return -EBADF; + dev = filp->private_data; + ret = inotify_ignore (dev, wd); + fput(filp); + return ret; +} + +static struct super_block * +inotify_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA); +} + +static struct file_system_type inotify_fs_type = { + .name = "inotifyfs", + .get_sb = inotify_get_sb, + .kill_sb = kill_anon_super, +}; + +/* + * inotify_init - Our initialization function. Note that we cannnot return + * error because we have compiled-in VFS hooks. So an (unlikely) failure here + * must result in panic(). 
+ */ +static int __init inotify_init(void) +{ + register_filesystem(&inotify_fs_type); + inotify_mnt = kern_mount(&inotify_fs_type); + + inotify_max_queued_events = 8192; + inotify_max_user_devices = 128; + inotify_max_user_watches = 8192; + + atomic_set(&inotify_cookie, 0); + + watch_cachep = kmem_cache_create("inotify_watch_cache", + sizeof(struct inotify_watch), + 0, SLAB_PANIC, NULL, NULL); + event_cachep = kmem_cache_create("inotify_event_cache", + sizeof(struct inotify_kernel_event), + 0, SLAB_PANIC, NULL, NULL); + + printk(KERN_INFO "inotify syscall\n"); + + return 0; +} + +module_init(inotify_init); diff --git a/fs/namei.c b/fs/namei.c index 1d93cb4f7c5f..02a824cd3c5c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -1312,7 +1312,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, DQUOT_INIT(dir); error = dir->i_op->create(dir, dentry, mode, nd); if (!error) { - inode_dir_notify(dir, DN_CREATE); + fsnotify_create(dir, dentry->d_name.name); security_inode_post_create(dir, dentry, mode); } return error; @@ -1637,7 +1637,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) DQUOT_INIT(dir); error = dir->i_op->mknod(dir, dentry, mode, dev); if (!error) { - inode_dir_notify(dir, DN_CREATE); + fsnotify_create(dir, dentry->d_name.name); security_inode_post_mknod(dir, dentry, mode, dev); } return error; @@ -1710,7 +1710,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) DQUOT_INIT(dir); error = dir->i_op->mkdir(dir, dentry, mode); if (!error) { - inode_dir_notify(dir, DN_CREATE); + fsnotify_mkdir(dir, dentry->d_name.name); security_inode_post_mkdir(dir,dentry, mode); } return error; @@ -1801,7 +1801,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) } up(&dentry->d_inode->i_sem); if (!error) { - inode_dir_notify(dir, DN_DELETE); + fsnotify_rmdir(dentry, dentry->d_inode, dir); d_delete(dentry); } dput(dentry); @@ -1874,9 +1874,10 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) /* We don't d_delete() NFS sillyrenamed files--they still exist. 
*/ if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { + fsnotify_unlink(dentry, dir); d_delete(dentry); - inode_dir_notify(dir, DN_DELETE); } + return error; } @@ -1950,7 +1951,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, i DQUOT_INIT(dir); error = dir->i_op->symlink(dir, dentry, oldname); if (!error) { - inode_dir_notify(dir, DN_CREATE); + fsnotify_create(dir, dentry->d_name.name); security_inode_post_symlink(dir, dentry, oldname); } return error; @@ -2023,7 +2024,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de error = dir->i_op->link(old_dentry, dir, new_dentry); up(&old_dentry->d_inode->i_sem); if (!error) { - inode_dir_notify(dir, DN_CREATE); + fsnotify_create(dir, new_dentry->d_name.name); security_inode_post_link(old_dentry, dir, new_dentry); } return error; @@ -2187,6 +2188,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, { int error; int is_dir = S_ISDIR(old_dentry->d_inode->i_mode); + const char *old_name; if (old_dentry->d_inode == new_dentry->d_inode) return 0; @@ -2208,18 +2210,18 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, DQUOT_INIT(old_dir); DQUOT_INIT(new_dir); + old_name = fsnotify_oldname_init(old_dentry->d_name.name); + if (is_dir) error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry); else error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry); if (!error) { - if (old_dir == new_dir) - inode_dir_notify(old_dir, DN_RENAME); - else { - inode_dir_notify(old_dir, DN_DELETE); - inode_dir_notify(new_dir, DN_CREATE); - } + const char *new_name = old_dentry->d_name.name; + fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir); } + fsnotify_oldname_free(old_name); + return error; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 5e0bf3917607..4f2cd3d27566 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -45,7 +45,7 @@ #endif /* CONFIG_NFSD_V3 */ #include #include -#include +#include #include #include #ifdef CONFIG_NFSD_V4 @@ -860,7 +860,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, nfsdstats.io_read += err; *count = err; err = 0; - dnotify_parent(file->f_dentry, DN_ACCESS); + fsnotify_access(file->f_dentry); } else err = nfserrno(err); out: @@ -916,7 +916,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, set_fs(oldfs); if (err >= 0) { nfsdstats.io_write += cnt; - dnotify_parent(file->f_dentry, DN_MODIFY); + fsnotify_modify(file->f_dentry); } /* clear setuid/setgid flag after write */ diff --git a/fs/open.c b/fs/open.c index 3f4a4286fdc4..32bf05e2996d 100644 --- a/fs/open.c +++ b/fs/open.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -951,6 +951,7 @@ asmlinkage long sys_open(const char __user * filename, int flags, int mode) put_unused_fd(fd); fd = PTR_ERR(f); } else { + fsnotify_open(f->f_dentry); fd_install(fd, f); } } diff --git a/fs/read_write.c b/fs/read_write.c index 9292f5fa4d62..563abd09b5c8 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -252,7 +252,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) else ret = do_sync_read(file, buf, count, pos); if (ret > 0) { - dnotify_parent(file->f_dentry, DN_ACCESS); + fsnotify_access(file->f_dentry); current->rchar += ret; } current->syscr++; @@ -303,7 +303,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t 
count, loff_ else ret = do_sync_write(file, buf, count, pos); if (ret > 0) { - dnotify_parent(file->f_dentry, DN_MODIFY); + fsnotify_modify(file->f_dentry); current->wchar += ret; } current->syscw++; @@ -539,9 +539,12 @@ static ssize_t do_readv_writev(int type, struct file *file, out: if (iov != iovstack) kfree(iov); - if ((ret + (type == READ)) > 0) - dnotify_parent(file->f_dentry, - (type == READ) ? DN_ACCESS : DN_MODIFY); + if ((ret + (type == READ)) > 0) { + if (type == READ) + fsnotify_access(file->f_dentry); + else + fsnotify_modify(file->f_dentry); + } return ret; Efault: ret = -EFAULT; diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index d72c1ce48559..335288b9be0f 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -3,7 +3,7 @@ */ #include -#include +#include #include #include #include @@ -391,9 +391,6 @@ int sysfs_create_file(struct kobject * kobj, const struct attribute * attr) * sysfs_update_file - update the modified timestamp on an object attribute. * @kobj: object we're acting for. * @attr: attribute descriptor. - * - * Also call dnotify for the dentry, which lots of userspace programs - * use. */ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr) { @@ -408,7 +405,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr) if (victim->d_inode && (victim->d_parent->d_inode == dir->d_inode)) { victim->d_inode->i_mtime = CURRENT_TIME; - dnotify_parent(victim, DN_MODIFY); + fsnotify_modify(victim); /** * Drop reference from initial sysfs_get_dentry(). diff --git a/fs/xattr.c b/fs/xattr.c index 93dee70a1dbe..6acd5c63da91 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -16,6 +16,7 @@ #include #include #include +#include #include /* @@ -57,8 +58,10 @@ setxattr(struct dentry *d, char __user *name, void __user *value, if (error) goto out; error = d->d_inode->i_op->setxattr(d, kname, kvalue, size, flags); - if (!error) + if (!error) { + fsnotify_xattr(d); security_inode_post_setxattr(d, kname, kvalue, size, flags); + } out: up(&d->d_inode->i_sem); } diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index e25e4c71a879..a7cb377745bf 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -296,8 +296,11 @@ #define __NR_keyctl 288 #define __NR_ioprio_set 289 #define __NR_ioprio_get 290 +#define __NR_inotify_init 291 +#define __NR_inotify_add_watch 292 +#define __NR_inotify_rm_watch 293 -#define NR_syscalls 291 +#define NR_syscalls 294 /* * user-visible error numbers are in the range -1 - -128: see diff --git a/include/linux/fs.h b/include/linux/fs.h index 302ec20838ca..c9bf3746a9fb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -474,6 +474,11 @@ struct inode { struct dnotify_struct *i_dnotify; /* for directory notifications */ #endif +#ifdef CONFIG_INOTIFY + struct list_head inotify_watches; /* watches on this inode */ + struct semaphore inotify_sem; /* protects the watches list */ +#endif + unsigned long i_state; unsigned long dirtied_when; /* jiffies of first dirtying */ @@ -1393,7 +1398,6 @@ extern void emergency_remount(void); extern int do_remount_sb(struct super_block *sb, int flags, void *data, int force); extern sector_t bmap(struct inode *, sector_t); -extern int setattr_mask(unsigned int); extern int notify_change(struct dentry *, struct iattr *); extern int permission(struct inode *, int, struct nameidata *); extern int generic_permission(struct inode *, int, diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h new file mode 100644 index 000000000000..eb581b6cfca9 --- 
/dev/null +++ b/include/linux/fsnotify.h @@ -0,0 +1,248 @@ +#ifndef _LINUX_FS_NOTIFY_H +#define _LINUX_FS_NOTIFY_H + +/* + * include/linux/fsnotify.h - generic hooks for filesystem notification, to + * reduce in-source duplication from both dnotify and inotify. + * + * We don't compile any of this away in some complicated menagerie of ifdefs. + * Instead, we rely on the code inside to optimize away as needed. + * + * (C) Copyright 2005 Robert Love + */ + +#ifdef __KERNEL__ + +#include +#include + +/* + * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir + */ +static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, + const char *old_name, const char *new_name, + int isdir) +{ + u32 cookie = inotify_get_cookie(); + + if (old_dir == new_dir) + inode_dir_notify(old_dir, DN_RENAME); + else { + inode_dir_notify(old_dir, DN_DELETE); + inode_dir_notify(new_dir, DN_CREATE); + } + + if (isdir) + isdir = IN_ISDIR; + inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name); + inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name); +} + +/* + * fsnotify_unlink - file was unlinked + */ +static inline void fsnotify_unlink(struct dentry *dentry, struct inode *dir) +{ + struct inode *inode = dentry->d_inode; + + inode_dir_notify(dir, DN_DELETE); + inotify_inode_queue_event(dir, IN_DELETE, 0, dentry->d_name.name); + inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL); + + inotify_inode_is_dead(inode); +} + +/* + * fsnotify_rmdir - directory was removed + */ +static inline void fsnotify_rmdir(struct dentry *dentry, struct inode *inode, + struct inode *dir) +{ + inode_dir_notify(dir, DN_DELETE); + inotify_inode_queue_event(dir,IN_DELETE|IN_ISDIR,0,dentry->d_name.name); + inotify_inode_queue_event(inode, IN_DELETE_SELF | IN_ISDIR, 0, NULL); + inotify_inode_is_dead(inode); +} + +/* + * fsnotify_create - 'name' was linked in + */ +static inline void fsnotify_create(struct inode *inode, const char *name) +{ + inode_dir_notify(inode, DN_CREATE); + inotify_inode_queue_event(inode, IN_CREATE, 0, name); +} + +/* + * fsnotify_mkdir - directory 'name' was created + */ +static inline void fsnotify_mkdir(struct inode *inode, const char *name) +{ + inode_dir_notify(inode, DN_CREATE); + inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, name); +} + +/* + * fsnotify_access - file was read + */ +static inline void fsnotify_access(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + u32 mask = IN_ACCESS; + + if (S_ISDIR(inode->i_mode)) + mask |= IN_ISDIR; + + dnotify_parent(dentry, DN_ACCESS); + inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); + inotify_inode_queue_event(inode, mask, 0, NULL); +} + +/* + * fsnotify_modify - file was modified + */ +static inline void fsnotify_modify(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + u32 mask = IN_MODIFY; + + if (S_ISDIR(inode->i_mode)) + mask |= IN_ISDIR; + + dnotify_parent(dentry, DN_MODIFY); + inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); + inotify_inode_queue_event(inode, mask, 0, NULL); +} + +/* + * fsnotify_open - file was opened + */ +static inline void fsnotify_open(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + u32 mask = IN_OPEN; + + if (S_ISDIR(inode->i_mode)) + mask |= IN_ISDIR; + + inotify_inode_queue_event(inode, mask, 0, NULL); + inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); +} + +/* + * fsnotify_close - file was closed + */ +static inline 
void fsnotify_close(struct file *file) +{ + struct dentry *dentry = file->f_dentry; + struct inode *inode = dentry->d_inode; + const char *name = dentry->d_name.name; + mode_t mode = file->f_mode; + u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE; + + if (S_ISDIR(inode->i_mode)) + mask |= IN_ISDIR; + + inotify_dentry_parent_queue_event(dentry, mask, 0, name); + inotify_inode_queue_event(inode, mask, 0, NULL); +} + +/* + * fsnotify_xattr - extended attributes were changed + */ +static inline void fsnotify_xattr(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + u32 mask = IN_ATTRIB; + + if (S_ISDIR(inode->i_mode)) + mask |= IN_ISDIR; + + inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); + inotify_inode_queue_event(inode, mask, 0, NULL); +} + +/* + * fsnotify_change - notify_change event. file was modified and/or metadata + * was changed. + */ +static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) +{ + struct inode *inode = dentry->d_inode; + int dn_mask = 0; + u32 in_mask = 0; + + if (ia_valid & ATTR_UID) { + in_mask |= IN_ATTRIB; + dn_mask |= DN_ATTRIB; + } + if (ia_valid & ATTR_GID) { + in_mask |= IN_ATTRIB; + dn_mask |= DN_ATTRIB; + } + if (ia_valid & ATTR_SIZE) { + in_mask |= IN_MODIFY; + dn_mask |= DN_MODIFY; + } + /* both times implies a utime(s) call */ + if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) + { + in_mask |= IN_ATTRIB; + dn_mask |= DN_ATTRIB; + } else if (ia_valid & ATTR_ATIME) { + in_mask |= IN_ACCESS; + dn_mask |= DN_ACCESS; + } else if (ia_valid & ATTR_MTIME) { + in_mask |= IN_MODIFY; + dn_mask |= DN_MODIFY; + } + if (ia_valid & ATTR_MODE) { + in_mask |= IN_ATTRIB; + dn_mask |= DN_ATTRIB; + } + + if (dn_mask) + dnotify_parent(dentry, dn_mask); + if (in_mask) { + if (S_ISDIR(inode->i_mode)) + in_mask |= IN_ISDIR; + inotify_inode_queue_event(inode, in_mask, 0, NULL); + inotify_dentry_parent_queue_event(dentry, in_mask, 0, + dentry->d_name.name); + } +} + +#ifdef CONFIG_INOTIFY /* inotify helpers */ + +/* + * fsnotify_oldname_init - save off the old filename before we change it + */ +static inline const char *fsnotify_oldname_init(const char *name) +{ + return kstrdup(name, GFP_KERNEL); +} + +/* + * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init + */ +static inline void fsnotify_oldname_free(const char *old_name) +{ + kfree(old_name); +} + +#else /* CONFIG_INOTIFY */ + +static inline const char *fsnotify_oldname_init(const char *name) +{ + return NULL; +} + +static inline void fsnotify_oldname_free(const char *old_name) +{ +} + +#endif /* ! CONFIG_INOTIFY */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/inotify.h b/include/linux/inotify.h new file mode 100644 index 000000000000..a40c2bf0408e --- /dev/null +++ b/include/linux/inotify.h @@ -0,0 +1,108 @@ +/* + * Inode based directory notification for Linux + * + * Copyright (C) 2005 John McCutchan + */ + +#ifndef _LINUX_INOTIFY_H +#define _LINUX_INOTIFY_H + +#include + +/* + * struct inotify_event - structure read from the inotify device for each event + * + * When you are watching a directory, you will receive the filename for events + * such as IN_CREATE, IN_DELETE, IN_OPEN, IN_CLOSE, ..., relative to the wd. 
+ */ +struct inotify_event { + __s32 wd; /* watch descriptor */ + __u32 mask; /* watch mask */ + __u32 cookie; /* cookie to synchronize two events */ + __u32 len; /* length (including nulls) of name */ + char name[0]; /* stub for possible name */ +}; + +/* the following are legal, implemented events that user-space can watch for */ +#define IN_ACCESS 0x00000001 /* File was accessed */ +#define IN_MODIFY 0x00000002 /* File was modified */ +#define IN_ATTRIB 0x00000004 /* Metadata changed */ +#define IN_CLOSE_WRITE 0x00000008 /* Writtable file was closed */ +#define IN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ +#define IN_OPEN 0x00000020 /* File was opened */ +#define IN_MOVED_FROM 0x00000040 /* File was moved from X */ +#define IN_MOVED_TO 0x00000080 /* File was moved to Y */ +#define IN_CREATE 0x00000100 /* Subfile was created */ +#define IN_DELETE 0x00000200 /* Subfile was deleted */ +#define IN_DELETE_SELF 0x00000400 /* Self was deleted */ + +/* the following are legal events. they are sent as needed to any watch */ +#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */ +#define IN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define IN_IGNORED 0x00008000 /* File was ignored */ + +/* helper events */ +#define IN_CLOSE (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) /* close */ +#define IN_MOVE (IN_MOVED_FROM | IN_MOVED_TO) /* moves */ + +/* special flags */ +#define IN_ISDIR 0x40000000 /* event occurred against dir */ +#define IN_ONESHOT 0x80000000 /* only send event once */ + +/* + * All of the events - we build the list by hand so that we can add flags in + * the future and not break backward compatibility. Apps will get only the + * events that they originally wanted. Be sure to add new events here! + */ +#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ + IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ + IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF) + +#ifdef __KERNEL__ + +#include +#include +#include + +#ifdef CONFIG_INOTIFY + +extern void inotify_inode_queue_event(struct inode *, __u32, __u32, + const char *); +extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32, + const char *); +extern void inotify_unmount_inodes(struct list_head *); +extern void inotify_inode_is_dead(struct inode *); +extern u32 inotify_get_cookie(void); + +#else + +static inline void inotify_inode_queue_event(struct inode *inode, + __u32 mask, __u32 cookie, + const char *filename) +{ +} + +static inline void inotify_dentry_parent_queue_event(struct dentry *dentry, + __u32 mask, __u32 cookie, + const char *filename) +{ +} + +static inline void inotify_unmount_inodes(struct list_head *list) +{ +} + +static inline void inotify_inode_is_dead(struct inode *inode) +{ +} + +static inline u32 inotify_get_cookie(void) +{ + return 0; +} + +#endif /* CONFIG_INOTIFY */ + +#endif /* __KERNEL __ */ + +#endif /* _LINUX_INOTIFY_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index ff48815bd3a2..dec5827c7742 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -410,6 +410,10 @@ struct user_struct { atomic_t processes; /* How many processes does this user have? */ atomic_t files; /* How many open files does this user have? */ atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_INOTIFY + atomic_t inotify_watches; /* How many inotify watches does this user have? */ + atomic_t inotify_devs; /* How many inotify devs does this user have opened? 
*/ +#endif /* protected by mq_lock */ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ unsigned long locked_shm; /* How many pages of mlocked shm ? */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 5b5f434ac9a0..ce19a2aa0b21 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -61,7 +61,8 @@ enum CTL_DEV=7, /* Devices */ CTL_BUS=8, /* Busses */ CTL_ABI=9, /* Binary emulation */ - CTL_CPU=10 /* CPU stuff (speed scaling, etc) */ + CTL_CPU=10, /* CPU stuff (speed scaling, etc) */ + CTL_INOTIFY=11 /* Inotify */ }; /* CTL_BUS names: */ @@ -70,6 +71,14 @@ enum CTL_BUS_ISA=1 /* ISA */ }; +/* CTL_INOTIFY names: */ +enum +{ + INOTIFY_MAX_USER_DEVICES=1, /* max number of inotify device instances per user */ + INOTIFY_MAX_USER_WATCHES=2, /* max number of inotify watches per user */ + INOTIFY_MAX_QUEUED_EVENTS=3 /* Max number of queued events per inotify device instance */ +}; + /* CTL_KERN names: */ enum { diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 29196ce9b40f..42b40ae5eada 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -80,6 +80,9 @@ cond_syscall(sys_keyctl); cond_syscall(compat_sys_keyctl); cond_syscall(compat_sys_socketcall); cond_syscall(sys_set_zone_reclaim); +cond_syscall(sys_inotify_init); +cond_syscall(sys_inotify_add_watch); +cond_syscall(sys_inotify_rm_watch); /* arch-specific weak syscall entries */ cond_syscall(sys_pciconfig_read); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 270ee7fadbd8..b240e2cb86fc 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -67,6 +67,12 @@ extern int printk_ratelimit_jiffies; extern int printk_ratelimit_burst; extern int pid_max_min, pid_max_max; +#ifdef CONFIG_INOTIFY +extern int inotify_max_user_devices; +extern int inotify_max_user_watches; +extern int inotify_max_queued_events; +#endif + #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) int unknown_nmi_panic; extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *, @@ -218,6 +224,7 @@ static ctl_table root_table[] = { .mode = 0555, .child = dev_table, }, + { .ctl_name = 0 } }; @@ -959,6 +966,40 @@ static ctl_table fs_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, +#ifdef CONFIG_INOTIFY + { + .ctl_name = INOTIFY_MAX_USER_DEVICES, + .procname = "max_user_devices", + .data = &inotify_max_user_devices, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + + { + .ctl_name = INOTIFY_MAX_USER_WATCHES, + .procname = "max_user_watches", + .data = &inotify_max_user_watches, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + + { + .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, + .procname = "max_queued_events", + .data = &inotify_max_queued_events, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero + }, +#endif { .ctl_name = 0 } }; @@ -968,7 +1009,7 @@ static ctl_table debug_table[] = { static ctl_table dev_table[] = { { .ctl_name = 0 } -}; +}; extern void init_irq_proc (void); diff --git a/kernel/user.c b/kernel/user.c index 734575d55769..89e562feb1b1 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -120,6 +120,10 @@ struct user_struct * alloc_uid(uid_t uid) atomic_set(&new->processes, 0); atomic_set(&new->files, 0); atomic_set(&new->sigpending, 0); +#ifdef CONFIG_INOTIFY + atomic_set(&new->inotify_watches, 0); + atomic_set(&new->inotify_devs, 0); +#endif 
new->mq_bytes = 0; new->locked_shm = 0; -- cgit v1.2.3 From 0399cb08c54708db231d616f106f64d920e0b723 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 13 Jul 2005 12:38:18 -0400 Subject: [PATCH] inotify: move sysctl This moves the inotify sysctl knobs to "/proc/sys/fs/inotify" from "/proc/sys/fs". Also some related cleanup. Signed-off-by: Robert Love Signed-off-by: Linus Torvalds --- fs/inotify.c | 49 ++++++++++++++++++++++++++++++++++++++++++++---- include/linux/sysctl.h | 12 ++++++------ kernel/sysctl.c | 51 +++++++++++--------------------------------------- 3 files changed, 62 insertions(+), 50 deletions(-) (limited to 'include/linux/sysctl.h') diff --git a/fs/inotify.c b/fs/inotify.c index e423bfe0c86f..fb4803131423 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -45,8 +45,8 @@ static kmem_cache_t *event_cachep; static struct vfsmount *inotify_mnt; -/* These are configurable via /proc/sys/inotify */ -int inotify_max_user_devices; +/* these are configurable via /proc/sys/fs/inotify/ */ +int inotify_max_user_instances; int inotify_max_user_watches; int inotify_max_queued_events; @@ -125,6 +125,47 @@ struct inotify_watch { u32 mask; /* event mask for this watch */ }; +#ifdef CONFIG_SYSCTL + +#include + +static int zero; + +ctl_table inotify_table[] = { + { + .ctl_name = INOTIFY_MAX_USER_INSTANCES, + .procname = "max_user_instances", + .data = &inotify_max_user_instances, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = INOTIFY_MAX_USER_WATCHES, + .procname = "max_user_watches", + .data = &inotify_max_user_watches, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, + .procname = "max_queued_events", + .data = &inotify_max_queued_events, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero + }, + { .ctl_name = 0 } +}; +#endif /* CONFIG_SYSCTL */ + static inline void get_inotify_dev(struct inotify_device *dev) { atomic_inc(&dev->count); @@ -842,7 +883,7 @@ asmlinkage long sys_inotify_init(void) user = get_uid(current->user); - if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_devices)) { + if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_instances)) { ret = -EMFILE; goto out_err; } @@ -979,7 +1020,7 @@ static int __init inotify_init(void) inotify_mnt = kern_mount(&inotify_fs_type); inotify_max_queued_events = 8192; - inotify_max_user_devices = 128; + inotify_max_user_instances = 8; inotify_max_user_watches = 8192; atomic_set(&inotify_cookie, 0); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index ce19a2aa0b21..bfbbe94b297d 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -61,8 +61,7 @@ enum CTL_DEV=7, /* Devices */ CTL_BUS=8, /* Busses */ CTL_ABI=9, /* Binary emulation */ - CTL_CPU=10, /* CPU stuff (speed scaling, etc) */ - CTL_INOTIFY=11 /* Inotify */ + CTL_CPU=10 /* CPU stuff (speed scaling, etc) */ }; /* CTL_BUS names: */ @@ -71,12 +70,12 @@ enum CTL_BUS_ISA=1 /* ISA */ }; -/* CTL_INOTIFY names: */ +/* /proc/sys/fs/inotify/ */ enum { - INOTIFY_MAX_USER_DEVICES=1, /* max number of inotify device instances per user */ - INOTIFY_MAX_USER_WATCHES=2, /* max number of inotify watches per user */ - INOTIFY_MAX_QUEUED_EVENTS=3 /* Max number of queued events per inotify device instance */ + INOTIFY_MAX_USER_INSTANCES=1, 
/* max instances per user */ + INOTIFY_MAX_USER_WATCHES=2, /* max watches per user */ + INOTIFY_MAX_QUEUED_EVENTS=3 /* max queued events per instance */ }; /* CTL_KERN names: */ @@ -685,6 +684,7 @@ enum FS_XFS=17, /* struct: control xfs parameters */ FS_AIO_NR=18, /* current system-wide number of aio requests */ FS_AIO_MAX_NR=19, /* system-wide maximum number of aio requests */ + FS_INOTIFY=20, /* inotify submenu */ }; /* /proc/sys/fs/quota/ */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b240e2cb86fc..e60b9c36f1f0 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -67,12 +67,6 @@ extern int printk_ratelimit_jiffies; extern int printk_ratelimit_burst; extern int pid_max_min, pid_max_max; -#ifdef CONFIG_INOTIFY -extern int inotify_max_user_devices; -extern int inotify_max_user_watches; -extern int inotify_max_queued_events; -#endif - #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) int unknown_nmi_panic; extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *, @@ -152,6 +146,9 @@ extern ctl_table random_table[]; #ifdef CONFIG_UNIX98_PTYS extern ctl_table pty_table[]; #endif +#ifdef CONFIG_INOTIFY +extern ctl_table inotify_table[]; +#endif #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT int sysctl_legacy_va_layout; @@ -957,6 +954,14 @@ static ctl_table fs_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, +#ifdef CONFIG_INOTIFY + { + .ctl_name = FS_INOTIFY, + .procname = "inotify", + .mode = 0555, + .child = inotify_table, + }, +#endif #endif { .ctl_name = KERN_SETUID_DUMPABLE, @@ -966,40 +971,6 @@ static ctl_table fs_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, -#ifdef CONFIG_INOTIFY - { - .ctl_name = INOTIFY_MAX_USER_DEVICES, - .procname = "max_user_devices", - .data = &inotify_max_user_devices, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - }, - - { - .ctl_name = INOTIFY_MAX_USER_WATCHES, - .procname = "max_user_watches", - .data = &inotify_max_user_watches, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - }, - - { - .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, - .procname = "max_queued_events", - .data = &inotify_max_queued_events, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero - }, -#endif { .ctl_name = 0 } }; -- cgit v1.2.3 From 951f22d5b1f0eaae35dafc669e3774a0c2084d10 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 27 Jul 2005 11:44:57 -0700 Subject: [PATCH] s390: spin lock retry Split spin lock and r/w lock implementation into a single try which is done inline and an out of line function that repeatedly tries to get the lock before doing the cpu_relax(). Add a system control to set the number of retries before a cpu is yielded. The reason for the spin lock retry is that the diagnose 0x44 that is used to give up the virtual cpu is quite expensive. For spin locks that are held only for a short period of time the costs of the diagnoses outweigh the savings for spin locks that are held for a longer time. The default retry count is 1000.
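As a rough illustration of the retry-before-yield policy described above (this sketch is not part of the patch), the same loop can be written in plain user-space C, with GCC's __sync_val_compare_and_swap standing in for the s390 compare-and-swap and sched_yield() standing in for diagnose 0x44; spin_retry plays the role of the new sysctl:

	#include <sched.h>

	int spin_retry = 1000;			/* tunable, like the new sysctl */

	/* Toy lock word: 0 means free, any non-zero value means held
	 * (the patch stores the caller's return address). */
	void toy_spin_lock(volatile unsigned int *lock, unsigned int tag)
	{
		int count = spin_retry;

		for (;;) {
			if (count-- <= 0) {
				sched_yield();	/* give up the cpu, like diag 0x44 */
				count = spin_retry;
			}
			/* one compare-and-swap attempt per iteration */
			if (__sync_val_compare_and_swap(lock, 0u, tag) == 0)
				return;
		}
	}

Lowering spin_retry yields the virtual cpu sooner at the cost of more diagnose calls; raising it favours locks that are released quickly, which is the trade-off the sysctl below is meant to tune.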
Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/kernel/head64.S | 3 - arch/s390/kernel/setup.c | 6 -- arch/s390/lib/Makefile | 4 +- arch/s390/lib/spinlock.c | 133 +++++++++++++++++++++++ include/asm-s390/lowcore.h | 4 +- include/asm-s390/processor.h | 5 +- include/asm-s390/spinlock.h | 252 ++++++++++++++----------------------------- include/linux/sysctl.h | 1 + kernel/sysctl.c | 12 ++- 9 files changed, 230 insertions(+), 190 deletions(-) create mode 100644 arch/s390/lib/spinlock.c (limited to 'include/linux/sysctl.h') diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index f525c0c21250..28c50bdf7d40 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -658,10 +658,8 @@ startup:basr %r13,0 # get base # la %r1,0f-.LPG1(%r13) # set program check address stg %r1,__LC_PGM_NEW_PSW+8 - mvc __LC_DIAG44_OPCODE(8),.Lnop-.LPG1(%r13) diag 0,0,0x44 # test diag 0x44 oi 7(%r12),32 # set diag44 flag - mvc __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13) 0: # @@ -702,7 +700,6 @@ startup:basr %r13,0 # get base .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 -.Ldiag44:.long 0x83000044 .org PARMAREA-64 .Lduct: .long 0,0,0,0,0,0,0,0 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a12183989a79..5ba5a5485da9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -431,12 +431,6 @@ setup_lowcore(void) ctl_set_bit(14, 29); } #endif -#ifdef CONFIG_ARCH_S390X - if (MACHINE_HAS_DIAG44) - lc->diag44_opcode = 0x83000044; - else - lc->diag44_opcode = 0x07000700; -#endif /* CONFIG_ARCH_S390X */ set_prefix((u32)(unsigned long) lc); } diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index a8758b1d20a9..b701efa1f00e 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -5,5 +5,5 @@ EXTRA_AFLAGS := -traditional lib-y += delay.o string.o -lib-$(CONFIG_ARCH_S390_31) += uaccess.o -lib-$(CONFIG_ARCH_S390X) += uaccess64.o +lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o +lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c new file mode 100644 index 000000000000..888b5596c195 --- /dev/null +++ b/arch/s390/lib/spinlock.c @@ -0,0 +1,133 @@ +/* + * arch/s390/lib/spinlock.c + * Out of line spinlock code. 
+ * + * S390 version + * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include +#include +#include +#include +#include + +atomic_t spin_retry_counter; +int spin_retry = 1000; + +/** + * spin_retry= parameter + */ +static int __init spin_retry_setup(char *str) +{ + spin_retry = simple_strtoul(str, &str, 0); + return 1; +} +__setup("spin_retry=", spin_retry_setup); + +static inline void +_diag44(void) +{ +#ifdef __s390x__ + if (MACHINE_HAS_DIAG44) +#endif + asm volatile("diag 0,0,0x44"); +} + +void +_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc) +{ + int count = spin_retry; + + while (1) { + if (count-- <= 0) { + _diag44(); + count = spin_retry; + } + atomic_inc(&spin_retry_counter); + if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0) + return; + } +} +EXPORT_SYMBOL(_raw_spin_lock_wait); + +int +_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc) +{ + int count = spin_retry; + + while (count-- > 0) { + atomic_inc(&spin_retry_counter); + if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0) + return 1; + } + return 0; +} +EXPORT_SYMBOL(_raw_spin_trylock_retry); + +void +_raw_read_lock_wait(rwlock_t *rw) +{ + unsigned int old; + int count = spin_retry; + + while (1) { + if (count-- <= 0) { + _diag44(); + count = spin_retry; + } + atomic_inc(&spin_retry_counter); + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return; + } +} +EXPORT_SYMBOL(_raw_read_lock_wait); + +int +_raw_read_trylock_retry(rwlock_t *rw) +{ + unsigned int old; + int count = spin_retry; + + while (count-- > 0) { + atomic_inc(&spin_retry_counter); + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return 1; + } + return 0; +} +EXPORT_SYMBOL(_raw_read_trylock_retry); + +void +_raw_write_lock_wait(rwlock_t *rw) +{ + int count = spin_retry; + + while (1) { + if (count-- <= 0) { + _diag44(); + count = spin_retry; + } + atomic_inc(&spin_retry_counter); + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return; + } +} +EXPORT_SYMBOL(_raw_write_lock_wait); + +int +_raw_write_trylock_retry(rwlock_t *rw) +{ + int count = spin_retry; + + while (count-- > 0) { + atomic_inc(&spin_retry_counter); + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return 1; + } + return 0; +} +EXPORT_SYMBOL(_raw_write_trylock_retry); diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 76b5b19c0ae2..afe6a9f9b0ae 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -90,7 +90,6 @@ #define __LC_SYSTEM_TIMER 0x278 #define __LC_LAST_UPDATE_CLOCK 0x280 #define __LC_STEAL_CLOCK 0x288 -#define __LC_DIAG44_OPCODE 0x290 #define __LC_KERNEL_STACK 0xD40 #define __LC_THREAD_INFO 0xD48 #define __LC_ASYNC_STACK 0xD50 @@ -286,8 +285,7 @@ struct _lowcore __u64 system_timer; /* 0x278 */ __u64 last_update_clock; /* 0x280 */ __u64 steal_clock; /* 0x288 */ - __u32 diag44_opcode; /* 0x290 */ - __u8 pad8[0xc00-0x294]; /* 0x294 */ + __u8 pad8[0xc00-0x290]; /* 0x290 */ /* System info area */ __u64 save_area[16]; /* 0xc00 */ __u8 pad9[0xd40-0xc80]; /* 0xc80 */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 8bd14de69e35..4ec652ebb3b1 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -203,7 +203,10 @@ unsigned long get_wchan(struct task_struct *p); # define cpu_relax() asm volatile ("diag 0,0,68" : : : "memory") #else /* __s390x__ */ # define cpu_relax() \ - asm volatile 
("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory") + do { \ + if (MACHINE_HAS_DIAG44) \ + asm volatile ("diag 0,0,68" : : : "memory"); \ + } while (0) #endif /* __s390x__ */ /* diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 53cc736b9820..8ff10300f7ee 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h @@ -11,21 +11,16 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#ifdef __s390x__ -/* - * Grmph, take care of %&#! user space programs that include - * asm/spinlock.h. The diagnose is only available in kernel - * context. - */ -#ifdef __KERNEL__ -#include -#define __DIAG44_INSN "ex" -#define __DIAG44_OPERAND __LC_DIAG44_OPCODE -#else -#define __DIAG44_INSN "#" -#define __DIAG44_OPERAND 0 -#endif -#endif /* __s390x__ */ +static inline int +_raw_compare_and_swap(volatile unsigned int *lock, + unsigned int old, unsigned int new) +{ + asm volatile ("cs %0,%3,0(%4)" + : "=d" (old), "=m" (*lock) + : "0" (old), "d" (new), "a" (lock), "m" (*lock) + : "cc", "memory" ); + return old; +} /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -41,58 +36,35 @@ typedef struct { #endif } __attribute__ ((aligned (4))) spinlock_t; -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } -#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } +#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) -#define spin_is_locked(x) ((x)->lock != 0) +#define spin_is_locked(x) ((x)->lock != 0) #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) -extern inline void _raw_spin_lock(spinlock_t *lp) +extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); +extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); + +static inline void _raw_spin_lock(spinlock_t *lp) { -#ifndef __s390x__ - unsigned int reg1, reg2; - __asm__ __volatile__(" bras %0,1f\n" - "0: diag 0,0,68\n" - "1: slr %1,%1\n" - " cs %1,%0,0(%3)\n" - " jl 0b\n" - : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock) - : "a" (&lp->lock), "m" (lp->lock) - : "cc", "memory" ); -#else /* __s390x__ */ - unsigned long reg1, reg2; - __asm__ __volatile__(" bras %1,1f\n" - "0: " __DIAG44_INSN " 0,%4\n" - "1: slr %0,%0\n" - " cs %0,%1,0(%3)\n" - " jl 0b\n" - : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock) - : "a" (&lp->lock), "i" (__DIAG44_OPERAND), - "m" (lp->lock) : "cc", "memory" ); -#endif /* __s390x__ */ + unsigned long pc = (unsigned long) __builtin_return_address(0); + + if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0)) + _raw_spin_lock_wait(lp, pc); } -extern inline int _raw_spin_trylock(spinlock_t *lp) +static inline int _raw_spin_trylock(spinlock_t *lp) { - unsigned long reg; - unsigned int result; - - __asm__ __volatile__(" basr %1,0\n" - "0: cs %0,%1,0(%3)" - : "=d" (result), "=&d" (reg), "=m" (lp->lock) - : "a" (&lp->lock), "m" (lp->lock), "0" (0) - : "cc", "memory" ); - return !result; + unsigned long pc = (unsigned long) __builtin_return_address(0); + + if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0)) + return 1; + return _raw_spin_trylock_retry(lp, pc); } -extern inline void _raw_spin_unlock(spinlock_t *lp) +static inline void _raw_spin_unlock(spinlock_t *lp) { - unsigned int old; - - __asm__ __volatile__("cs %0,%3,0(%4)" - : "=d" (old), "=m" (lp->lock) - : "0" (lp->lock), "d" (0), "a" (lp) - : "cc", "memory" ); + _raw_compare_and_swap(&lp->lock, lp->lock, 0); } /* @@ -106,7 +78,7 @@ extern inline void 
_raw_spin_unlock(spinlock_t *lp) * read-locks. */ typedef struct { - volatile unsigned long lock; + volatile unsigned int lock; volatile unsigned long owner_pc; #ifdef CONFIG_PREEMPT unsigned int break_lock; @@ -129,123 +101,55 @@ typedef struct { */ #define write_can_lock(x) ((x)->lock == 0) -#ifndef __s390x__ -#define _raw_read_lock(rw) \ - asm volatile(" l 2,0(%1)\n" \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: la 2,0(2)\n" /* clear high (=write) bit */ \ - " la 3,1(2)\n" /* one more reader */ \ - " cs 2,3,0(%1)\n" /* try to write new value */ \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_read_lock(rw) \ - asm volatile(" lg 2,0(%1)\n" \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: nihh 2,0x7fff\n" /* clear high (=write) bit */ \ - " la 3,1(2)\n" /* one more reader */ \ - " csg 2,3,0(%1)\n" /* try to write new value */ \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_read_unlock(rw) \ - asm volatile(" l 2,0(%1)\n" \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: lr 3,2\n" \ - " ahi 3,-1\n" /* one less reader */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_read_unlock(rw) \ - asm volatile(" lg 2,0(%1)\n" \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: lgr 3,2\n" \ - " bctgr 3,0\n" /* one less reader */ \ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_write_lock(rw) \ - asm volatile(" lhi 3,1\n" \ - " sll 3,31\n" /* new lock value = 0x80000000 */ \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: slr 2,2\n" /* old lock value must be 0 */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_write_lock(rw) \ - asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: slgr 2,2\n" /* old lock value must be 0 */ \ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_write_unlock(rw) \ - asm volatile(" slr 3,3\n" /* new lock value = 0 */ \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: lhi 2,1\n" \ - " sll 2,31\n" /* old lock value must be 0x80000000 */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_write_unlock(rw) \ - asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) - -extern inline int _raw_write_trylock(rwlock_t *rw) +extern void _raw_read_lock_wait(rwlock_t *lp); +extern int _raw_read_trylock_retry(rwlock_t *lp); +extern void _raw_write_lock_wait(rwlock_t 
*lp); +extern int _raw_write_trylock_retry(rwlock_t *lp); + +static inline void _raw_read_lock(rwlock_t *rw) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + _raw_read_lock_wait(rw); +} + +static inline void _raw_read_unlock(rwlock_t *rw) +{ + unsigned int old, cmp; + + old = rw->lock; + do { + cmp = old; + old = _raw_compare_and_swap(&rw->lock, old, old - 1); + } while (cmp != old); +} + +static inline void _raw_write_lock(rwlock_t *rw) +{ + if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + _raw_write_lock_wait(rw); +} + +static inline void _raw_write_unlock(rwlock_t *rw) +{ + _raw_compare_and_swap(&rw->lock, 0x80000000, 0); +} + +static inline int _raw_read_trylock(rwlock_t *rw) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old)) + return 1; + return _raw_read_trylock_retry(rw); +} + +static inline int _raw_write_trylock(rwlock_t *rw) { - unsigned long result, reg; - - __asm__ __volatile__( -#ifndef __s390x__ - " lhi %1,1\n" - " sll %1,31\n" - " cs %0,%1,0(%3)" -#else /* __s390x__ */ - " llihh %1,0x8000\n" - "0: csg %0,%1,0(%3)\n" -#endif /* __s390x__ */ - : "=d" (result), "=&d" (reg), "=m" (rw->lock) - : "a" (&rw->lock), "m" (rw->lock), "0" (0UL) - : "cc", "memory" ); - return result == 0; + if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) + return 1; + return _raw_write_trylock_retry(rw); } #endif /* __ASM_SPINLOCK_H */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index bfbbe94b297d..e82be96d4906 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -145,6 +145,7 @@ enum KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */ KERN_RANDOMIZE=68, /* int: randomize virtual address space */ KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ + KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ }; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e60b9c36f1f0..3e0bbee549ea 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -114,6 +114,7 @@ extern int unaligned_enabled; extern int sysctl_ieee_emulation_warnings; #endif extern int sysctl_userprocess_debug; +extern int spin_retry; #endif extern int sysctl_hz_timer; @@ -647,7 +648,16 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, - +#if defined(CONFIG_ARCH_S390) + { + .ctl_name = KERN_SPIN_RETRY, + .procname = "spin_retry", + .data = &spin_retry, + .maxlen = sizeof (int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif { .ctl_name = 0 } }; -- cgit v1.2.3