From 0f45aa18e65cf3d768082d7d86054a0d2a20bb18 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 19 Jun 2005 19:35:50 +0100 Subject: AUDIT: Allow filtering of user messages Turn the field from a bitmask to an enumeration and add a list to allow filtering of messages generated by userspace. We also define a list for file system watches in anticipation of that feature. Signed-off-by: David Woodhouse --- include/linux/audit.h | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index bf2ad3ba72eb..b5bda24f7365 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -75,10 +75,15 @@ #define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ /* Rule flags */ -#define AUDIT_PER_TASK 0x01 /* Apply rule at task creation (not syscall) */ -#define AUDIT_AT_ENTRY 0x02 /* Apply rule at syscall entry */ -#define AUDIT_AT_EXIT 0x04 /* Apply rule at syscall exit */ -#define AUDIT_PREPEND 0x10 /* Prepend to front of list */ +#define AUDIT_FILTER_USER 0x00 /* Apply rule to user-generated messages */ +#define AUDIT_FILTER_TASK 0x01 /* Apply rule at task creation (not syscall) */ +#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */ +#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */ +#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */ + +#define AUDIT_NR_FILTERS 5 + +#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */ /* Rule actions */ #define AUDIT_NEVER 0 /* Do not build context if rule matches */ @@ -230,6 +235,7 @@ extern int audit_socketcall(int nargs, unsigned long *args); extern int audit_sockaddr(int len, void *addr); extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt); extern void audit_signal_info(int sig, struct task_struct *t); +extern int audit_filter_user(struct task_struct *tsk, int type); #else #define audit_alloc(t) ({ 0; }) #define audit_free(t) do { ; } while (0) @@ -246,6 +252,7 @@ extern void audit_signal_info(int sig, struct task_struct *t); #define audit_sockaddr(len, addr) ({ 0; }) #define audit_avc_path(dentry, mnt) ({ 0; }) #define audit_signal_info(s,t) do { ; } while (0) +#define audit_filter_user(struct ({ 1; }) #endif #ifdef CONFIG_AUDIT -- cgit v1.2.3 From ae7b961b1c943367dfe179411f120d7bf8eaba89 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 20 Jun 2005 16:11:05 +0100 Subject: AUDIT: Report lookup flags with path/inode records. When LOOKUP_PARENT is used, the inode which results is not the inode found at the pathname. Report the flags so that this doesn't generate misleading audit records. 
Signed-off-by: David Woodhouse --- fs/namei.c | 2 +- include/linux/audit.h | 4 ++-- kernel/auditsc.c | 18 +++++++++++------- 3 files changed, 14 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/namei.c b/fs/namei.c index a7f7f44119b3..6e888dd10461 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1043,7 +1043,7 @@ int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata out: if (unlikely(current->audit_context && nd && nd->dentry && nd->dentry->d_inode)) - audit_inode(name, nd->dentry->d_inode); + audit_inode(name, nd->dentry->d_inode, flags); return retval; } diff --git a/include/linux/audit.h b/include/linux/audit.h index b5bda24f7365..5f812e4d01e4 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -220,7 +220,7 @@ extern void audit_syscall_entry(struct task_struct *task, int arch, extern void audit_syscall_exit(struct task_struct *task, int failed, long return_code); extern void audit_getname(const char *name); extern void audit_putname(const char *name); -extern void audit_inode(const char *name, const struct inode *inode); +extern void audit_inode(const char *name, const struct inode *inode, unsigned flags); /* Private API (for audit.c only) */ extern int audit_receive_filter(int type, int pid, int uid, int seq, @@ -243,7 +243,7 @@ extern int audit_filter_user(struct task_struct *tsk, int type); #define audit_syscall_exit(t,f,r) do { ; } while (0) #define audit_getname(n) do { ; } while (0) #define audit_putname(n) do { ; } while (0) -#define audit_inode(n,i) do { ; } while (0) +#define audit_inode(n,i,f) do { ; } while (0) #define audit_receive_filter(t,p,u,s,d,l) ({ -EOPNOTSUPP; }) #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) #define audit_get_loginuid(c) ({ -1; }) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 48a39579c45c..031f979019d1 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -95,6 +95,7 @@ struct audit_names { uid_t uid; gid_t gid; dev_t rdev; + unsigned flags; }; struct audit_aux_data { @@ -792,6 +793,8 @@ static void audit_log_exit(struct audit_context *context) audit_log_format(ab, " name="); audit_log_untrustedstring(ab, context->names[i].name); } + audit_log_format(ab, " flags=%x\n", context->names[i].flags); + if (context->names[i].ino != (unsigned long)-1) audit_log_format(ab, " inode=%lu dev=%02x:%02x mode=%#o" " ouid=%u ogid=%u rdev=%02x:%02x", @@ -1018,7 +1021,7 @@ void audit_putname(const char *name) /* Store the inode and device from a lookup. Called from * fs/namei.c:path_lookup(). 
*/ -void audit_inode(const char *name, const struct inode *inode) +void audit_inode(const char *name, const struct inode *inode, unsigned flags) { int idx; struct audit_context *context = current->audit_context; @@ -1044,12 +1047,13 @@ void audit_inode(const char *name, const struct inode *inode) ++context->ino_count; #endif } - context->names[idx].ino = inode->i_ino; - context->names[idx].dev = inode->i_sb->s_dev; - context->names[idx].mode = inode->i_mode; - context->names[idx].uid = inode->i_uid; - context->names[idx].gid = inode->i_gid; - context->names[idx].rdev = inode->i_rdev; + context->names[idx].flags = flags; + context->names[idx].ino = inode->i_ino; + context->names[idx].dev = inode->i_sb->s_dev; + context->names[idx].mode = inode->i_mode; + context->names[idx].uid = inode->i_uid; + context->names[idx].gid = inode->i_gid; + context->names[idx].rdev = inode->i_rdev; } void auditsc_get_stamp(struct audit_context *ctx, -- cgit v1.2.3 From f6a789d19858a951e7ff9e297a44b377c21b6c33 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 21 Jun 2005 16:22:01 +0100 Subject: AUDIT: Spawn kernel thread to list filter rules. If we have enough rules to fill the netlink buffer space, it'll deadlock because auditctl isn't ever actually going to read from the socket until we return, and we aren't going to return until it reads... so we spawn a kernel thread to spew out the list and then exit. Signed-off-by: David Woodhouse --- include/linux/audit.h | 1 + kernel/audit.c | 2 +- kernel/auditsc.c | 53 +++++++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 47 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index 5f812e4d01e4..5d1a9dda5acb 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -281,6 +281,7 @@ extern void audit_send_reply(int pid, int seq, int type, int done, int multi, void *payload, int size); extern void audit_log_lost(const char *message); +extern struct semaphore audit_netlink_sem; #else #define audit_log(c,t,f,...) do { ; } while (0) #define audit_log_start(c,t) ({ NULL; }) diff --git a/kernel/audit.c b/kernel/audit.c index ab6ac560cfe5..c1ab8dbbb67b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -110,7 +110,7 @@ static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); /* The netlink socket is only to be read by 1 CPU, which lets us assume * that list additions and deletions never happen simultaneously in * auditsc.c */ -static DECLARE_MUTEX(audit_netlink_sem); +DECLARE_MUTEX(audit_netlink_sem); /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting * audit records. Since printk uses a 1024 byte buffer, this buffer diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 031f979019d1..cb8a44945157 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -39,6 +39,7 @@ #include #include #include +#include #include /* 0 = no checking @@ -281,24 +282,60 @@ static int audit_copy_rule(struct audit_rule *d, struct audit_rule *s) return 0; } +static int audit_list_rules(void *_dest) +{ + int pid, seq; + int *dest = _dest; + struct audit_entry *entry; + int i; + + pid = dest[0]; + seq = dest[1]; + kfree(dest); + + down(&audit_netlink_sem); + + /* The *_rcu iterators not needed here because we are + always called with audit_netlink_sem held. 
*/ + for (i=0; i<AUDIT_NR_FILTERS; i++) { + list_for_each_entry(entry, &audit_filter_list[i], list) + audit_send_reply(pid, seq, AUDIT_LIST, 0, 1, + &entry->rule, sizeof(entry->rule)); + } + audit_send_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0); + + up(&audit_netlink_sem); + return 0; +} + int audit_receive_filter(int type, int pid, int uid, int seq, void *data, uid_t loginuid) { struct audit_entry *entry; + struct task_struct *tsk; + int *dest; int err = 0; - int i; unsigned listnr; switch (type) { case AUDIT_LIST: - /* The *_rcu iterators not needed here because we are - always called with audit_netlink_sem held. */ - for (i=0; i<AUDIT_NR_FILTERS; i++) { - list_for_each_entry(entry, &audit_filter_list[i], list) - audit_send_reply(pid, seq, AUDIT_LIST, 0, 1, - &entry->rule, sizeof(entry->rule)); + /* We can't just spew out the rules here because we might fill + * the available socket buffer space and deadlock waiting for + * auditctl to read from it... which isn't ever going to + * happen if we're actually running in the context of auditctl + * trying to _send_ the stuff */ + + dest = kmalloc(2 * sizeof(int), GFP_KERNEL); + if (!dest) + return -ENOMEM; + dest[0] = pid; + dest[1] = seq; + + tsk = kthread_run(audit_list_rules, dest, "audit_list_rules"); + if (IS_ERR(tsk)) { + kfree(dest); + err = PTR_ERR(tsk); } - audit_send_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0); break; case AUDIT_ADD: if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL))) -- cgit v1.2.3 From 4a4cd633b575609b741a1de7837223a2d9e1c34c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 22 Jun 2005 14:56:47 +0100 Subject: AUDIT: Optimise the audit-disabled case for discarding user messages Also exempt USER_AVC message from being discarded to preserve existing behaviour for SE Linux. Signed-off-by: David Woodhouse --- include/linux/audit.h | 7 ++++--- kernel/audit.c | 32 ++++++++++++++------------------ kernel/auditsc.c | 21 ++++++++++++++++----- 3 files changed, 34 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index 5d1a9dda5acb..77adef640537 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -51,7 +51,8 @@ #define AUDIT_WATCH_LIST 1009 /* List all file/dir watches */ #define AUDIT_SIGNAL_INFO 1010 /* Get info about sender of signal to auditd */ -#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages uninteresting to kernel */ +#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ +#define AUDIT_USER_AVC 1107 /* We filter this differently */ #define AUDIT_LAST_USER_MSG 1199 #define AUDIT_DAEMON_START 1200 /* Daemon startup record */ @@ -235,7 +236,7 @@ extern int audit_socketcall(int nargs, unsigned long *args); extern int audit_sockaddr(int len, void *addr); extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt); extern void audit_signal_info(int sig, struct task_struct *t); -extern int audit_filter_user(struct task_struct *tsk, int type); +extern int audit_filter_user(int pid, int type); #else #define audit_alloc(t) ({ 0; }) #define audit_free(t) do { ; } while (0) @@ -252,7 +253,7 @@ extern int audit_filter_user(struct task_struct *tsk, int type); #define audit_sockaddr(len, addr) ({ 0; }) #define audit_avc_path(dentry, mnt) ({ 0; }) #define audit_signal_info(s,t) do { ; } while (0) -#define audit_filter_user(struct ({ 1; }) +#define audit_filter_user(p,t) ({ 1; }) #endif #ifdef CONFIG_AUDIT diff --git a/kernel/audit.c b/kernel/audit.c index c1ab8dbbb67b..09a37581213b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -429,25 +429,21 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG: - read_lock(&tasklist_lock); - tsk = find_task_by_pid(pid); - if (tsk) -
get_task_struct(tsk); - read_unlock(&tasklist_lock); - if (!tsk) - return -ESRCH; - - if (audit_enabled && audit_filter_user(tsk, msg_type)) { - ab = audit_log_start(NULL, msg_type); - if (ab) { - audit_log_format(ab, - "user pid=%d uid=%u auid=%u msg='%.1024s'", - pid, uid, loginuid, (char *)data); - audit_set_pid(ab, pid); - audit_log_end(ab); - } + if (!audit_enabled && msg_type != AUDIT_USER_AVC) + return 0; + + err = audit_filter_user(pid, msg_type); + if (err == 1) { + err = 0; + ab = audit_log_start(NULL, msg_type); + if (ab) { + audit_log_format(ab, + "user pid=%d uid=%u auid=%u msg='%.1024s'", + pid, uid, loginuid, (char *)data); + audit_set_pid(ab, pid); + audit_log_end(ab); + } } - put_task_struct(tsk); break; case AUDIT_ADD: case AUDIT_DEL: diff --git a/kernel/auditsc.c b/kernel/auditsc.c index cb8a44945157..fc858b0c044a 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -530,22 +530,33 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, return AUDIT_BUILD_CONTEXT; } -int audit_filter_user(struct task_struct *tsk, int type) +int audit_filter_user(int pid, int type) { + struct task_struct *tsk; struct audit_entry *e; enum audit_state state; + int ret = 1; - if (audit_pid && tsk->pid == audit_pid) - return AUDIT_DISABLED; + read_lock(&tasklist_lock); + tsk = find_task_by_pid(pid); + if (tsk) + get_task_struct(tsk); + read_unlock(&tasklist_lock); + + if (!tsk) + return -ESRCH; rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) { if (audit_filter_rules(tsk, &e->rule, NULL, &state)) { - rcu_read_unlock(); - return state != AUDIT_DISABLED; + if (state == AUDIT_DISABLED) + ret = 0; + break; } } rcu_read_unlock(); + put_task_struct(tsk); + return 1; /* Audit by default */ } -- cgit v1.2.3 From 9ad9ad385be27fcc7c16d290d972c6173e780a61 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 22 Jun 2005 15:04:33 +0100 Subject: AUDIT: Wait for backlog to clear when generating messages. Add a gfp_mask to audit_log_start() and audit_log(), to reduce the amount of GFP_ATOMIC allocation -- most of it doesn't need to be GFP_ATOMIC. Also if the mask includes __GFP_WAIT, then wait up to 60 seconds for the auditd backlog to clear instead of immediately abandoning the message. The timeout should probably be made configurable, but for now it'll suffice that it only happens if auditd is actually running. Signed-off-by: David Woodhouse --- include/linux/audit.h | 8 +++--- kernel/audit.c | 60 +++++++++++++++++++++++++++++++----------- kernel/auditsc.c | 14 +++++----- security/selinux/avc.c | 4 +-- security/selinux/hooks.c | 2 +- security/selinux/ss/services.c | 4 +-- 6 files changed, 61 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index 77adef640537..2f56546eb248 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -259,11 +259,11 @@ extern int audit_filter_user(int pid, int type); #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ -extern void audit_log(struct audit_context *ctx, int type, - const char *fmt, ...) - __attribute__((format(printf,3,4))); +extern void audit_log(struct audit_context *ctx, int gfp_mask, + int type, const char *fmt, ...) 
+ __attribute__((format(printf,4,5))); -extern struct audit_buffer *audit_log_start(struct audit_context *ctx,int type); +extern struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask, int type); extern void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) __attribute__((format(printf,2,3))); diff --git a/kernel/audit.c b/kernel/audit.c index 09a37581213b..644ab825118b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -106,6 +106,7 @@ static LIST_HEAD(audit_freelist); static struct sk_buff_head audit_skb_queue; static struct task_struct *kauditd_task; static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); +static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); /* The netlink socket is only to be read by 1 CPU, which lets us assume * that list additions and deletions never happen simultaneously in @@ -130,6 +131,7 @@ struct audit_buffer { struct list_head list; struct sk_buff *skb; /* formatted skb ready to send */ struct audit_context *ctx; /* NULL or associated context */ + int gfp_mask; }; static void audit_set_pid(struct audit_buffer *ab, pid_t pid) @@ -226,7 +228,7 @@ static int audit_set_rate_limit(int limit, uid_t loginuid) { int old = audit_rate_limit; audit_rate_limit = limit; - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "audit_rate_limit=%d old=%d by auid=%u", audit_rate_limit, old, loginuid); return old; @@ -236,7 +238,7 @@ static int audit_set_backlog_limit(int limit, uid_t loginuid) { int old = audit_backlog_limit; audit_backlog_limit = limit; - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "audit_backlog_limit=%d old=%d by auid=%u", audit_backlog_limit, old, loginuid); return old; @@ -248,7 +250,7 @@ static int audit_set_enabled(int state, uid_t loginuid) if (state != 0 && state != 1) return -EINVAL; audit_enabled = state; - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "audit_enabled=%d old=%d by auid=%u", audit_enabled, old, loginuid); return old; @@ -262,7 +264,7 @@ static int audit_set_failure(int state, uid_t loginuid) && state != AUDIT_FAIL_PANIC) return -EINVAL; audit_failure = state; - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "audit_failure=%d old=%d by auid=%u", audit_failure, old, loginuid); return old; @@ -274,6 +276,7 @@ int kauditd_thread(void *dummy) while (1) { skb = skb_dequeue(&audit_skb_queue); + wake_up(&audit_backlog_wait); if (skb) { if (audit_pid) { int err = netlink_unicast(audit_sock, skb, audit_pid, 0); @@ -417,7 +420,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (status_get->mask & AUDIT_STATUS_PID) { int old = audit_pid; audit_pid = status_get->pid; - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "audit_pid=%d old=%d by auid=%u", audit_pid, old, loginuid); } @@ -435,7 +438,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) err = audit_filter_user(pid, msg_type); if (err == 1) { err = 0; - ab = audit_log_start(NULL, msg_type); + ab = audit_log_start(NULL, GFP_KERNEL, msg_type); if (ab) { audit_log_format(ab, "user pid=%d uid=%u auid=%u msg='%.1024s'", @@ -522,7 +525,7 @@ static int __init audit_init(void) skb_queue_head_init(&audit_skb_queue); audit_initialized = 1; audit_enabled = audit_default; - audit_log(NULL, AUDIT_KERNEL, "initialized"); + audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); return 0; } __initcall(audit_init); @@ -586,6 
+589,7 @@ static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, goto err; ab->ctx = ctx; + ab->gfp_mask = gfp_mask; nlh = (struct nlmsghdr *)skb_put(ab->skb, NLMSG_SPACE(0)); nlh->nlmsg_type = type; nlh->nlmsg_flags = 0; @@ -644,17 +648,42 @@ static inline void audit_get_stamp(struct audit_context *ctx, * syscall, then the syscall is marked as auditable and an audit record * will be written at syscall exit. If there is no associated task, tsk * should be NULL. */ -struct audit_buffer *audit_log_start(struct audit_context *ctx, int type) + +struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask, + int type) { struct audit_buffer *ab = NULL; struct timespec t; unsigned int serial; + int reserve; if (!audit_initialized) return NULL; - if (audit_backlog_limit - && skb_queue_len(&audit_skb_queue) > audit_backlog_limit) { + if (gfp_mask & __GFP_WAIT) + reserve = 0; + else + reserve = 5; /* Allow atomic callers to go up to five + entries over the normal backlog limit */ + + while (audit_backlog_limit + && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) { + if (gfp_mask & __GFP_WAIT) { + int ret = 1; + /* Wait for auditd to drain the queue a little */ + DECLARE_WAITQUEUE(wait, current); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&audit_backlog_wait, &wait); + + if (audit_backlog_limit && + skb_queue_len(&audit_skb_queue) > audit_backlog_limit) + ret = schedule_timeout(HZ * 60); + + __set_current_state(TASK_RUNNING); + remove_wait_queue(&audit_backlog_wait, &wait); + if (ret) + continue; + } if (audit_rate_check()) printk(KERN_WARNING "audit: audit_backlog=%d > " @@ -665,7 +694,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, int type) return NULL; } - ab = audit_buffer_alloc(ctx, GFP_ATOMIC, type); + ab = audit_buffer_alloc(ctx, gfp_mask, type); if (!ab) { audit_log_lost("out of memory in audit_log_start"); return NULL; @@ -689,7 +718,7 @@ static inline int audit_expand(struct audit_buffer *ab, int extra) { struct sk_buff *skb = ab->skb; int ret = pskb_expand_head(skb, skb_headroom(skb), extra, - GFP_ATOMIC); + ab->gfp_mask); if (ret < 0) { audit_log_lost("out of memory in audit_expand"); return 0; @@ -808,7 +837,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, audit_log_format(ab, " %s", prefix); /* We will allow 11 spaces for ' (deleted)' to be appended */ - path = kmalloc(PATH_MAX+11, GFP_KERNEL); + path = kmalloc(PATH_MAX+11, ab->gfp_mask); if (!path) { audit_log_format(ab, ""); return; @@ -849,12 +878,13 @@ void audit_log_end(struct audit_buffer *ab) /* Log an audit record. This is a convenience function that calls * audit_log_start, audit_log_vformat, and audit_log_end. It may be * called in any context. */ -void audit_log(struct audit_context *ctx, int type, const char *fmt, ...) +void audit_log(struct audit_context *ctx, int gfp_mask, int type, + const char *fmt, ...) 
{ struct audit_buffer *ab; va_list args; - ab = audit_log_start(ctx, type); + ab = audit_log_start(ctx, gfp_mask, type); if (ab) { va_start(args, fmt); audit_log_vformat(ab, fmt, args); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index fc858b0c044a..f463fd230846 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -346,7 +346,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, } listnr = entry->rule.flags & ~AUDIT_FILTER_PREPEND; audit_add_rule(entry, &audit_filter_list[listnr]); - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "auid=%u added an audit rule\n", loginuid); break; case AUDIT_DEL: @@ -356,7 +356,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, err = audit_del_rule(data, &audit_filter_list[listnr]); if (!err) - audit_log(NULL, AUDIT_CONFIG_CHANGE, + audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, "auid=%u removed an audit rule\n", loginuid); break; default: @@ -756,7 +756,7 @@ static void audit_log_exit(struct audit_context *context) struct audit_buffer *ab; struct audit_aux_data *aux; - ab = audit_log_start(context, AUDIT_SYSCALL); + ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); if (!ab) return; /* audit_panic has been called */ audit_log_format(ab, "arch=%x syscall=%d", @@ -788,7 +788,7 @@ static void audit_log_exit(struct audit_context *context) for (aux = context->aux; aux; aux = aux->next) { - ab = audit_log_start(context, aux->type); + ab = audit_log_start(context, GFP_KERNEL, aux->type); if (!ab) continue; /* audit_panic has been called */ @@ -825,14 +825,14 @@ static void audit_log_exit(struct audit_context *context) } if (context->pwd && context->pwdmnt) { - ab = audit_log_start(context, AUDIT_CWD); + ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); if (ab) { audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt); audit_log_end(ab); } } for (i = 0; i < context->name_count; i++) { - ab = audit_log_start(context, AUDIT_PATH); + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); if (!ab) continue; /* audit_panic has been called */ @@ -1118,7 +1118,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid) if (task->audit_context) { struct audit_buffer *ab; - ab = audit_log_start(NULL, AUDIT_LOGIN); + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (ab) { audit_log_format(ab, "login pid=%d uid=%u " "old auid=%u new auid=%u", diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 451502467a9b..2d088bb65ee8 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -242,7 +242,7 @@ void __init avc_init(void) avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 0, SLAB_PANIC, NULL, NULL); - audit_log(current->audit_context, AUDIT_KERNEL, "AVC INITIALIZED\n"); + audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); } int avc_get_hash_stats(char *page) @@ -550,7 +550,7 @@ void avc_audit(u32 ssid, u32 tsid, return; } - ab = audit_log_start(current->audit_context, AUDIT_AVC); + ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC); if (!ab) return; /* audit_panic has been called */ audit_log_format(ab, "avc: %s ", denied ? 
"denied" : "granted"); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index db845cbd5841..b5220a266dce 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3419,7 +3419,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) err = selinux_nlmsg_lookup(isec->sclass, nlh->nlmsg_type, &perm); if (err) { if (err == -EINVAL) { - audit_log(current->audit_context, AUDIT_SELINUX_ERR, + audit_log(current->audit_context, GFP_KERNEL, AUDIT_SELINUX_ERR, "SELinux: unrecognized netlink message" " type=%hu for sclass=%hu\n", nlh->nlmsg_type, isec->sclass); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index b6149147d5cb..2947cf85dc56 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -365,7 +365,7 @@ static int security_validtrans_handle_fail(struct context *ocontext, goto out; if (context_struct_to_string(tcontext, &t, &tlen) < 0) goto out; - audit_log(current->audit_context, AUDIT_SELINUX_ERR, + audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_validate_transition: denied for" " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", o, n, t, policydb.p_class_val_to_name[tclass-1]); @@ -742,7 +742,7 @@ static int compute_sid_handle_invalid_context( goto out; if (context_struct_to_string(newcontext, &n, &nlen) < 0) goto out; - audit_log(current->audit_context, AUDIT_SELINUX_ERR, + audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_compute_sid: invalid context %s" " for scontext=%s" " tcontext=%s" -- cgit v1.2.3 From 5bb289b5a0becb53ac3e1d60815ff8b779296b73 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 24 Jun 2005 14:14:05 +0100 Subject: AUDIT: Clean up user message filtering Don't look up the task by its pid and then use the syscall filtering helper. Just implement our own filter helper which operates solely on the information in the netlink_skb_parms. 
Signed-off-by: David Woodhouse --- include/linux/audit.h | 5 +++-- kernel/audit.c | 2 +- kernel/auditsc.c | 56 ++++++++++++++++++++++++++++++++++++++------------- 3 files changed, 46 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index 2f56546eb248..38999f827a36 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -205,6 +205,7 @@ struct audit_sig_info { struct audit_buffer; struct audit_context; struct inode; +struct netlink_skb_parms; #define AUDITSC_INVALID 0 #define AUDITSC_SUCCESS 1 @@ -236,7 +237,7 @@ extern int audit_socketcall(int nargs, unsigned long *args); extern int audit_sockaddr(int len, void *addr); extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt); extern void audit_signal_info(int sig, struct task_struct *t); -extern int audit_filter_user(int pid, int type); +extern int audit_filter_user(struct netlink_skb_parms *cb, int type); #else #define audit_alloc(t) ({ 0; }) #define audit_free(t) do { ; } while (0) @@ -253,7 +254,7 @@ extern int audit_filter_user(int pid, int type); #define audit_sockaddr(len, addr) ({ 0; }) #define audit_avc_path(dentry, mnt) ({ 0; }) #define audit_signal_info(s,t) do { ; } while (0) -#define audit_filter_user(p,t) ({ 1; }) +#define audit_filter_user(cb,t) ({ 1; }) #endif #ifdef CONFIG_AUDIT diff --git a/kernel/audit.c b/kernel/audit.c index 9af947a63ed1..6f1784dd80af 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -434,7 +434,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; - err = audit_filter_user(pid, msg_type); + err = audit_filter_user(&NETLINK_CB(skb), msg_type); if (err == 1) { err = 0; ab = audit_log_start(NULL, GFP_KERNEL, msg_type); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 7b123f0a9481..34a990223c9e 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -40,6 +40,7 @@ #include #include #include +#include #include /* 0 = no checking @@ -530,35 +531,62 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, return AUDIT_BUILD_CONTEXT; } -int audit_filter_user(int pid, int type) +static int audit_filter_user_rules(struct netlink_skb_parms *cb, + struct audit_rule *rule, + enum audit_state *state) +{ + int i; + + for (i = 0; i < rule->field_count; i++) { + u32 field = rule->fields[i] & ~AUDIT_NEGATE; + u32 value = rule->values[i]; + int result = 0; + + switch (field) { + case AUDIT_PID: + result = (cb->creds.pid == value); + break; + case AUDIT_UID: + result = (cb->creds.uid == value); + break; + case AUDIT_GID: + result = (cb->creds.gid == value); + break; + case AUDIT_LOGINUID: + result = (cb->loginuid == value); + break; + } + + if (rule->fields[i] & AUDIT_NEGATE) + result = !result; + if (!result) + return 0; + } + switch (rule->action) { + case AUDIT_NEVER: *state = AUDIT_DISABLED; break; + case AUDIT_POSSIBLE: *state = AUDIT_BUILD_CONTEXT; break; + case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; + } + return 1; +} + +int audit_filter_user(struct netlink_skb_parms *cb, int type) { - struct task_struct *tsk; struct audit_entry *e; enum audit_state state; int ret = 1; - read_lock(&tasklist_lock); - tsk = find_task_by_pid(pid); - if (tsk) - get_task_struct(tsk); - read_unlock(&tasklist_lock); - - if (!tsk) - return -ESRCH; - rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) { - if (audit_filter_rules(tsk, &e->rule, NULL, &state)) { + if (audit_filter_user_rules(cb, 
&e->rule, &state)) { if (state == AUDIT_DISABLED) ret = 0; break; } } rcu_read_unlock(); - put_task_struct(tsk); return ret; /* Audit by default */ - } /* This should be called with task_lock() held. */ -- cgit v1.2.3 From 13774024da8ebdf17212c0f5a83f5b0681a649eb Mon Sep 17 00:00:00 2001 From: Badari Pulavarty Date: Sat, 2 Jul 2005 13:49:07 +0100 Subject: AUDIT: Fix definition of audit_log_start() if audit not enabled audit_log_start() seems to take 3 arguments, but its defined to take only 2 when AUDIT is turned off. security/selinux/avc.c:553:75: macro "audit_log_start" passed 3 arguments, but takes just 2 Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- include/linux/audit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index 38999f827a36..c22405fc2ddf 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -286,7 +286,7 @@ extern void audit_log_lost(const char *message); extern struct semaphore audit_netlink_sem; #else #define audit_log(c,t,f,...) do { ; } while (0) -#define audit_log_start(c,t) ({ NULL; }) +#define audit_log_start(c,g,t) ({ NULL; }) #define audit_log_vformat(b,f,a) do { ; } while (0) #define audit_log_format(b,f,...) do { ; } while (0) #define audit_log_end(b) do { ; } while (0) -- cgit v1.2.3 From 7b430437c0de81681ecfa8efa8f55823df733529 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 2 Jul 2005 13:50:40 +0100 Subject: AUDIT: Fix definition of audit_log() if audit not enabled audit_log() also takes an extra argument, although it's a vararg function so the compiler didn't really notice. Signed-off-by: David Woodhouse --- include/linux/audit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index c22405fc2ddf..d68e85580a53 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -285,7 +285,7 @@ extern void audit_send_reply(int pid, int seq, int type, extern void audit_log_lost(const char *message); extern struct semaphore audit_netlink_sem; #else -#define audit_log(c,t,f,...) do { ; } while (0) +#define audit_log(c,g,t,f,...) do { ; } while (0) #define audit_log_start(c,g,t) ({ NULL; }) #define audit_log_vformat(b,f,a) do { ; } while (0) #define audit_log_format(b,f,...) do { ; } while (0) -- cgit v1.2.3 From b67dbf9d4c1987c370fd18fdc4cf9d8aaea604c2 Mon Sep 17 00:00:00 2001 From: Greg KH Date: Thu, 7 Jul 2005 14:37:53 -0700 Subject: [PATCH] add securityfs for all LSMs to use Here's a small patch against 2.6.13-rc2 that adds securityfs, a virtual fs that all LSMs can use instead of creating their own. The fs should be mounted at /sys/kernel/security, and the fs creates that mount point. This will make the LSB people happy that we aren't creating a new /my_lsm_fs directory in the root for every different LSM. It has changed a bit since the last version, thanks to comments from Mike Waychison. 
Signed-off-by: Greg Kroah-Hartman Signed-off-by: Chris Wright --- include/linux/security.h | 5 + security/Makefile | 2 +- security/inode.c | 347 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 353 insertions(+), 1 deletion(-) create mode 100644 security/inode.c (limited to 'include/linux') diff --git a/include/linux/security.h b/include/linux/security.h index b42095a68b1c..cd3d8a9f951e 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1983,6 +1983,11 @@ extern int register_security (struct security_operations *ops); extern int unregister_security (struct security_operations *ops); extern int mod_reg_security (const char *name, struct security_operations *ops); extern int mod_unreg_security (const char *name, struct security_operations *ops); +extern struct dentry *securityfs_create_file(const char *name, mode_t mode, + struct dentry *parent, void *data, + struct file_operations *fops); +extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); +extern void securityfs_remove(struct dentry *dentry); #else /* CONFIG_SECURITY */ diff --git a/security/Makefile b/security/Makefile index 197cc2f3f1ec..8cbbf2f36709 100644 --- a/security/Makefile +++ b/security/Makefile @@ -11,7 +11,7 @@ obj-y += commoncap.o endif # Object file lists -obj-$(CONFIG_SECURITY) += security.o dummy.o +obj-$(CONFIG_SECURITY) += security.o dummy.o inode.o # Must precede capability.o in order to stack properly. obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o obj-$(CONFIG_SECURITY_CAPABILITIES) += commoncap.o capability.o diff --git a/security/inode.c b/security/inode.c new file mode 100644 index 000000000000..a5964502ae30 --- /dev/null +++ b/security/inode.c @@ -0,0 +1,347 @@ +/* + * inode.c - securityfs + * + * Copyright (C) 2005 Greg Kroah-Hartman + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * Based on fs/debugfs/inode.c which had the following copyright notice: + * Copyright (C) 2004 Greg Kroah-Hartman + * Copyright (C) 2004 IBM Inc. + */ + +/* #define DEBUG */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define SECURITYFS_MAGIC 0x73636673 + +static struct vfsmount *mount; +static int mount_count; + +/* + * TODO: + * I think I can get rid of these default_file_ops, but not quite sure... 
+ */ +static ssize_t default_read_file(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return 0; +} + +static ssize_t default_write_file(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return count; +} + +static int default_open(struct inode *inode, struct file *file) +{ + if (inode->u.generic_ip) + file->private_data = inode->u.generic_ip; + + return 0; +} + +static struct file_operations default_file_ops = { + .read = default_read_file, + .write = default_write_file, + .open = default_open, +}; + +static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev) +{ + struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_mode = mode; + inode->i_uid = 0; + inode->i_gid = 0; + inode->i_blksize = PAGE_CACHE_SIZE; + inode->i_blocks = 0; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + switch (mode & S_IFMT) { + default: + init_special_inode(inode, mode, dev); + break; + case S_IFREG: + inode->i_fop = &default_file_ops; + break; + case S_IFDIR: + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + + /* directory inodes start off with i_nlink == 2 (for "." entry) */ + inode->i_nlink++; + break; + } + } + return inode; +} + +/* SMP-safe */ +static int mknod(struct inode *dir, struct dentry *dentry, + int mode, dev_t dev) +{ + struct inode *inode; + int error = -EPERM; + + if (dentry->d_inode) + return -EEXIST; + + inode = get_inode(dir->i_sb, mode, dev); + if (inode) { + d_instantiate(dentry, inode); + dget(dentry); + error = 0; + } + return error; +} + +static int mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + int res; + + mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; + res = mknod(dir, dentry, mode, 0); + if (!res) + dir->i_nlink++; + return res; +} + +static int create(struct inode *dir, struct dentry *dentry, int mode) +{ + mode = (mode & S_IALLUGO) | S_IFREG; + return mknod(dir, dentry, mode, 0); +} + +static inline int positive(struct dentry *dentry) +{ + return dentry->d_inode && !d_unhashed(dentry); +} + +static int fill_super(struct super_block *sb, void *data, int silent) +{ + static struct tree_descr files[] = {{""}}; + + return simple_fill_super(sb, SECURITYFS_MAGIC, files); +} + +static struct super_block *get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) +{ + return get_sb_single(fs_type, flags, data, fill_super); +} + +static struct file_system_type fs_type = { + .owner = THIS_MODULE, + .name = "securityfs", + .get_sb = get_sb, + .kill_sb = kill_litter_super, +}; + +static int create_by_name(const char *name, mode_t mode, + struct dentry *parent, + struct dentry **dentry) +{ + int error = 0; + + *dentry = NULL; + + /* If the parent is not specified, we create it in the root. + * We need the root dentry to do this, which is in the super + * block. A pointer to that is in the struct vfsmount that we + * have around. + */ + if (!parent ) { + if (mount && mount->mnt_sb) { + parent = mount->mnt_sb->s_root; + } + } + if (!parent) { + pr_debug("securityfs: Ah! 
can not find a parent!\n"); + return -EFAULT; + } + + down(&parent->d_inode->i_sem); + *dentry = lookup_one_len(name, parent, strlen(name)); + if (!IS_ERR(dentry)) { + if ((mode & S_IFMT) == S_IFDIR) + error = mkdir(parent->d_inode, *dentry, mode); + else + error = create(parent->d_inode, *dentry, mode); + } else + error = PTR_ERR(dentry); + up(&parent->d_inode->i_sem); + + return error; +} + +/** + * securityfs_create_file - create a file in the securityfs filesystem + * + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this paramater is NULL, then the + * file will be created in the root of the securityfs filesystem. + * @data: a pointer to something that the caller will want to get to later + * on. The inode.u.generic_ip pointer will point to this value on + * the open() call. + * @fops: a pointer to a struct file_operations that should be used for + * this file. + * + * This is the basic "create a file" function for securityfs. It allows for a + * wide range of flexibility in createing a file, or a directory (if you + * want to create a directory, the securityfs_create_dir() function is + * recommended to be used instead.) + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the securityfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, NULL will be returned. + * + * If securityfs is not enabled in the kernel, the value -ENODEV will be + * returned. It is not wise to check for this value, but rather, check for + * NULL or !NULL instead as to eliminate the need for #ifdef in the calling + * code. + */ +struct dentry *securityfs_create_file(const char *name, mode_t mode, + struct dentry *parent, void *data, + struct file_operations *fops) +{ + struct dentry *dentry = NULL; + int error; + + pr_debug("securityfs: creating file '%s'\n",name); + + error = simple_pin_fs("securityfs", &mount, &mount_count); + if (error) { + dentry = ERR_PTR(error); + goto exit; + } + + error = create_by_name(name, mode, parent, &dentry); + if (error) { + dentry = ERR_PTR(error); + simple_release_fs(&mount, &mount_count); + goto exit; + } + + if (dentry->d_inode) { + if (fops) + dentry->d_inode->i_fop = fops; + if (data) + dentry->d_inode->u.generic_ip = data; + } +exit: + return dentry; +} +EXPORT_SYMBOL_GPL(securityfs_create_file); + +/** + * securityfs_create_dir - create a directory in the securityfs filesystem + * + * @name: a pointer to a string containing the name of the directory to + * create. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this paramater is NULL, then the + * directory will be created in the root of the securityfs filesystem. + * + * This function creates a directory in securityfs with the given name. + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the securityfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, NULL will be returned. + * + * If securityfs is not enabled in the kernel, the value -ENODEV will be + * returned. 
It is not wise to check for this value, but rather, check for + * NULL or !NULL instead as to eliminate the need for #ifdef in the calling + * code. + */ +struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) +{ + return securityfs_create_file(name, + S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, + parent, NULL, NULL); +} +EXPORT_SYMBOL_GPL(securityfs_create_dir); + +/** + * securityfs_remove - removes a file or directory from the securityfs filesystem + * + * @dentry: a pointer to a the dentry of the file or directory to be + * removed. + * + * This function removes a file or directory in securityfs that was previously + * created with a call to another securityfs function (like + * securityfs_create_file() or variants thereof.) + * + * This function is required to be called in order for the file to be + * removed, no automatic cleanup of files will happen when a module is + * removed, you are responsible here. + */ +void securityfs_remove(struct dentry *dentry) +{ + struct dentry *parent; + + if (!dentry) + return; + + parent = dentry->d_parent; + if (!parent || !parent->d_inode) + return; + + down(&parent->d_inode->i_sem); + if (positive(dentry)) { + if (dentry->d_inode) { + if (S_ISDIR(dentry->d_inode->i_mode)) + simple_rmdir(parent->d_inode, dentry); + else + simple_unlink(parent->d_inode, dentry); + dput(dentry); + } + } + up(&parent->d_inode->i_sem); + simple_release_fs(&mount, &mount_count); +} +EXPORT_SYMBOL_GPL(securityfs_remove); + +static decl_subsys(security, NULL, NULL); + +static int __init securityfs_init(void) +{ + int retval; + + kset_set_kset_s(&security_subsys, kernel_subsys); + retval = subsystem_register(&security_subsys); + if (retval) + return retval; + + retval = register_filesystem(&fs_type); + if (retval) + subsystem_unregister(&security_subsys); + return retval; +} + +static void __exit securityfs_exit(void) +{ + simple_release_fs(&mount, &mount_count); + unregister_filesystem(&fs_type); + subsystem_unregister(&security_subsys); +} + +core_initcall(securityfs_init); +module_exit(securityfs_exit); +MODULE_LICENSE("GPL"); + -- cgit v1.2.3 From 676e1a2c1e7499eee8e7a81e577b4b6ba71ffb25 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 12 Sep 2005 14:51:00 +0200 Subject: [ALSA] [PATCH] Add missing sound PCI IDs to pci_ids.h Added missing PCI IDs for sound drivers to pci_ids.h. 
Signed-off-by: Takashi Iwai --- include/linux/pci_ids.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ee0ab7a5f91b..31da85f4ab6e 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -447,6 +447,10 @@ #define PCI_DEVICE_ID_CIRRUS_7542 0x1200 #define PCI_DEVICE_ID_CIRRUS_7543 0x1202 #define PCI_DEVICE_ID_CIRRUS_7541 0x1204 +#define PCI_DEVICE_ID_CIRRUS_4610 0x6001 +#define PCI_DEVICE_ID_CIRRUS_4612 0x6003 +#define PCI_DEVICE_ID_CIRRUS_4615 0x6004 +#define PCI_DEVICE_ID_CIRRUS_4281 0x6005 #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a @@ -682,7 +686,9 @@ #define PCI_DEVICE_ID_SI_6326 0x6326 #define PCI_DEVICE_ID_SI_7001 0x7001 #define PCI_DEVICE_ID_SI_7012 0x7012 +#define PCI_DEVICE_ID_SI_7013 0x7013 #define PCI_DEVICE_ID_SI_7016 0x7016 +#define PCI_DEVICE_ID_SI_7018 0x7018 #define PCI_VENDOR_ID_HP 0x103c #define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005 @@ -991,6 +997,7 @@ #define PCI_DEVICE_ID_BROOKTREE_849A 0x0351 #define PCI_DEVICE_ID_BROOKTREE_878_1 0x036e #define PCI_DEVICE_ID_BROOKTREE_878 0x0878 +#define PCI_DEVICE_ID_BROOKTREE_879 0x0879 #define PCI_DEVICE_ID_BROOKTREE_8474 0x8474 #define PCI_VENDOR_ID_SIERRA 0x10a8 @@ -1109,6 +1116,9 @@ #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004 #define PCI_DEVICE_ID_NEOMAGIC_MAGICMEDIA_256AV 0x0005 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZVPLUS 0x0083 +#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 +#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006 +#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016 #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_ABP940 0x1200 @@ -1155,10 +1165,13 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 #define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066 +#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 #define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 #define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086 +#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 +#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a #define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e #define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 @@ -1173,6 +1186,7 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 #define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6 +#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 #define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da #define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df #define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 @@ -1180,6 +1194,7 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 #define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6 +#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 @@ -1230,6 +1245,7 @@ #define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 #define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc +#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 #define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3 #define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 @@ -1334,6 +1350,13 @@ #define PCI_DEVICE_ID_REALTEK_8169 0x8169 #define PCI_VENDOR_ID_XILINX 0x10ee +#define 
PCI_DEVICE_ID_RME_DIGI96 0x3fc0 +#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 +#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 +#define PCI_DEVICE_IDRME__DIGI96_8_PAD_OR_PST 0x3fc3 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL 0x3fc4 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 #define PCI_DEVICE_ID_TURBOPAM 0x4020 #define PCI_VENDOR_ID_TRUEVISION 0x10fa @@ -1809,6 +1832,14 @@ #define PCI_DEVICE_ID_ESS_ESS1968 0x1968 #define PCI_DEVICE_ID_ESS_AUDIOPCI 0x1969 #define PCI_DEVICE_ID_ESS_ESS1978 0x1978 +#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988 +#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989 +#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990 +#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992 +#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998 +#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999 +#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a +#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b #define PCI_VENDOR_ID_SATSAGEM 0x1267 #define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016 @@ -1968,6 +1999,9 @@ #define PCI_DEVICE_ID_LMC_SSI 0x0005 #define PCI_DEVICE_ID_LMC_T1 0x0006 +#define PCI_VENDOR_ID_MARIAN 0x1382 +#define PCI_DEVICE_ID_MARIAN_PRODIF_PLUS 0x2048 + #define PCI_VENDOR_ID_NETGEAR 0x1385 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a #define PCI_DEVICE_ID_NETGEAR_GA622 0x622a @@ -2056,6 +2090,10 @@ #define PCI_VENDOR_ID_TIMEDIA 0x1409 #define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 +#define PCI_VENDOR_ID_ICE 0x1412 +#define PCI_DEVICE_ID_ICE_1712 0x1712 +#define PCI_DEVICE_ID_VT1724 0x1724 + #define PCI_VENDOR_ID_OXSEMI 0x1415 #define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 @@ -2536,6 +2574,7 @@ #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 #define PCI_DEVICE_ID_INTEL_440MX 0x7195 +#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196 #define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198 #define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199 #define PCI_DEVICE_ID_INTEL_82443MX_2 0x719a @@ -2642,6 +2681,11 @@ #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000A +#define PCI_VENDOR_ID_XILINX_RME 0xea60 +#define PCI_DEVICE_ID_RME_DIGI32 0x9896 +#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 +#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 + #define PCI_VENDOR_ID_ARK 0xedd8 #define PCI_DEVICE_ID_ARK_STING 0xa091 #define PCI_DEVICE_ID_ARK_STINGARK 0xa099 -- cgit v1.2.3 From 921717a2a1cde78c9b2aa971c16510d63efe7320 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 13 Sep 2005 01:25:13 -0700 Subject: [PATCH] Make BUILD_BUG_ON fail at compile time. Force a compiler error instead of a link error, because they are easier to track down. Idea stolen from code by Jan Beulich If the argument to BUILD_BUG_ON evaluates to non-zero the compiler will do: t.c:6: error: size of array `type name' is negative (surprised that gcc doesn't have an extension for this) Signed-off-by: "Andi Kleen" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 687ba8c9973d..4367ce4db52a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -307,8 +307,8 @@ struct sysinfo { char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. 
*/ }; -extern void BUILD_BUG(void); -#define BUILD_BUG_ON(condition) do { if (condition) BUILD_BUG(); } while(0) +/* Force a compilation error if condition is false */ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) #ifdef CONFIG_SYSCTL extern int randomize_va_space; -- cgit v1.2.3 From 498d0c5711094b0e1fd93f5355d270ccebdec706 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 13 Sep 2005 01:25:14 -0700 Subject: [PATCH] set_current_state() commentary Explain the mysteries of set_current_state(). Quoth Linus: The scheduler itself never needs the memory barrier at all. The barrier is needed only if the user itself ends up testing some other thing afterwards, ie if you have set_process_state(TASK_INTERRUPTIBLE); if (still_need_to_sleep()) schedule(); then the "still_need_to_sleep()" thing may test flags and wakeup events, and then you _may_ want to (and often do) make sure that the write of TASK_INTERRUPTIBLE is serialized wrt the reads of any wakeup data (since the wakeup may have happened on another CPU). So the comment is somewhat wrong. We don't really _care_ whether the state propagates out to other CPU's since all of our actions are purely local, and there is nothing we do that is conditional on any other CPU: we're going to sleep unconditionally, and the scheduler only cares about _our_ state, not about somebody elses state. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 38c8654aaa96..49e617fa0f66 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -121,6 +121,17 @@ extern unsigned long nr_iowait(void); #define set_task_state(tsk, state_value) \ set_mb((tsk)->state, (state_value)) +/* + * set_current_state() includes a barrier so that the write of current->state + * is correctly serialised wrt the caller's subsequent test of whether to + * actually sleep: + * + * set_current_state(TASK_UNINTERRUPTIBLE); + * if (do_i_need_to_sleep()) + * schedule(); + * + * If the caller does not need such serialisation then use __set_current_state() + */ #define __set_current_state(state_value) \ do { current->state = (state_value); } while (0) #define set_current_state(state_value) \ -- cgit v1.2.3 From 9dc7a86e85593c834bb930f5d5aba3a19ee7a350 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Tue, 13 Sep 2005 01:25:19 -0700 Subject: [PATCH] cciss: new controller pci/subsystem ids This patch adds new PCI and subsystem ID's that finally made the spec. It also include a name change for one controller. I know there's a lot of duplicat names but the fw folks wanted this for the different implementations. Even though the same ASIC is used it may be embedded on some platforms, standup card in others, and a mezzanine in other servers. 
Signed-off-by: Mike Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cciss.txt | 4 +++- drivers/block/cciss.c | 33 ++++++++++++++++++++++++--------- include/linux/pci_ids.h | 4 +++- 3 files changed, 30 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/Documentation/cciss.txt b/Documentation/cciss.txt index c8f9a73111da..68a711fb82cf 100644 --- a/Documentation/cciss.txt +++ b/Documentation/cciss.txt @@ -17,7 +17,9 @@ This driver is known to work with the following cards: * SA P600 * SA P800 * SA E400 - * SA E300 + * SA P400i + * SA E200 + * SA E200i If nodes are not already created in the /dev/cciss directory, run as root: diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 28f2c177a541..4a49d7972126 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -47,14 +47,14 @@ #include #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) -#define DRIVER_NAME "HP CISS Driver (v 2.6.6)" -#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,6) +#define DRIVER_NAME "HP CISS Driver (v 2.6.8)" +#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); -MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6"); +MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8"); MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" - " SA6i P600 P800 E400 E300"); + " SA6i P600 P800 P400 P400i E200 E200i"); MODULE_LICENSE("GPL"); #include "cciss_cmd.h" @@ -83,12 +83,22 @@ static const struct pci_device_id cciss_pci_device_id[] = { 0x0E11, 0x4091, 0, 0, 0}, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225, 0, 0, 0}, - { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103c, 0x3223, 0, 0, 0}, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, - 0x103c, 0x3231, 0, 0, 0}, + 0x103c, 0x3234, 0, 0, 0}, { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, - 0x103c, 0x3233, 0, 0, 0}, + 0x103c, 0x3235, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3211, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3212, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3213, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3214, 0, 0, 0}, + { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, + 0x103c, 0x3215, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); @@ -111,8 +121,13 @@ static struct board_type products[] = { { 0x40910E11, "Smart Array 6i", &SA5_access}, { 0x3225103C, "Smart Array P600", &SA5_access}, { 0x3223103C, "Smart Array P800", &SA5_access}, - { 0x3231103C, "Smart Array E400", &SA5_access}, - { 0x3233103C, "Smart Array E300", &SA5_access}, + { 0x3234103C, "Smart Array P400", &SA5_access}, + { 0x3235103C, "Smart Array P400i", &SA5_access}, + { 0x3211103C, "Smart Array E200i", &SA5_access}, + { 0x3212103C, "Smart Array E200", &SA5_access}, + { 0x3213103C, "Smart Array E200i", &SA5_access}, + { 0x3214103C, "Smart Array E200i", &SA5_access}, + { 0x3215103C, "Smart Array E200i", &SA5_access}, }; /* How long to wait (in millesconds) for board to go into simple mode */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ee0ab7a5f91b..7fcb44bc9af9 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -713,10 +713,12 @@ #define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 #define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 #define 
PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 +#define PCI_DEVICE_ID_HP_CISS 0x3210 #define PCI_DEVICE_ID_HP_CISSA 0x3220 #define PCI_DEVICE_ID_HP_CISSB 0x3222 -#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 #define PCI_DEVICE_ID_HP_CISSC 0x3230 +#define PCI_DEVICE_ID_HP_CISSD 0x3238 +#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 #define PCI_VENDOR_ID_PCTECH 0x1042 #define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 -- cgit v1.2.3 From 610827dee82731c7be5a135d750d194ac56881a9 Mon Sep 17 00:00:00 2001 From: Peter Osterlund Date: Tue, 13 Sep 2005 01:25:29 -0700 Subject: [PATCH] pktcdvd: BUG_ON cleanups Remove some redundant BUG_ON() statements in pktcdvd and move one run-time check to compile-time. Signed-off-by: Peter Osterlund Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/pktcdvd.c | 9 +++------ include/linux/pktcdvd.h | 3 +++ 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index b443fe5eebe4..7e22a58926b8 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -669,7 +669,6 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, in } offs += CD_FRAMESIZE; if (offs >= PAGE_SIZE) { - BUG_ON(offs > PAGE_SIZE); offs = 0; p++; } @@ -804,10 +803,11 @@ static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zo list_del_init(&pkt->list); if (pkt->sector != zone) pkt->cache_valid = 0; - break; + return pkt; } } - return pkt; + BUG(); + return NULL; } static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) @@ -951,7 +951,6 @@ try_next_bio: } pkt = pkt_get_packet_data(pd, zone); - BUG_ON(!pkt); pd->current_sector = zone + pd->settings.size; pkt->sector = zone; @@ -2211,7 +2210,6 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio) * No matching packet found. Store the bio in the work queue. */ node = mempool_alloc(pd->rb_pool, GFP_NOIO); - BUG_ON(!node); node->bio = bio; spin_lock(&pd->lock); BUG_ON(pd->bio_queue_size < 0); @@ -2419,7 +2417,6 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); - BUG_ON(!pd); switch (cmd) { /* diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 4b32bce9a289..2c177e4c8f22 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -166,6 +166,9 @@ struct packet_iosched /* * 32 buffers of 2048 bytes */ +#if (PAGE_SIZE % CD_FRAMESIZE) != 0 +#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE" +#endif #define PACKET_MAX_SIZE 32 #define PAGES_PER_PACKET (PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE) #define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9) -- cgit v1.2.3 From f2327d9adb1e948a7041128e971effd8d6e2d42c Mon Sep 17 00:00:00 2001 From: Neil Brown Date: Tue, 13 Sep 2005 01:25:37 -0700 Subject: [PATCH] nfsd4: move replay_owner It seems more natural to move the setting of the replay_owner into the relevant procedure instead of doing it in nfsv4_proc_compound. Signed-off-by: J. 
Bruce Fields Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nfsd/nfs4proc.c | 24 ++++++++++-------------- fs/nfsd/nfs4state.c | 30 ++++++++++++++++++++---------- include/linux/nfsd/xdr4.h | 15 ++++++++++----- 3 files changed, 40 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index e08edc17c6a0..361b4007d4a0 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -162,7 +162,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_ static inline int -nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) +nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, struct nfs4_stateowner **replay_owner) { int status; dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n", @@ -238,8 +238,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open */ status = nfsd4_process_open2(rqstp, current_fh, open); out: - if (open->op_stateowner) + if (open->op_stateowner) { nfs4_get_stateowner(open->op_stateowner); + *replay_owner = open->op_stateowner; + } nfs4_unlock_state(); return status; } @@ -809,8 +811,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, op->status = nfsd4_access(rqstp, current_fh, &op->u.access); break; case OP_CLOSE: - op->status = nfsd4_close(rqstp, current_fh, &op->u.close); - replay_owner = op->u.close.cl_stateowner; + op->status = nfsd4_close(rqstp, current_fh, &op->u.close, &replay_owner); break; case OP_COMMIT: op->status = nfsd4_commit(rqstp, current_fh, &op->u.commit); @@ -831,15 +832,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, op->status = nfsd4_link(rqstp, current_fh, save_fh, &op->u.link); break; case OP_LOCK: - op->status = nfsd4_lock(rqstp, current_fh, &op->u.lock); - replay_owner = op->u.lock.lk_stateowner; + op->status = nfsd4_lock(rqstp, current_fh, &op->u.lock, &replay_owner); break; case OP_LOCKT: op->status = nfsd4_lockt(rqstp, current_fh, &op->u.lockt); break; case OP_LOCKU: - op->status = nfsd4_locku(rqstp, current_fh, &op->u.locku); - replay_owner = op->u.locku.lu_stateowner; + op->status = nfsd4_locku(rqstp, current_fh, &op->u.locku, &replay_owner); break; case OP_LOOKUP: op->status = nfsd4_lookup(rqstp, current_fh, &op->u.lookup); @@ -853,16 +852,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, op->status = nfs_ok; break; case OP_OPEN: - op->status = nfsd4_open(rqstp, current_fh, &op->u.open); - replay_owner = op->u.open.op_stateowner; + op->status = nfsd4_open(rqstp, current_fh, &op->u.open, &replay_owner); break; case OP_OPEN_CONFIRM: - op->status = nfsd4_open_confirm(rqstp, current_fh, &op->u.open_confirm); - replay_owner = op->u.open_confirm.oc_stateowner; + op->status = nfsd4_open_confirm(rqstp, current_fh, &op->u.open_confirm, &replay_owner); break; case OP_OPEN_DOWNGRADE: - op->status = nfsd4_open_downgrade(rqstp, current_fh, &op->u.open_downgrade); - replay_owner = op->u.open_downgrade.od_stateowner; + op->status = nfsd4_open_downgrade(rqstp, current_fh, &op->u.open_downgrade, &replay_owner); break; case OP_PUTFH: op->status = nfsd4_putfh(rqstp, current_fh, &op->u.putfh); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 109fd8c3d4f5..11405e530e70 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2286,7 +2286,7 @@ check_replay: } int -nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_confirm *oc) +nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh 
*current_fh, struct nfsd4_open_confirm *oc, struct nfs4_stateowner **replay_owner) { int status; struct nfs4_stateowner *sop; @@ -2320,8 +2320,10 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs nfsd4_create_clid_dir(sop->so_client); out: - if (oc->oc_stateowner) + if (oc->oc_stateowner) { nfs4_get_stateowner(oc->oc_stateowner); + *replay_owner = oc->oc_stateowner; + } nfs4_unlock_state(); return status; } @@ -2352,7 +2354,7 @@ reset_union_bmap_deny(unsigned long deny, unsigned long *bmap) } int -nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_downgrade *od) +nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_downgrade *od, struct nfs4_stateowner **replay_owner) { int status; struct nfs4_stateid *stp; @@ -2394,8 +2396,10 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct n memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t)); status = nfs_ok; out: - if (od->od_stateowner) + if (od->od_stateowner) { nfs4_get_stateowner(od->od_stateowner); + *replay_owner = od->od_stateowner; + } nfs4_unlock_state(); return status; } @@ -2404,7 +2408,7 @@ out: * nfs4_unlock_state() called after encode */ int -nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_close *close) +nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_close *close, struct nfs4_stateowner **replay_owner) { int status; struct nfs4_stateid *stp; @@ -2430,8 +2434,10 @@ nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_clos /* release_state_owner() calls nfsd_close() if needed */ release_state_owner(stp, OPEN_STATE); out: - if (close->cl_stateowner) + if (close->cl_stateowner) { nfs4_get_stateowner(close->cl_stateowner); + *replay_owner = close->cl_stateowner; + } nfs4_unlock_state(); return status; } @@ -2675,7 +2681,7 @@ check_lock_length(u64 offset, u64 length) * LOCK operation */ int -nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock *lock) +nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock *lock, struct nfs4_stateowner **replay_owner) { struct nfs4_stateowner *open_sop = NULL; struct nfs4_stateid *lock_stp; @@ -2835,8 +2841,10 @@ out_destroy_new_stateid: release_state_owner(lock_stp, LOCK_STATE); } out: - if (lock->lk_stateowner) + if (lock->lk_stateowner) { nfs4_get_stateowner(lock->lk_stateowner); + *replay_owner = lock->lk_stateowner; + } nfs4_unlock_state(); return status; } @@ -2925,7 +2933,7 @@ out: } int -nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_locku *locku) +nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_locku *locku, struct nfs4_stateowner **replay_owner) { struct nfs4_stateid *stp; struct file *filp = NULL; @@ -2981,8 +2989,10 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t)); out: - if (locku->lu_stateowner) + if (locku->lu_stateowner) { nfs4_get_stateowner(locku->lu_stateowner); + *replay_owner = locku->lu_stateowner; + } nfs4_unlock_state(); return status; diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 4d24d65c0e88..8903688890ce 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -438,17 +438,22 @@ extern int nfsd4_process_open1(struct nfsd4_open *open); extern int nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh 
*current_fh, struct nfsd4_open *open); extern int nfsd4_open_confirm(struct svc_rqst *rqstp, - struct svc_fh *current_fh, struct nfsd4_open_confirm *oc); + struct svc_fh *current_fh, struct nfsd4_open_confirm *oc, + struct nfs4_stateowner **); extern int nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, - struct nfsd4_close *close); + struct nfsd4_close *close, + struct nfs4_stateowner **replay_owner); extern int nfsd4_open_downgrade(struct svc_rqst *rqstp, - struct svc_fh *current_fh, struct nfsd4_open_downgrade *od); + struct svc_fh *current_fh, struct nfsd4_open_downgrade *od, + struct nfs4_stateowner **replay_owner); extern int nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, - struct nfsd4_lock *lock); + struct nfsd4_lock *lock, + struct nfs4_stateowner **replay_owner); extern int nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lockt *lockt); extern int nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, - struct nfsd4_locku *locku); + struct nfsd4_locku *locku, + struct nfs4_stateowner **replay_owner); extern int nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_release_lockowner *rlockowner); -- cgit v1.2.3 From 9db455064dfa1c2250e5eda7386c80bc77764e30 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 13 Sep 2005 01:25:40 -0700 Subject: [PATCH] v4l: experimental Sliced VBI API support Adds all defines, ioctls and structs needed for the sliced VBI API. VBI = Vertical Blank Interval; it relates to the way TV signals work: a line is sent, then there is a retrace time that allows the tube to move the electron beam to the beginning of the next line. This was the main reason for it at the beginning of analog B&W TV. A lot of bandwidth is lost on the VBI, so many TV systems use it to send other information such as Closed Captions and Teletext. Broadcasters also use it as a channel to exchange information from the content producer to their subsidiaries in each city. There is already a raw VBI interface in the V4L2 API, used for Closed Captions and Teletext; decoding is done in userspace, and it is mostly for analog, non-encoded TV signals. Encoded signals (MPEG, for example) may also need to transmit other information (for example the display aspect, i.e. 4x3 or widescreen). The sliced VBI interface is a method to allow the video stream to transmit this kind of information.
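To make the intended usage concrete, a minimal userspace sketch of how the new buffer type might be negotiated (an illustration only, not part of the patch: the /dev/vbi0 path and the choice of the WSS service are assumptions, and the API itself is marked experimental in the header below):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int setup_sliced_vbi(void)
	{
		struct v4l2_format fmt;
		int fd = open("/dev/vbi0", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return -1;
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
		fmt.fmt.sliced.service_set = V4L2_SLICED_WSS_625;	/* wide screen signalling */
		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
			return -1;	/* driver cannot slice the requested service */
		/* On success the driver is expected to report the lines it will
		 * decode in fmt.fmt.sliced.service_lines[][] and the read()
		 * chunk size in fmt.fmt.sliced.io_size. */
		return fd;
	}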
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/videodev2.h | 109 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 87 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index f623a33b9abe..89a055761bed 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -60,12 +60,17 @@ enum v4l2_field { (field) == V4L2_FIELD_SEQ_BT) enum v4l2_buf_type { - V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, - V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, - V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, - V4L2_BUF_TYPE_VBI_CAPTURE = 4, - V4L2_BUF_TYPE_VBI_OUTPUT = 5, - V4L2_BUF_TYPE_PRIVATE = 0x80, + V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, + V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, + V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, + V4L2_BUF_TYPE_VBI_CAPTURE = 4, + V4L2_BUF_TYPE_VBI_OUTPUT = 5, +#if 1 + /* Experimental Sliced VBI */ + V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, + V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, +#endif + V4L2_BUF_TYPE_PRIVATE = 0x80, }; enum v4l2_ctrl_type { @@ -149,20 +154,24 @@ struct v4l2_capability }; /* Values for 'capabilities' field */ -#define V4L2_CAP_VIDEO_CAPTURE 0x00000001 /* Is a video capture device */ -#define V4L2_CAP_VIDEO_OUTPUT 0x00000002 /* Is a video output device */ -#define V4L2_CAP_VIDEO_OVERLAY 0x00000004 /* Can do video overlay */ -#define V4L2_CAP_VBI_CAPTURE 0x00000010 /* Is a VBI capture device */ -#define V4L2_CAP_VBI_OUTPUT 0x00000020 /* Is a VBI output device */ -#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ +#define V4L2_CAP_VIDEO_CAPTURE 0x00000001 /* Is a video capture device */ +#define V4L2_CAP_VIDEO_OUTPUT 0x00000002 /* Is a video output device */ +#define V4L2_CAP_VIDEO_OVERLAY 0x00000004 /* Can do video overlay */ +#define V4L2_CAP_VBI_CAPTURE 0x00000010 /* Is a raw VBI capture device */ +#define V4L2_CAP_VBI_OUTPUT 0x00000020 /* Is a raw VBI output device */ +#if 1 +#define V4L2_CAP_SLICED_VBI_CAPTURE 0x00000040 /* Is a sliced VBI capture device */ +#define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */ +#endif +#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ -#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ -#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ -#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */ +#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ +#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ +#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */ -#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */ -#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */ -#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */ +#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */ +#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */ +#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */ /* * V I D E O I M A G E F O R M A T @@ -809,6 +818,8 @@ struct v4l2_audioout * Data services API by Michael Schimek */ +/* Raw VBI */ + struct v4l2_vbi_format { __u32 sampling_rate; /* in 1 Hz */ @@ -825,6 +836,54 @@ struct v4l2_vbi_format #define V4L2_VBI_UNSYNC (1<< 0) #define V4L2_VBI_INTERLACED (1<< 1) +#if 1 +/* Sliced VBI + * + * This implements is a proposal V4L2 API to allow SLICED VBI + * required for some hardware encoders. It should change without + * notice in the definitive implementation. + */ + +struct v4l2_sliced_vbi_format +{ + __u16 service_set; + /* service_lines[0][...] 
specifies lines 0-23 (1-23 used) of the first field + service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field + (equals frame lines 313-336 for 625 line video + standards, 263-286 for 525 line standards) */ + __u16 service_lines[2][24]; + __u32 io_size; + __u32 reserved[2]; /* must be zero */ +}; + +#define V4L2_SLICED_TELETEXT_B (0x0001) +#define V4L2_SLICED_VPS (0x0400) +#define V4L2_SLICED_CAPTION_525 (0x1000) +#define V4L2_SLICED_WSS_625 (0x4000) + +#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) +#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) + +struct v4l2_sliced_vbi_cap +{ + __u16 service_set; + /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field + service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field + (equals frame lines 313-336 for 625 line video + standards, 263-286 for 525 line standards) */ + __u16 service_lines[2][24]; + __u32 reserved[4]; /* must be 0 */ +}; + +struct v4l2_sliced_vbi_data +{ + __u32 id; + __u32 field; /* 0: first field, 1: second field */ + __u32 line; /* 1-23 */ + __u32 reserved; /* must be 0 */ + __u8 data[48]; +}; +#endif /* * A G G R E G A T E S T R U C T U R E S @@ -837,10 +896,13 @@ struct v4l2_format enum v4l2_buf_type type; union { - struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE - struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY - struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE - __u8 raw_data[200]; // user-defined + struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE + struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY + struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE +#if 1 + struct v4l2_sliced_vbi_format sliced; // V4L2_BUF_TYPE_SLICED_VBI_CAPTURE +#endif + __u8 raw_data[200]; // user-defined } fmt; }; @@ -916,6 +978,9 @@ struct v4l2_streamparm #define VIDIOC_ENUMAUDOUT _IOWR ('V', 66, struct v4l2_audioout) #define VIDIOC_G_PRIORITY _IOR ('V', 67, enum v4l2_priority) #define VIDIOC_S_PRIORITY _IOW ('V', 68, enum v4l2_priority) +#if 1 +#define VIDIOC_G_SLICED_VBI_CAP _IOR ('V', 69, struct v4l2_sliced_vbi_cap) +#endif /* for compatibility, will go away some day */ #define VIDIOC_OVERLAY_OLD _IOWR ('V', 14, int) -- cgit v1.2.3 From 2f4516dbd048f25eba78e115e8e73e1e8f04e7f9 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 13 Sep 2005 01:25:44 -0700 Subject: [PATCH] fbcon: constify font data const-ify the font control structures and data, to make somewhat better guarantees that these are not modified anywhere in the kernel. Specifically for a kernel debugger to share this information from the normal kernel code, such a guarantee seems rather desirable. Signed-off-by: Jan Beulich Cc: "Antonino A. 
Daplas" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/console/fbcon.c | 16 ++++++++-------- drivers/video/console/fbcon.h | 2 +- drivers/video/console/font_10x18.c | 4 ++-- drivers/video/console/font_6x11.c | 4 ++-- drivers/video/console/font_7x14.c | 4 ++-- drivers/video/console/font_8x16.c | 4 ++-- drivers/video/console/font_8x8.c | 4 ++-- drivers/video/console/font_acorn_8x8.c | 4 ++-- drivers/video/console/font_mini_4x6.c | 4 ++-- drivers/video/console/font_pearl_8x8.c | 4 ++-- drivers/video/console/font_sun12x22.c | 4 ++-- drivers/video/console/font_sun8x16.c | 4 ++-- drivers/video/console/fonts.c | 9 ++++----- include/linux/fb.h | 2 +- include/linux/font.h | 10 +++++----- 15 files changed, 39 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index a7b4a52f41f9..0fc8bb499c3f 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -767,7 +767,7 @@ static const char *fbcon_startup(void) const char *display_desc = "frame buffer device"; struct display *p = &fb_display[fg_console]; struct vc_data *vc = vc_cons[fg_console].d; - struct font_desc *font = NULL; + const struct font_desc *font = NULL; struct module *owner; struct fb_info *info = NULL; struct fbcon_ops *ops; @@ -841,7 +841,7 @@ static const char *fbcon_startup(void) info->var.yres); vc->vc_font.width = font->width; vc->vc_font.height = font->height; - vc->vc_font.data = p->fontdata = font->data; + vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ } @@ -941,7 +941,7 @@ static void fbcon_init(struct vc_data *vc, int init) fb, copy the font from that console */ t = &fb_display[svc->vc_num]; if (!vc->vc_font.data) { - vc->vc_font.data = p->fontdata = t->fontdata; + vc->vc_font.data = (void *)(p->fontdata = t->fontdata); vc->vc_font.width = (*default_mode)->vc_font.width; vc->vc_font.height = (*default_mode)->vc_font.height; p->userfont = t->userfont; @@ -1188,7 +1188,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, return; t = &fb_display[svc->vc_num]; if (!vc->vc_font.data) { - vc->vc_font.data = p->fontdata = t->fontdata; + vc->vc_font.data = (void *)(p->fontdata = t->fontdata); vc->vc_font.width = (*default_mode)->vc_font.width; vc->vc_font.height = (*default_mode)->vc_font.height; p->userfont = t->userfont; @@ -2150,7 +2150,7 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } static int fbcon_do_set_font(struct vc_data *vc, int w, int h, - u8 * data, int userfont) + const u8 * data, int userfont) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct display *p = &fb_display[vc->vc_num]; @@ -2168,7 +2168,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, cnt = FNTCHARCNT(data); else cnt = 256; - vc->vc_font.data = p->fontdata = data; + vc->vc_font.data = (void *)(p->fontdata = data); if ((p->userfont = userfont)) REFCOUNT(data)++; vc->vc_font.width = w; @@ -2325,7 +2325,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne tmp->vc_font.width == w && !memcmp(fb_display[i].fontdata, new_data, size)) { kfree(new_data - FONT_EXTRA_WORDS * sizeof(int)); - new_data = fb_display[i].fontdata; + new_data = (u8 *)fb_display[i].fontdata; break; } } @@ -2335,7 +2335,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne static int fbcon_set_def_font(struct vc_data *vc, 
struct console_font *font, char *name) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct font_desc *f; + const struct font_desc *f; if (!name) f = get_default_font(info->var.xres, info->var.yres); diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h index 08befafe11d1..0738cd62def2 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/console/fbcon.h @@ -30,7 +30,7 @@ struct display { /* Filled in by the frame buffer device */ u_short inverse; /* != 0 text black on white as default */ /* Filled in by the low-level console driver */ - u_char *fontdata; + const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ u_short scrollmode; /* Scroll Method */ short yscroll; /* Hardware scrolling */ diff --git a/drivers/video/console/font_10x18.c b/drivers/video/console/font_10x18.c index ff0af96e4dfc..e6aa0eab5bb6 100644 --- a/drivers/video/console/font_10x18.c +++ b/drivers/video/console/font_10x18.c @@ -7,7 +7,7 @@ #define FONTDATAMAX 9216 -static unsigned char fontdata_10x18[FONTDATAMAX] = { +static const unsigned char fontdata_10x18[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ @@ -5132,7 +5132,7 @@ static unsigned char fontdata_10x18[FONTDATAMAX] = { }; -struct font_desc font_10x18 = { +const struct font_desc font_10x18 = { FONT10x18_IDX, "10x18", 10, diff --git a/drivers/video/console/font_6x11.c b/drivers/video/console/font_6x11.c index c52f1294044a..89976cd97494 100644 --- a/drivers/video/console/font_6x11.c +++ b/drivers/video/console/font_6x11.c @@ -8,7 +8,7 @@ #define FONTDATAMAX (11*256) -static unsigned char fontdata_6x11[FONTDATAMAX] = { +static const unsigned char fontdata_6x11[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ @@ -3341,7 +3341,7 @@ static unsigned char fontdata_6x11[FONTDATAMAX] = { }; -struct font_desc font_vga_6x11 = { +const struct font_desc font_vga_6x11 = { VGA6x11_IDX, "ProFont6x11", 6, diff --git a/drivers/video/console/font_7x14.c b/drivers/video/console/font_7x14.c index 1fa7fcf2ff72..bbf116647397 100644 --- a/drivers/video/console/font_7x14.c +++ b/drivers/video/console/font_7x14.c @@ -7,7 +7,7 @@ #define FONTDATAMAX 3584 -static unsigned char fontdata_7x14[FONTDATAMAX] = { +static const unsigned char fontdata_7x14[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ @@ -4108,7 +4108,7 @@ static unsigned char fontdata_7x14[FONTDATAMAX] = { }; -struct font_desc font_7x14 = { +const struct font_desc font_7x14 = { FONT7x14_IDX, "7x14", 7, diff --git a/drivers/video/console/font_8x16.c b/drivers/video/console/font_8x16.c index e6f8dbaa122b..74fe86f28ff4 100644 --- a/drivers/video/console/font_8x16.c +++ b/drivers/video/console/font_8x16.c @@ -8,7 +8,7 @@ #define FONTDATAMAX 4096 -static unsigned char fontdata_8x16[FONTDATAMAX] = { +static const unsigned char fontdata_8x16[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ @@ -4621,7 +4621,7 @@ static unsigned char fontdata_8x16[FONTDATAMAX] = { }; -struct font_desc font_vga_8x16 = { +const struct font_desc font_vga_8x16 = { VGA8x16_IDX, "VGA8x16", 8, diff --git a/drivers/video/console/font_8x8.c b/drivers/video/console/font_8x8.c index 57fbe266a6b9..26199f8ee908 100644 --- a/drivers/video/console/font_8x8.c +++ b/drivers/video/console/font_8x8.c @@ -8,7 +8,7 @@ #define FONTDATAMAX 2048 -static unsigned char fontdata_8x8[FONTDATAMAX] = { +static const unsigned char fontdata_8x8[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ @@ -2573,7 +2573,7 @@ static unsigned char fontdata_8x8[FONTDATAMAX] = { }; -struct 
font_desc font_vga_8x8 = { +const struct font_desc font_vga_8x8 = { VGA8x8_IDX, "VGA8x8", 8, diff --git a/drivers/video/console/font_acorn_8x8.c b/drivers/video/console/font_acorn_8x8.c index d565505e3069..2d2e39632e2d 100644 --- a/drivers/video/console/font_acorn_8x8.c +++ b/drivers/video/console/font_acorn_8x8.c @@ -3,7 +3,7 @@ #include #include -static unsigned char acorndata_8x8[] = { +static const unsigned char acorndata_8x8[] = { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ @@ -262,7 +262,7 @@ static unsigned char acorndata_8x8[] = { /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; -struct font_desc font_acorn_8x8 = { +const struct font_desc font_acorn_8x8 = { ACORN8x8_IDX, "Acorn8x8", 8, diff --git a/drivers/video/console/font_mini_4x6.c b/drivers/video/console/font_mini_4x6.c index 593b95500a0c..d818234fdf11 100644 --- a/drivers/video/console/font_mini_4x6.c +++ b/drivers/video/console/font_mini_4x6.c @@ -43,7 +43,7 @@ __END__; #define FONTDATAMAX 1536 -static unsigned char fontdata_mini_4x6[FONTDATAMAX] = { +static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { /*{*/ /* Char 0: ' ' */ @@ -2147,7 +2147,7 @@ static unsigned char fontdata_mini_4x6[FONTDATAMAX] = { /*}*/ }; -struct font_desc font_mini_4x6 = { +const struct font_desc font_mini_4x6 = { MINI4x6_IDX, "MINI4x6", 4, diff --git a/drivers/video/console/font_pearl_8x8.c b/drivers/video/console/font_pearl_8x8.c index 5fa95f118818..e646c88f55c7 100644 --- a/drivers/video/console/font_pearl_8x8.c +++ b/drivers/video/console/font_pearl_8x8.c @@ -13,7 +13,7 @@ #define FONTDATAMAX 2048 -static unsigned char fontdata_pearl8x8[FONTDATAMAX] = { +static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ @@ -2577,7 +2577,7 @@ static unsigned char fontdata_pearl8x8[FONTDATAMAX] = { }; -struct font_desc font_pearl_8x8 = { +const struct font_desc font_pearl_8x8 = { PEARL8x8_IDX, "PEARL8x8", 8, diff --git a/drivers/video/console/font_sun12x22.c b/drivers/video/console/font_sun12x22.c index c7bd967ea100..ab5eb93407b4 100644 --- a/drivers/video/console/font_sun12x22.c +++ b/drivers/video/console/font_sun12x22.c @@ -2,7 +2,7 @@ #define FONTDATAMAX 11264 -static unsigned char fontdata_sun12x22[FONTDATAMAX] = { +static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { /* 0 0x00 '^@' */ 0x00, 0x00, /* 000000000000 */ @@ -6151,7 +6151,7 @@ static unsigned char fontdata_sun12x22[FONTDATAMAX] = { }; -struct font_desc font_sun_12x22 = { +const struct font_desc font_sun_12x22 = { SUN12x22_IDX, "SUN12x22", 12, diff --git a/drivers/video/console/font_sun8x16.c b/drivers/video/console/font_sun8x16.c index 2af3ab354652..41f910f5529c 100644 --- a/drivers/video/console/font_sun8x16.c +++ b/drivers/video/console/font_sun8x16.c @@ -2,7 +2,7 @@ #define FONTDATAMAX 4096 -static unsigned char fontdata_sun8x16[FONTDATAMAX] = { +static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, @@ -261,7 +261,7 @@ static unsigned char fontdata_sun8x16[FONTDATAMAX] = { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, }; -struct font_desc font_sun_8x16 = { +const 
struct font_desc font_sun_8x16 = { SUN8x16_IDX, "SUN8x16", 8, diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c index e79b29702649..4fd07d9eca03 100644 --- a/drivers/video/console/fonts.c +++ b/drivers/video/console/fonts.c @@ -23,7 +23,7 @@ #define NO_FONTS -static struct font_desc *fonts[] = { +static const struct font_desc *fonts[] = { #ifdef CONFIG_FONT_8x8 #undef NO_FONTS &font_vga_8x8, @@ -84,7 +84,7 @@ static struct font_desc *fonts[] = { * */ -struct font_desc *find_font(char *name) +const struct font_desc *find_font(const char *name) { unsigned int i; @@ -108,10 +108,10 @@ struct font_desc *find_font(char *name) * */ -struct font_desc *get_default_font(int xres, int yres) +const struct font_desc *get_default_font(int xres, int yres) { int i, c, cc; - struct font_desc *f, *g; + const struct font_desc *f, *g; g = NULL; cc = -10000; @@ -138,7 +138,6 @@ struct font_desc *get_default_font(int xres, int yres) return g; } -EXPORT_SYMBOL(fonts); EXPORT_SYMBOL(find_font); EXPORT_SYMBOL(get_default_font); diff --git a/include/linux/fb.h b/include/linux/fb.h index 82e39cd0c4fb..c698055266d0 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -619,7 +619,7 @@ struct fb_tilemap { __u32 height; /* height of each tile in scanlines */ __u32 depth; /* color depth of each tile */ __u32 length; /* number of tiles in the map */ - __u8 *data; /* actual tile map: a bitmap array, packed + const __u8 *data; /* actual tile map: a bitmap array, packed to the nearest byte */ }; diff --git a/include/linux/font.h b/include/linux/font.h index 8fc80a7d78ac..53b129f07f6f 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -15,9 +15,9 @@ struct font_desc { int idx; - char *name; + const char *name; int width, height; - void *data; + const void *data; int pref; }; @@ -32,7 +32,7 @@ struct font_desc { #define ACORN8x8_IDX 8 #define MINI4x6_IDX 9 -extern struct font_desc font_vga_8x8, +extern const struct font_desc font_vga_8x8, font_vga_8x16, font_pearl_8x8, font_vga_6x11, @@ -45,11 +45,11 @@ extern struct font_desc font_vga_8x8, /* Find a font with a specific name */ -extern struct font_desc *find_font(char *name); +extern const struct font_desc *find_font(const char *name); /* Get the default font for a specific screen size */ -extern struct font_desc *get_default_font(int xres, int yres); +extern const struct font_desc *get_default_font(int xres, int yres); /* Max. length for the name of a predefined font */ #define MAX_FONT_NAME 32 -- cgit v1.2.3 From 8b7fc4214b550fafe595330e28d7c2c72b8b62f6 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 14 Sep 2005 14:19:17 -0700 Subject: [PATCH] add PCI IDs so RME32 and RME96 drivers build While doing an allyesconfig build, I noticed that the commit commit 8cdfd2519c6c9a1e6057dc5970b2542b35895738 Author: Takashi Iwai Date: Wed Sep 7 14:08:11 2005 +0200 [ALSA] Remove superfluous PCI ID definitions broke the RME32 and RME96 drivers, since the PCI IDs they use seem to have changed names. Here's a patch to fix this -- compile tested only, since I have no idea what the hardware even is. Fix the build of the RME32 and RME96 drivers by having them use the PCI_DEVICE_ID_RME_xxx names defined in instead of the PCI_DEVICE_ID_xxx names that they used to define themselves. Also fix the typo in the id PCI_DEVICE_IDRME__DIGI96_8_PAD_OR_PST so the name is PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST. 
Signed-off-by: Roland Dreier Acked-by: Takashi Iwai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pci_ids.h | 2 +- sound/pci/rme32.c | 44 ++++++++++++++++++------------------ sound/pci/rme96.c | 60 ++++++++++++++++++++++++------------------------- 3 files changed, 53 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f6c1a142286a..72fe3385743c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1355,7 +1355,7 @@ #define PCI_DEVICE_ID_RME_DIGI96 0x3fc0 #define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 #define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 -#define PCI_DEVICE_IDRME__DIGI96_8_PAD_OR_PST 0x3fc3 +#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3 #define PCI_DEVICE_ID_XILINX_HAMMERFALL 0x3fc4 #define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 #define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c index 3daeecb9eb0e..cd313af6ebcf 100644 --- a/sound/pci/rme32.c +++ b/sound/pci/rme32.c @@ -228,11 +228,11 @@ typedef struct snd_rme32 { } rme32_t; static struct pci_device_id snd_rme32_ids[] = { - {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_DIGI32, + {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, - {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_DIGI32_8, + {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, - {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_DIGI32_PRO, + {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, {0,} }; @@ -240,7 +240,7 @@ static struct pci_device_id snd_rme32_ids[] = { MODULE_DEVICE_TABLE(pci, snd_rme32_ids); #define RME32_ISWORKING(rme32) ((rme32)->wcreg & RME32_WCR_START) -#define RME32_PRO_WITH_8414(rme32) ((rme32)->pci->device == PCI_DEVICE_ID_DIGI32_PRO && (rme32)->rev == RME32_PRO_REVISION_WITH_8414) +#define RME32_PRO_WITH_8414(rme32) ((rme32)->pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO && (rme32)->rev == RME32_PRO_REVISION_WITH_8414) static int snd_rme32_playback_prepare(snd_pcm_substream_t * substream); @@ -527,21 +527,21 @@ static int snd_rme32_playback_setrate(rme32_t * rme32, int rate) RME32_WCR_FREQ_1; break; case 64000: - if (rme32->pci->device != PCI_DEVICE_ID_DIGI32_PRO) + if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO) return -EINVAL; rme32->wcreg |= RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) & ~RME32_WCR_FREQ_1; break; case 88200: - if (rme32->pci->device != PCI_DEVICE_ID_DIGI32_PRO) + if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO) return -EINVAL; rme32->wcreg |= RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_1) & ~RME32_WCR_FREQ_0; break; case 96000: - if (rme32->pci->device != PCI_DEVICE_ID_DIGI32_PRO) + if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO) return -EINVAL; rme32->wcreg |= RME32_WCR_DS_BM; rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) | @@ -881,7 +881,7 @@ static int snd_rme32_playback_spdif_open(snd_pcm_substream_t * substream) runtime->hw = snd_rme32_spdif_fd_info; else runtime->hw = snd_rme32_spdif_info; - if (rme32->pci->device == PCI_DEVICE_ID_DIGI32_PRO) { + if (rme32->pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO) { runtime->hw.rates |= SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000; runtime->hw.rate_max = 96000; } @@ -1408,8 +1408,8 @@ static int __devinit snd_rme32_create(rme32_t * rme32) } /* set up ALSA pcm device for ADAT */ - if ((pci->device == PCI_DEVICE_ID_DIGI32) 
|| - (pci->device == PCI_DEVICE_ID_DIGI32_PRO)) { + if ((pci->device == PCI_DEVICE_ID_RME_DIGI32) || + (pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO)) { /* ADAT is not available on DIGI32 and DIGI32 Pro */ rme32->adat_pcm = NULL; } @@ -1639,11 +1639,11 @@ snd_rme32_info_inputtype_control(snd_kcontrol_t * kcontrol, uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; switch (rme32->pci->device) { - case PCI_DEVICE_ID_DIGI32: - case PCI_DEVICE_ID_DIGI32_8: + case PCI_DEVICE_ID_RME_DIGI32: + case PCI_DEVICE_ID_RME_DIGI32_8: uinfo->value.enumerated.items = 3; break; - case PCI_DEVICE_ID_DIGI32_PRO: + case PCI_DEVICE_ID_RME_DIGI32_PRO: uinfo->value.enumerated.items = 4; break; default: @@ -1670,11 +1670,11 @@ snd_rme32_get_inputtype_control(snd_kcontrol_t * kcontrol, ucontrol->value.enumerated.item[0] = snd_rme32_getinputtype(rme32); switch (rme32->pci->device) { - case PCI_DEVICE_ID_DIGI32: - case PCI_DEVICE_ID_DIGI32_8: + case PCI_DEVICE_ID_RME_DIGI32: + case PCI_DEVICE_ID_RME_DIGI32_8: items = 3; break; - case PCI_DEVICE_ID_DIGI32_PRO: + case PCI_DEVICE_ID_RME_DIGI32_PRO: items = 4; break; default: @@ -1697,11 +1697,11 @@ snd_rme32_put_inputtype_control(snd_kcontrol_t * kcontrol, int change, items = 3; switch (rme32->pci->device) { - case PCI_DEVICE_ID_DIGI32: - case PCI_DEVICE_ID_DIGI32_8: + case PCI_DEVICE_ID_RME_DIGI32: + case PCI_DEVICE_ID_RME_DIGI32_8: items = 3; break; - case PCI_DEVICE_ID_DIGI32_PRO: + case PCI_DEVICE_ID_RME_DIGI32_PRO: items = 4; break; default: @@ -1982,13 +1982,13 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) strcpy(card->driver, "Digi32"); switch (rme32->pci->device) { - case PCI_DEVICE_ID_DIGI32: + case PCI_DEVICE_ID_RME_DIGI32: strcpy(card->shortname, "RME Digi32"); break; - case PCI_DEVICE_ID_DIGI32_8: + case PCI_DEVICE_ID_RME_DIGI32_8: strcpy(card->shortname, "RME Digi32/8"); break; - case PCI_DEVICE_ID_DIGI32_PRO: + case PCI_DEVICE_ID_RME_DIGI32_PRO: strcpy(card->shortname, "RME Digi32 PRO"); break; } diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c index 9983b66dc564..c495cae78dbf 100644 --- a/sound/pci/rme96.c +++ b/sound/pci/rme96.c @@ -233,13 +233,13 @@ typedef struct snd_rme96 { } rme96_t; static struct pci_device_id snd_rme96_ids[] = { - { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96, + { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, - { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96_8, + { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, - { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96_8_PRO, + { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, - { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST, + { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, { 0, } }; @@ -248,12 +248,12 @@ MODULE_DEVICE_TABLE(pci, snd_rme96_ids); #define RME96_ISPLAYING(rme96) ((rme96)->wcreg & RME96_WCR_START) #define RME96_ISRECORDING(rme96) ((rme96)->wcreg & RME96_WCR_START_2) -#define RME96_HAS_ANALOG_IN(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST) -#define RME96_HAS_ANALOG_OUT(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PRO || \ - (rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST) +#define RME96_HAS_ANALOG_IN(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST) +#define RME96_HAS_ANALOG_OUT(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PRO || \ + (rme96)->pci->device == 
PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST) #define RME96_DAC_IS_1852(rme96) (RME96_HAS_ANALOG_OUT(rme96) && (rme96)->rev >= 4) -#define RME96_DAC_IS_1855(rme96) (((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST && (rme96)->rev < 4) || \ - ((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PRO && (rme96)->rev == 2)) +#define RME96_DAC_IS_1855(rme96) (((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && (rme96)->rev < 4) || \ + ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PRO && (rme96)->rev == 2)) #define RME96_185X_MAX_OUT(rme96) ((1 << (RME96_DAC_IS_1852(rme96) ? RME96_AD1852_VOL_BITS : RME96_AD1855_VOL_BITS)) - 1) static int @@ -830,9 +830,9 @@ snd_rme96_setinputtype(rme96_t *rme96, RME96_WCR_INP_1; break; case RME96_INPUT_XLR: - if ((rme96->pci->device != PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST && - rme96->pci->device != PCI_DEVICE_ID_DIGI96_8_PRO) || - (rme96->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST && + if ((rme96->pci->device != PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && + rme96->pci->device != PCI_DEVICE_ID_RME_DIGI96_8_PRO) || + (rme96->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && rme96->rev > 4)) { /* Only Digi96/8 PRO and Digi96/8 PAD supports XLR */ @@ -1598,7 +1598,7 @@ snd_rme96_create(rme96_t *rme96) rme96->spdif_pcm->info_flags = 0; /* set up ALSA pcm device for ADAT */ - if (pci->device == PCI_DEVICE_ID_DIGI96) { + if (pci->device == PCI_DEVICE_ID_RME_DIGI96) { /* ADAT is not available on the base model */ rme96->adat_pcm = NULL; } else { @@ -1858,14 +1858,14 @@ snd_rme96_info_inputtype_control(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t * uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; switch (rme96->pci->device) { - case PCI_DEVICE_ID_DIGI96: - case PCI_DEVICE_ID_DIGI96_8: + case PCI_DEVICE_ID_RME_DIGI96: + case PCI_DEVICE_ID_RME_DIGI96_8: uinfo->value.enumerated.items = 3; break; - case PCI_DEVICE_ID_DIGI96_8_PRO: + case PCI_DEVICE_ID_RME_DIGI96_8_PRO: uinfo->value.enumerated.items = 4; break; - case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST: + case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST: if (rme96->rev > 4) { /* PST */ uinfo->value.enumerated.items = 4; @@ -1895,14 +1895,14 @@ snd_rme96_get_inputtype_control(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t ucontrol->value.enumerated.item[0] = snd_rme96_getinputtype(rme96); switch (rme96->pci->device) { - case PCI_DEVICE_ID_DIGI96: - case PCI_DEVICE_ID_DIGI96_8: + case PCI_DEVICE_ID_RME_DIGI96: + case PCI_DEVICE_ID_RME_DIGI96_8: items = 3; break; - case PCI_DEVICE_ID_DIGI96_8_PRO: + case PCI_DEVICE_ID_RME_DIGI96_8_PRO: items = 4; break; - case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST: + case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST: if (rme96->rev > 4) { /* for handling PST case, (INPUT_ANALOG is moved to INPUT_XLR */ if (ucontrol->value.enumerated.item[0] == RME96_INPUT_ANALOG) { @@ -1932,14 +1932,14 @@ snd_rme96_put_inputtype_control(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t int change, items = 3; switch (rme96->pci->device) { - case PCI_DEVICE_ID_DIGI96: - case PCI_DEVICE_ID_DIGI96_8: + case PCI_DEVICE_ID_RME_DIGI96: + case PCI_DEVICE_ID_RME_DIGI96_8: items = 3; break; - case PCI_DEVICE_ID_DIGI96_8_PRO: + case PCI_DEVICE_ID_RME_DIGI96_8_PRO: items = 4; break; - case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST: + case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST: if (rme96->rev > 4) { items = 4; } else { @@ -1953,7 +1953,7 @@ snd_rme96_put_inputtype_control(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t val = ucontrol->value.enumerated.item[0] % items; /* special case for PST */ - if 
(rme96->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST && rme96->rev > 4) { + if (rme96->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && rme96->rev > 4) { if (val == RME96_INPUT_XLR) { val = RME96_INPUT_ANALOG; } @@ -2375,16 +2375,16 @@ snd_rme96_probe(struct pci_dev *pci, strcpy(card->driver, "Digi96"); switch (rme96->pci->device) { - case PCI_DEVICE_ID_DIGI96: + case PCI_DEVICE_ID_RME_DIGI96: strcpy(card->shortname, "RME Digi96"); break; - case PCI_DEVICE_ID_DIGI96_8: + case PCI_DEVICE_ID_RME_DIGI96_8: strcpy(card->shortname, "RME Digi96/8"); break; - case PCI_DEVICE_ID_DIGI96_8_PRO: + case PCI_DEVICE_ID_RME_DIGI96_8_PRO: strcpy(card->shortname, "RME Digi96/8 PRO"); break; - case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST: + case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST: pci_read_config_byte(rme96->pci, 8, &val); if (val < 5) { strcpy(card->shortname, "RME Digi96/8 PAD"); -- cgit v1.2.3 From 20ae975dfd54de581287b2ca8a1ad97099ab0396 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 14 Sep 2005 20:52:37 -0700 Subject: [NETLINK]: Reserve a slot for NETLINK_GENERIC. As requested by Jamal. Signed-off-by: David S. Miller --- include/linux/netlink.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 7bbd25970c9e..bdebdc564506 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -20,6 +20,7 @@ #define NETLINK_IP6_FW 13 #define NETLINK_DNRTMSG 14 /* DECnet routing messages */ #define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */ +#define NETLINK_GENERIC 16 #define MAX_LINKS 32 -- cgit v1.2.3 From 17b14451fd2b187ddd6303726755a3af0a926b6c Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Thu, 15 Sep 2005 15:44:00 +0100 Subject: [PATCH] PATCH: remove function for non-PCI as requested Signed-off-by: Alan Cox Signed-off-by: Jeff Garzik --- drivers/scsi/libata-core.c | 81 ++++++++++++++++++++++++++++------------------ include/linux/libata.h | 1 + 2 files changed, 50 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 5cc53cd9323e..72bdc91e148c 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -4122,6 +4122,53 @@ err_out: return 0; } +/** + * ata_host_set_remove - PCI layer callback for device removal + * @host_set: ATA host set that was removed + * + * Unregister all objects associated with this host set. Free those + * objects. + * + * LOCKING: + * Inherited from calling layer (may sleep). 
+ */ + + +void ata_host_set_remove(struct ata_host_set *host_set) +{ + struct ata_port *ap; + unsigned int i; + + for (i = 0; i < host_set->n_ports; i++) { + ap = host_set->ports[i]; + scsi_remove_host(ap->host); + } + + free_irq(host_set->irq, host_set); + + for (i = 0; i < host_set->n_ports; i++) { + ap = host_set->ports[i]; + + ata_scsi_release(ap->host); + + if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { + struct ata_ioports *ioaddr = &ap->ioaddr; + + if (ioaddr->cmd_addr == 0x1f0) + release_region(0x1f0, 8); + else if (ioaddr->cmd_addr == 0x170) + release_region(0x170, 8); + } + + scsi_host_put(ap->host); + } + + if (host_set->ops->host_stop) + host_set->ops->host_stop(host_set); + + kfree(host_set); +} + /** * ata_scsi_release - SCSI layer callback hook for host unload * @host: libata host to be unloaded @@ -4462,39 +4509,8 @@ void ata_pci_remove_one (struct pci_dev *pdev) { struct device *dev = pci_dev_to_dev(pdev); struct ata_host_set *host_set = dev_get_drvdata(dev); - struct ata_port *ap; - unsigned int i; - - for (i = 0; i < host_set->n_ports; i++) { - ap = host_set->ports[i]; - - scsi_remove_host(ap->host); - } - - free_irq(host_set->irq, host_set); - - for (i = 0; i < host_set->n_ports; i++) { - ap = host_set->ports[i]; - - ata_scsi_release(ap->host); - - if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { - struct ata_ioports *ioaddr = &ap->ioaddr; - - if (ioaddr->cmd_addr == 0x1f0) - release_region(0x1f0, 8); - else if (ioaddr->cmd_addr == 0x170) - release_region(0x170, 8); - } - - scsi_host_put(ap->host); - } - - if (host_set->ops->host_stop) - host_set->ops->host_stop(host_set); - - kfree(host_set); + ata_host_set_remove(host_set); pci_release_regions(pdev); pci_disable_device(pdev); dev_set_drvdata(dev, NULL); @@ -4564,6 +4580,7 @@ module_exit(ata_exit); EXPORT_SYMBOL_GPL(ata_std_bios_param); EXPORT_SYMBOL_GPL(ata_std_ports); EXPORT_SYMBOL_GPL(ata_device_add); +EXPORT_SYMBOL_GPL(ata_host_set_remove); EXPORT_SYMBOL_GPL(ata_sg_init); EXPORT_SYMBOL_GPL(ata_sg_init_one); EXPORT_SYMBOL_GPL(ata_qc_complete); diff --git a/include/linux/libata.h b/include/linux/libata.h index 022105c745fc..ceee1fc42c60 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -393,6 +393,7 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i extern void ata_pci_remove_one (struct pci_dev *pdev); #endif /* CONFIG_PCI */ extern int ata_device_add(struct ata_probe_ent *ent); +extern void ata_host_set_remove(struct ata_host_set *host_set); extern int ata_scsi_detect(Scsi_Host_Template *sht); extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); -- cgit v1.2.3 From a063cf5b7dde94d98f3f7c9f1c951e02c6564022 Mon Sep 17 00:00:00 2001 From: Karsten Keil Date: Fri, 16 Sep 2005 19:32:53 +0200 Subject: [PATCH] Add PCI IDs for Sitecom DC-105 Sitecom DC-105 PCI work with hfc_pci HiSax driver Signed-off-by: Karsten Keil Signed-off-by: Linus Torvalds --- drivers/isdn/hisax/hfc_pci.c | 1 + include/linux/pci_ids.h | 3 +++ 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 8337b0f26cc4..4866fc32d8d9 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -61,6 +61,7 @@ static const PCI_ENTRY id_list[] = {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi 
International", "Digi DataFire Micro V IOM2 (North America)"}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"}, + {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"}, {0, 0, NULL, NULL}, }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 72fe3385743c..b6ef7eb4bcbf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2252,6 +2252,9 @@ #define PCI_VENDOR_ID_INFINICON 0x1820 +#define PCI_VENDOR_ID_SITECOM 0x182d +#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 + #define PCI_VENDOR_ID_TOPSPIN 0x1867 #define PCI_VENDOR_ID_TDI 0x192E -- cgit v1.2.3 From 06168d8a10ceccced51380d683245b33474d428a Mon Sep 17 00:00:00 2001 From: Karsten Keil Date: Fri, 16 Sep 2005 19:34:17 +0200 Subject: [PATCH] cleanup whitespace in pci_ids.h Signed-off-by: Karsten Keil Signed-off-by: Linus Torvalds --- include/linux/pci_ids.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b6ef7eb4bcbf..486d1c1676bd 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2254,7 +2254,7 @@ #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 - + #define PCI_VENDOR_ID_TOPSPIN 0x1867 #define PCI_VENDOR_ID_TDI 0x192E -- cgit v1.2.3 From 67e6b629212fa9ffb7420e8a88a41806af637e28 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 16 Sep 2005 16:58:40 -0700 Subject: [DCCP]: Introduce DCCP_SOCKOPT_SERVICE As discussed in the dccp@vger mailing list: Now applications have to use setsockopt(DCCP_SOCKOPT_SERVICE, service[s]), prior to calling listen() and connect(). An array of unsigned ints can be passed meaning that the listening sock accepts connection requests for several services. With this we can ditch struct sockaddr_dccp and use only sockaddr_in (and sockaddr_in6 in the future). Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/dccp.h | 40 ++++++++++++++++++------- net/dccp/dccp.h | 9 +++--- net/dccp/ipv4.c | 30 +++++++++++++++++-- net/dccp/minisocks.c | 6 ++-- net/dccp/output.c | 14 ++++----- net/dccp/proto.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 6 files changed, 154 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 8bf4bacb5051..0e72708677e4 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -4,16 +4,6 @@ #include #include -/* Structure describing an Internet (DCCP) socket address. */ -struct sockaddr_dccp { - __u16 sdccp_family; /* Address family */ - __u16 sdccp_port; /* Port number */ - __u32 sdccp_addr; /* Internet address */ - __u32 sdccp_service; /* Service */ - /* Pad to size of `struct sockaddr': 16 bytes . 
*/ - __u32 sdccp_pad; -}; - /** * struct dccp_hdr - generic part of DCCP packet header * @@ -188,6 +178,9 @@ enum { /* DCCP socket options */ #define DCCP_SOCKOPT_PACKET_SIZE 1 +#define DCCP_SOCKOPT_SERVICE 2 + +#define DCCP_SERVICE_LIST_MAX_LEN 32 #ifdef __KERNEL__ @@ -382,6 +375,25 @@ enum dccp_role { DCCP_ROLE_SERVER, }; +struct dccp_service_list { + __u32 dccpsl_nr; + __u32 dccpsl_list[0]; +}; + +#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) + +static inline int dccp_list_has_service(const struct dccp_service_list *sl, + const u32 service) +{ + if (likely(sl != NULL)) { + u32 i = sl->dccpsl_nr; + while (i--) + if (sl->dccpsl_list[i] == service) + return 1; + } + return 0; +} + /** * struct dccp_sock - DCCP socket state * @@ -417,7 +429,8 @@ struct dccp_sock { __u64 dccps_gss; __u64 dccps_gsr; __u64 dccps_gar; - unsigned long dccps_service; + __u32 dccps_service; + struct dccp_service_list *dccps_service_list; struct timeval dccps_timestamp_time; __u32 dccps_timestamp_echo; __u32 dccps_packet_size; @@ -443,6 +456,11 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk) return (struct dccp_sock *)sk; } +static inline int dccp_service_not_initialized(const struct sock *sk) +{ + return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE; +} + static inline const char *dccp_role(const struct sock *sk) { switch (dccp_sk(sk)->dccps_role) { diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 95c4630b3b18..be7a660b6b24 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -258,13 +258,12 @@ extern int dccp_v4_send_reset(struct sock *sk, extern void dccp_send_close(struct sock *sk, const int active); struct dccp_skb_cb { - __u8 dccpd_type; - __u8 dccpd_reset_code; - __u8 dccpd_service; - __u8 dccpd_ccval; + __u8 dccpd_type:4; + __u8 dccpd_ccval:4; + __u8 dccpd_reset_code; + __u16 dccpd_opt_len; __u64 dccpd_seq; __u64 dccpd_ack_seq; - int dccpd_opt_len; }; #define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0])) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index e09907d8b7da..94a440b2685b 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -246,6 +246,9 @@ static int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, dp->dccps_role = DCCP_ROLE_CLIENT; + if (dccp_service_not_initialized(sk)) + return -EPROTO; + if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; @@ -661,6 +664,16 @@ static inline u64 dccp_v4_init_sequence(const struct sock *sk, dccp_hdr(skb)->dccph_sport); } +static inline int dccp_bad_service_code(const struct sock *sk, + const __u32 service) +{ + const struct dccp_sock *dp = dccp_sk(sk); + + if (dp->dccps_service == service) + return 0; + return !dccp_list_has_service(dp->dccps_service_list, service); +} + int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { struct inet_request_sock *ireq; @@ -669,6 +682,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) struct dccp_request_sock *dreq; const __u32 saddr = skb->nh.iph->saddr; const __u32 daddr = skb->nh.iph->daddr; + const __u32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; struct dst_entry *dst = NULL; @@ -680,6 +694,10 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop; } + if (dccp_bad_service_code(sk, service)) { + reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; + goto drop; + } /* * TW buckets are converted to open requests without * limitations, they conserve resources and peer is @@ -722,9 +740,9 @@ int 
dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * dccp_create_openreq_child. */ dreq = dccp_rsk(req); - dreq->dreq_isr = dcb->dccpd_seq; - dreq->dreq_iss = dccp_v4_init_sequence(sk, skb); - dreq->dreq_service = dccp_hdr_request(skb)->dccph_req_service; + dreq->dreq_isr = dcb->dccpd_seq; + dreq->dreq_iss = dccp_v4_init_sequence(sk, skb); + dreq->dreq_service = service; if (dccp_v4_send_response(sk, req, dst)) goto drop_and_free; @@ -1284,6 +1302,7 @@ static int dccp_v4_init_sock(struct sock *sk) sk->sk_write_space = dccp_write_space; dp->dccps_mss_cache = 536; dp->dccps_role = DCCP_ROLE_UNDEFINED; + dp->dccps_service = DCCP_SERVICE_INVALID_VALUE; return 0; } @@ -1305,6 +1324,11 @@ static int dccp_v4_destroy_sock(struct sock *sk) if (inet_csk(sk)->icsk_bind_hash != NULL) inet_put_port(&dccp_hashinfo, sk); + if (dp->dccps_service_list != NULL) { + kfree(dp->dccps_service_list); + dp->dccps_service_list = NULL; + } + ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk); ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk); dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts); diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 18461bc04cbe..933e10db1789 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -93,9 +93,11 @@ struct sock *dccp_create_openreq_child(struct sock *sk, struct inet_connection_sock *newicsk = inet_csk(sk); struct dccp_sock *newdp = dccp_sk(newsk); + newdp->dccps_role = DCCP_ROLE_SERVER; newdp->dccps_hc_rx_ackpkts = NULL; - newdp->dccps_role = DCCP_ROLE_SERVER; - newicsk->icsk_rto = DCCP_TIMEOUT_INIT; + newdp->dccps_service_list = NULL; + newdp->dccps_service = dreq->dreq_service; + newicsk->icsk_rto = DCCP_TIMEOUT_INIT; do_gettimeofday(&newdp->dccps_epoch); if (newdp->dccps_options.dccpo_send_ack_vector) { diff --git a/net/dccp/output.c b/net/dccp/output.c index ea6d0e91e511..156b1d29a156 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -85,7 +85,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) switch (dcb->dccpd_type) { case DCCP_PKT_REQUEST: dccp_hdr_request(skb)->dccph_req_service = - dcb->dccpd_service; + dp->dccps_service; break; case DCCP_PKT_RESET: dccp_hdr_reset(skb)->dccph_reset_code = @@ -270,6 +270,7 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, struct request_sock *req) { struct dccp_hdr *dh; + struct dccp_request_sock *dreq; const int dccp_header_size = sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_response); @@ -285,8 +286,9 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, skb->dst = dst_clone(dst); skb->csum = 0; + dreq = dccp_rsk(req); DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; - DCCP_SKB_CB(skb)->dccpd_seq = dccp_rsk(req)->dreq_iss; + DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; dccp_insert_options(sk, skb); skb->h.raw = skb_push(skb, dccp_header_size); @@ -300,8 +302,9 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, DCCP_SKB_CB(skb)->dccpd_opt_len) / 4; dh->dccph_type = DCCP_PKT_RESPONSE; dh->dccph_x = 1; - dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss); - dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr); + dccp_hdr_set_seq(dh, dreq->dreq_iss); + dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); + dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr, inet_rsk(req)->rmt_addr); @@ -397,9 +400,6 @@ int dccp_connect(struct sock *sk) skb_reserve(skb, MAX_DCCP_HEADER); DCCP_SKB_CB(skb)->dccpd_type = 
DCCP_PKT_REQUEST; - /* FIXME: set service to something meaningful, coming - * from userspace*/ - DCCP_SKB_CB(skb)->dccpd_service = 0; skb->csum = 0; skb_set_owner_w(skb, sk); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 18a0e69c9dc7..9bda2868eba6 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -94,7 +94,15 @@ EXPORT_SYMBOL_GPL(dccp_state_name); static inline int dccp_listen_start(struct sock *sk) { - dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN; + struct dccp_sock *dp = dccp_sk(sk); + + dp->dccps_role = DCCP_ROLE_LISTEN; + /* + * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE) + * before calling listen() + */ + if (dccp_service_not_initialized(sk)) + return -EPROTO; return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE); } @@ -202,6 +210,42 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) return -ENOIOCTLCMD; } +static int dccp_setsockopt_service(struct sock *sk, const u32 service, + char __user *optval, int optlen) +{ + struct dccp_sock *dp = dccp_sk(sk); + struct dccp_service_list *sl = NULL; + + if (service == DCCP_SERVICE_INVALID_VALUE || + optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32)) + return -EINVAL; + + if (optlen > sizeof(service)) { + sl = kmalloc(optlen, GFP_KERNEL); + if (sl == NULL) + return -ENOMEM; + + sl->dccpsl_nr = optlen / sizeof(u32) - 1; + if (copy_from_user(sl->dccpsl_list, + optval + sizeof(service), + optlen - sizeof(service)) || + dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) { + kfree(sl); + return -EFAULT; + } + } + + lock_sock(sk); + dp->dccps_service = service; + + if (dp->dccps_service_list != NULL) + kfree(dp->dccps_service_list); + + dp->dccps_service_list = sl; + release_sock(sk); + return 0; +} + int dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen) { @@ -218,8 +262,10 @@ int dccp_setsockopt(struct sock *sk, int level, int optname, if (get_user(val, (int __user *)optval)) return -EFAULT; - lock_sock(sk); + if (optname == DCCP_SOCKOPT_SERVICE) + return dccp_setsockopt_service(sk, val, optval, optlen); + lock_sock(sk); dp = dccp_sk(sk); err = 0; @@ -236,6 +282,37 @@ int dccp_setsockopt(struct sock *sk, int level, int optname, return err; } +static int dccp_getsockopt_service(struct sock *sk, int len, + u32 __user *optval, + int __user *optlen) +{ + const struct dccp_sock *dp = dccp_sk(sk); + const struct dccp_service_list *sl; + int err = -ENOENT, slen = 0, total_len = sizeof(u32); + + lock_sock(sk); + if (dccp_service_not_initialized(sk)) + goto out; + + if ((sl = dp->dccps_service_list) != NULL) { + slen = sl->dccpsl_nr * sizeof(u32); + total_len += slen; + } + + err = -EINVAL; + if (total_len > len) + goto out; + + err = 0; + if (put_user(total_len, optlen) || + put_user(dp->dccps_service, optval) || + (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen))) + err = -EFAULT; +out: + release_sock(sk); + return err; +} + int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -248,6 +325,10 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; + if (optname == DCCP_SOCKOPT_SERVICE) + return dccp_getsockopt_service(sk, len, + (u32 __user *)optval, optlen); + len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; -- cgit v1.2.3 From 1cbf07478bbf3e350a2025bc5ea23fedaa95855a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 16 Sep 2005 16:59:20 -0700 Subject: [TG3]: Add AMD K8 to list of write-reorder chipsets. 
Thanks to Andy Stewart for the report and testing debug patches from Michael Chan. Signed-off-by: David S. Miller --- drivers/net/tg3.c | 8 +++++--- include/linux/pci_ids.h | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 7599f52e15b3..52eae9f096e2 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -67,8 +67,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.39" -#define DRV_MODULE_RELDATE "September 5, 2005" +#define DRV_MODULE_VERSION "3.40" +#define DRV_MODULE_RELDATE "September 15, 2005" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -9271,6 +9271,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) static struct pci_device_id write_reorder_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_K8_NB) }, { }, }; u32 misc_ctrl_reg; @@ -9285,7 +9287,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_SUN_570X; #endif - /* If we have an AMD 762 chipset, write + /* If we have an AMD 762 or K8 chipset, write * reordering to the mailbox registers done by the host * controller can cause major troubles. We read back from * every mailbox register write to force the writes to be diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 72fe3385743c..3dc161894c7b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -491,6 +491,7 @@ #define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 #define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_DEVICE_ID_AMD_K8_NB 0x1100 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_SCSI 0x2020 -- cgit v1.2.3 From 8ac2120d90273c590cf7662f03d103519101685b Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Fri, 16 Sep 2005 19:28:08 -0700 Subject: [PATCH] i2c: kill an unused i2c_adapter struct member MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kill an unused member of the i2c_adapter structure. This additionally fixes a potential bug, because doesn't include , so different files including could see a different definition of the i2c_adapter structure, depending on them including (or other header files themselves including ) before , or not. Credits go to Jörn Engel for pointing me to the problem. Signed-off-by: Jean Delvare Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/i2c.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/i2c.h b/include/linux/i2c.h index be35332b67e6..3d49a305bf88 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -230,11 +230,6 @@ struct i2c_adapter { struct device dev; /* the adapter device */ struct class_device class_dev; /* the class device */ -#ifdef CONFIG_PROC_FS - /* No need to set this when you initialize the adapter */ - int inode; -#endif /* def CONFIG_PROC_FS */ - int nr; struct list_head clients; struct list_head list; -- cgit v1.2.3 From f647e08a55d2c88c4e7ab17a0a8e3fcf568fbc65 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 16 Sep 2005 19:28:10 -0700 Subject: [PATCH] joystick-vs-x.org fix Fix http://bugzilla.kernel.org/show_bug.cgi?id=5241 2.6.13 broke compilation of the xorg tree, which apprarently insists on including that file. 
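The fix below switches the user-visible joystick structs to fixed-width int32_t/int64_t types and hides the BITS_PER_LONG selection behind #ifdef __KERNEL__. As a rough sketch of that pattern only — the names here are invented for illustration and this code is not part of the patch:

/* Hypothetical sketch of the header pattern the fix applies: user-visible
 * structs carry fixed-width libc types, while anything depending on
 * BITS_PER_LONG stays behind #ifdef __KERNEL__. */
#include <stdint.h>

struct js_example_data {
	int32_t buttons;	/* previously __s32 */
	int32_t x;
	int32_t y;
};

#ifdef __KERNEL__
/* Userspace builds (e.g. the X.org joystick driver) never reach this. */
#if BITS_PER_LONG == 64
#define JS_EXAMPLE_SAVE_TYPE struct js_example_save_64
#else
#define JS_EXAMPLE_SAVE_TYPE struct js_example_save_32
#endif
#endif

int main(void)
{
	struct js_example_data d = { 0, 0, 0 };
	return (int)d.buttons;
}
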
Cc: Vojtech Pavlik Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/joystick.h | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/joystick.h b/include/linux/joystick.h index 06b9af77eb7f..5fd20ddd7ae3 100644 --- a/include/linux/joystick.h +++ b/include/linux/joystick.h @@ -111,29 +111,30 @@ struct js_corr { #define JS_SET_ALL 8 struct JS_DATA_TYPE { - __s32 buttons; - __s32 x; - __s32 y; + int32_t buttons; + int32_t x; + int32_t y; }; struct JS_DATA_SAVE_TYPE_32 { - __s32 JS_TIMEOUT; - __s32 BUSY; - __s32 JS_EXPIRETIME; - __s32 JS_TIMELIMIT; + int32_t JS_TIMEOUT; + int32_t BUSY; + int32_t JS_EXPIRETIME; + int32_t JS_TIMELIMIT; struct JS_DATA_TYPE JS_SAVE; struct JS_DATA_TYPE JS_CORR; }; struct JS_DATA_SAVE_TYPE_64 { - __s32 JS_TIMEOUT; - __s32 BUSY; - __s64 JS_EXPIRETIME; - __s64 JS_TIMELIMIT; + int32_t JS_TIMEOUT; + int32_t BUSY; + int64_t JS_EXPIRETIME; + int64_t JS_TIMELIMIT; struct JS_DATA_TYPE JS_SAVE; struct JS_DATA_TYPE JS_CORR; }; +#ifdef __KERNEL__ #if BITS_PER_LONG == 64 #define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64 #elif BITS_PER_LONG == 32 @@ -141,5 +142,6 @@ struct JS_DATA_SAVE_TYPE_64 { #else #error Unexpected BITS_PER_LONG #endif +#endif #endif /* _LINUX_JOYSTICK_H */ -- cgit v1.2.3 From ae31c3399d17b1f7bc1742724f70476b5417744f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 18 Sep 2005 00:17:51 -0700 Subject: [DCCP]: Move the ack vector code to net/dccp/ackvec.[ch] Isolating it, that will be used when we introduce a CCID2 (TCP-Like) implementation. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/dccp.h | 14 +- net/dccp/Makefile | 2 + net/dccp/ackvec.c | 419 ++++++++++++++++++++++++++++++++++++++++++++++++ net/dccp/ackvec.h | 133 ++++++++++++++++ net/dccp/dccp.h | 82 ++-------- net/dccp/input.c | 69 ++------ net/dccp/ipv4.c | 59 ++----- net/dccp/minisocks.c | 13 +- net/dccp/options.c | 440 +-------------------------------------------------- net/dccp/output.c | 12 +- 10 files changed, 612 insertions(+), 631 deletions(-) create mode 100644 net/dccp/ackvec.c create mode 100644 net/dccp/ackvec.h (limited to 'include/linux') diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 0e72708677e4..8c8e029095a5 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -353,14 +353,8 @@ static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req) extern struct inet_timewait_death_row dccp_death_row; -/* Read about the ECN nonce to see why it is 253 */ -#define DCCP_MAX_ACK_VECTOR_LEN 253 - struct dccp_options_received { - u32 dccpor_ndp:24, - dccpor_ack_vector_len:8; - u32 dccpor_ack_vector_idx:10; - /* 22 bits hole, try to pack */ + u32 dccpor_ndp; /* only 24 bits */ u32 dccpor_timestamp; u32 dccpor_timestamp_echo; u32 dccpor_elapsed_time; @@ -394,6 +388,8 @@ static inline int dccp_list_has_service(const struct dccp_service_list *sl, return 0; } +struct dccp_ackvec; + /** * struct dccp_sock - DCCP socket state * @@ -414,7 +410,7 @@ static inline int dccp_list_has_service(const struct dccp_service_list *sl, * @dccps_packet_size - Set thru setsockopt * @dccps_role - Role of this sock, one of %dccp_role * @dccps_ndp_count - number of Non Data Packets since last data packet - * @dccps_hc_rx_ackpkts - receiver half connection acked packets + * @dccps_hc_rx_ackvec - rx half connection ack vector */ struct dccp_sock { /* inet_connection_sock has to be the first member of dccp_sock */ @@ 
-439,7 +435,7 @@ struct dccp_sock { __u32 dccps_pmtu_cookie; __u32 dccps_mss_cache; struct dccp_options dccps_options; - struct dccp_ackpkts *dccps_hc_rx_ackpkts; + struct dccp_ackvec *dccps_hc_rx_ackvec; void *dccps_hc_rx_ccid_private; void *dccps_hc_tx_ccid_private; struct ccid *dccps_hc_rx_ccid; diff --git a/net/dccp/Makefile b/net/dccp/Makefile index fb97bb042455..344a8da153fc 100644 --- a/net/dccp/Makefile +++ b/net/dccp/Makefile @@ -3,6 +3,8 @@ obj-$(CONFIG_IP_DCCP) += dccp.o dccp-y := ccid.o input.o ipv4.o minisocks.o options.o output.o proto.o \ timer.o +dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o + obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o dccp_diag-y := diag.o diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c new file mode 100644 index 000000000000..6530283eafca --- /dev/null +++ b/net/dccp/ackvec.c @@ -0,0 +1,419 @@ +/* + * net/dccp/ackvec.c + * + * An implementation of the DCCP protocol + * Copyright (c) 2005 Arnaldo Carvalho de Melo + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License; + */ + +#include "ackvec.h" +#include "dccp.h" + +#include +#include + +#include + +int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) +{ + struct dccp_sock *dp = dccp_sk(sk); + struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; + int len = av->dccpav_vec_len + 2; + struct timeval now; + u32 elapsed_time; + unsigned char *to, *from; + + dccp_timestamp(sk, &now); + elapsed_time = timeval_delta(&now, &av->dccpav_time) / 10; + + if (elapsed_time != 0) + dccp_insert_option_elapsed_time(sk, skb, elapsed_time); + + if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) + return -1; + + /* + * XXX: now we have just one ack vector sent record, so + * we have to wait for it to be cleared. + * + * Of course this is not acceptable, but this is just for + * basic testing now. + */ + if (av->dccpav_ack_seqno != DCCP_MAX_SEQNO + 1) + return -1; + + DCCP_SKB_CB(skb)->dccpd_opt_len += len; + + to = skb_push(skb, len); + *to++ = DCCPO_ACK_VECTOR_0; + *to++ = len; + + len = av->dccpav_vec_len; + from = av->dccpav_buf + av->dccpav_buf_head; + + /* Check if buf_head wraps */ + if (av->dccpav_buf_head + len > av->dccpav_vec_len) { + const u32 tailsize = (av->dccpav_vec_len - av->dccpav_buf_head); + + memcpy(to, from, tailsize); + to += tailsize; + len -= tailsize; + from = av->dccpav_buf; + } + + memcpy(to, from, len); + /* + * From draft-ietf-dccp-spec-11.txt: + * + * For each acknowledgement it sends, the HC-Receiver will add an + * acknowledgement record. ack_seqno will equal the HC-Receiver + * sequence number it used for the ack packet; ack_ptr will equal + * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will + * equal buf_nonce. + * + * This implemention uses just one ack record for now. 
+ */ + av->dccpav_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq; + av->dccpav_ack_ptr = av->dccpav_buf_head; + av->dccpav_ack_ackno = av->dccpav_buf_ackno; + av->dccpav_ack_nonce = av->dccpav_buf_nonce; + av->dccpav_sent_len = av->dccpav_vec_len; + + dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, " + "ack_ackno=%llu\n", + debug_prefix, av->dccpav_sent_len, + (unsigned long long)av->dccpav_ack_seqno, + (unsigned long long)av->dccpav_ack_ackno); + return -1; +} + +struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, + const unsigned int __nocast priority) +{ + struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority); + + if (av != NULL) { + av->dccpav_buf_len = len; + av->dccpav_buf_head = + av->dccpav_buf_tail = av->dccpav_buf_len - 1; + av->dccpav_buf_ackno = + av->dccpav_ack_ackno = av->dccpav_ack_seqno = ~0LLU; + av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; + av->dccpav_ack_ptr = 0; + av->dccpav_time.tv_sec = 0; + av->dccpav_time.tv_usec = 0; + av->dccpav_sent_len = av->dccpav_vec_len = 0; + } + + return av; +} + +void dccp_ackvec_free(struct dccp_ackvec *av) +{ + kfree(av); +} + +static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, + const unsigned int index) +{ + return av->dccpav_buf[index] & DCCP_ACKVEC_STATE_MASK; +} + +static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, + const unsigned int index) +{ + return av->dccpav_buf[index] & DCCP_ACKVEC_LEN_MASK; +} + +/* + * If several packets are missing, the HC-Receiver may prefer to enter multiple + * bytes with run length 0, rather than a single byte with a larger run length; + * this simplifies table updates if one of the missing packets arrives. + */ +static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av, + const unsigned int packets, + const unsigned char state) +{ + unsigned int gap; + signed long new_head; + + if (av->dccpav_vec_len + packets > av->dccpav_buf_len) + return -ENOBUFS; + + gap = packets - 1; + new_head = av->dccpav_buf_head - packets; + + if (new_head < 0) { + if (gap > 0) { + memset(av->dccpav_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED, + gap + new_head + 1); + gap = -new_head; + } + new_head += av->dccpav_buf_len; + } + + av->dccpav_buf_head = new_head; + + if (gap > 0) + memset(av->dccpav_buf + av->dccpav_buf_head + 1, + DCCP_ACKVEC_STATE_NOT_RECEIVED, gap); + + av->dccpav_buf[av->dccpav_buf_head] = state; + av->dccpav_vec_len += packets; + return 0; +} + +/* + * Implements the draft-ietf-dccp-spec-11.txt Appendix A + */ +int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, + const u64 ackno, const u8 state) +{ + /* + * Check at the right places if the buffer is full, if it is, tell the + * caller to start dropping packets till the HC-Sender acks our ACK + * vectors, when we will free up space in dccpav_buf. + * + * We may well decide to do buffer compression, etc, but for now lets + * just drop. + * + * From Appendix A: + * + * Of course, the circular buffer may overflow, either when the + * HC-Sender is sending data at a very high rate, when the + * HC-Receiver's acknowledgements are not reaching the HC-Sender, + * or when the HC-Sender is forgetting to acknowledge those acks + * (so the HC-Receiver is unable to clean up old state). In this + * case, the HC-Receiver should either compress the buffer (by + * increasing run lengths when possible), transfer its state to + * a larger buffer, or, as a last resort, drop all received + * packets, without processing them whatsoever, until its buffer + * shrinks again. 
+ */ + + /* See if this is the first ackno being inserted */ + if (av->dccpav_vec_len == 0) { + av->dccpav_buf[av->dccpav_buf_head] = state; + av->dccpav_vec_len = 1; + } else if (after48(ackno, av->dccpav_buf_ackno)) { + const u64 delta = dccp_delta_seqno(av->dccpav_buf_ackno, + ackno); + + /* + * Look if the state of this packet is the same as the + * previous ackno and if so if we can bump the head len. + */ + if (delta == 1 && + dccp_ackvec_state(av, av->dccpav_buf_head) == state && + (dccp_ackvec_len(av, av->dccpav_buf_head) < + DCCP_ACKVEC_LEN_MASK)) + av->dccpav_buf[av->dccpav_buf_head]++; + else if (dccp_ackvec_set_buf_head_state(av, delta, state)) + return -ENOBUFS; + } else { + /* + * A.1.2. Old Packets + * + * When a packet with Sequence Number S arrives, and + * S <= buf_ackno, the HC-Receiver will scan the table + * for the byte corresponding to S. (Indexing structures + * could reduce the complexity of this scan.) + */ + u64 delta = dccp_delta_seqno(ackno, av->dccpav_buf_ackno); + unsigned int index = av->dccpav_buf_head; + + while (1) { + const u8 len = dccp_ackvec_len(av, index); + const u8 state = dccp_ackvec_state(av, index); + /* + * valid packets not yet in dccpav_buf have a reserved + * entry, with a len equal to 0. + */ + if (state == DCCP_ACKVEC_STATE_NOT_RECEIVED && + len == 0 && delta == 0) { /* Found our + reserved seat! */ + dccp_pr_debug("Found %llu reserved seat!\n", + (unsigned long long)ackno); + av->dccpav_buf[index] = state; + goto out; + } + /* len == 0 means one packet */ + if (delta < len + 1) + goto out_duplicate; + + delta -= len + 1; + if (++index == av->dccpav_buf_len) + index = 0; + } + } + + av->dccpav_buf_ackno = ackno; + dccp_timestamp(sk, &av->dccpav_time); +out: + dccp_pr_debug(""); + return 0; + +out_duplicate: + /* Duplicate packet */ + dccp_pr_debug("Received a dup or already considered lost " + "packet: %llu\n", (unsigned long long)ackno); + return -EILSEQ; +} + +#ifdef CONFIG_IP_DCCP_DEBUG +void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) +{ + if (!dccp_debug) + return; + + printk("ACK vector len=%d, ackno=%llu |", len, + (unsigned long long)ackno); + + while (len--) { + const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; + const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; + + printk("%d,%d|", state, rl); + ++vector; + } + + printk("\n"); +} + +void dccp_ackvec_print(const struct dccp_ackvec *av) +{ + dccp_ackvector_print(av->dccpav_buf_ackno, + av->dccpav_buf + av->dccpav_buf_head, + av->dccpav_vec_len); +} +#endif + +static void dccp_ackvec_trow_away_ack_record(struct dccp_ackvec *av) +{ + /* + * As we're keeping track of the ack vector size (dccpav_vec_len) and + * the sent ack vector size (dccpav_sent_len) we don't need + * dccpav_buf_tail at all, but keep this code here as in the future + * we'll implement a vector of ack records, as suggested in + * draft-ietf-dccp-spec-11.txt Appendix A. -acme + */ +#if 0 + av->dccpav_buf_tail = av->dccpav_ack_ptr + 1; + if (av->dccpav_buf_tail >= av->dccpav_vec_len) + av->dccpav_buf_tail -= av->dccpav_vec_len; +#endif + av->dccpav_vec_len -= av->dccpav_sent_len; +} + +void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk, + const u64 ackno) +{ + /* Check if we actually sent an ACK vector */ + if (av->dccpav_ack_seqno == DCCP_MAX_SEQNO + 1) + return; + + if (ackno == av->dccpav_ack_seqno) { +#ifdef CONFIG_IP_DCCP_DEBUG + struct dccp_sock *dp = dccp_sk(sk); + const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? 
+ "CLIENT rx ack: " : "server rx ack: "; +#endif + dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, " + "ack_ackno=%llu, ACKED!\n", + debug_prefix, 1, + (unsigned long long)av->dccpav_ack_seqno, + (unsigned long long)av->dccpav_ack_ackno); + dccp_ackvec_trow_away_ack_record(av); + av->dccpav_ack_seqno = DCCP_MAX_SEQNO + 1; + } +} + +static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, + struct sock *sk, u64 ackno, + const unsigned char len, + const unsigned char *vector) +{ + unsigned char i; + + /* Check if we actually sent an ACK vector */ + if (av->dccpav_ack_seqno == DCCP_MAX_SEQNO + 1) + return; + /* + * We're in the receiver half connection, so if the received an ACK + * vector ackno (e.g. 50) before dccpav_ack_seqno (e.g. 52), we're + * not interested. + * + * Extra explanation with example: + * + * if we received an ACK vector with ackno 50, it can only be acking + * 50, 49, 48, etc, not 52 (the seqno for the ACK vector we sent). + */ + /* dccp_pr_debug("is %llu < %llu? ", ackno, av->dccpav_ack_seqno); */ + if (before48(ackno, av->dccpav_ack_seqno)) { + /* dccp_pr_debug_cat("yes\n"); */ + return; + } + /* dccp_pr_debug_cat("no\n"); */ + + i = len; + while (i--) { + const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; + u64 ackno_end_rl; + + dccp_set_seqno(&ackno_end_rl, ackno - rl); + + /* + * dccp_pr_debug("is %llu <= %llu <= %llu? ", ackno_end_rl, + * av->dccpav_ack_seqno, ackno); + */ + if (between48(av->dccpav_ack_seqno, ackno_end_rl, ackno)) { + const u8 state = (*vector & + DCCP_ACKVEC_STATE_MASK) >> 6; + /* dccp_pr_debug_cat("yes\n"); */ + + if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) { +#ifdef CONFIG_IP_DCCP_DEBUG + struct dccp_sock *dp = dccp_sk(sk); + const char *debug_prefix = + dp->dccps_role == DCCP_ROLE_CLIENT ? + "CLIENT rx ack: " : "server rx ack: "; +#endif + dccp_pr_debug("%sACK vector 0, len=%d, " + "ack_seqno=%llu, ack_ackno=%llu, " + "ACKED!\n", + debug_prefix, len, + (unsigned long long) + av->dccpav_ack_seqno, + (unsigned long long) + av->dccpav_ack_ackno); + dccp_ackvec_trow_away_ack_record(av); + } + /* + * If dccpav_ack_seqno was not received, no problem + * we'll send another ACK vector. + */ + av->dccpav_ack_seqno = DCCP_MAX_SEQNO + 1; + break; + } + /* dccp_pr_debug_cat("no\n"); */ + + dccp_set_seqno(&ackno, ackno_end_rl - 1); + ++vector; + } +} + +int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, + const u8 opt, const u8 *value, const u8 len) +{ + if (len > DCCP_MAX_ACKVEC_LEN) + return -1; + + /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ + dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk, + DCCP_SKB_CB(skb)->dccpd_ack_seq, + len, value); + return 0; +} diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h new file mode 100644 index 000000000000..8ca51c9191f7 --- /dev/null +++ b/net/dccp/ackvec.h @@ -0,0 +1,133 @@ +#ifndef _ACKVEC_H +#define _ACKVEC_H +/* + * net/dccp/ackvec.h + * + * An implementation of the DCCP protocol + * Copyright (c) 2005 Arnaldo Carvalho de Melo + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +/* Read about the ECN nonce to see why it is 253 */ +#define DCCP_MAX_ACKVEC_LEN 253 + +#define DCCP_ACKVEC_STATE_RECEIVED 0 +#define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) +#define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6) + +#define DCCP_ACKVEC_STATE_MASK 0xC0 /* 11000000 */ +#define DCCP_ACKVEC_LEN_MASK 0x3F /* 00111111 */ + +/** struct dccp_ackvec - ack vector + * + * This data structure is the one defined in the DCCP draft + * Appendix A. + * + * @dccpav_buf_head - circular buffer head + * @dccpav_buf_tail - circular buffer tail + * @dccpav_buf_ackno - ack # of the most recent packet acknowledgeable in the + * buffer (i.e. %dccpav_buf_head) + * @dccpav_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked + * by the buffer with State 0 + * + * Additionally, the HC-Receiver must keep some information about the + * Ack Vectors it has recently sent. For each packet sent carrying an + * Ack Vector, it remembers four variables: + * + * @dccpav_ack_seqno - the Sequence Number used for the packet + * (HC-Receiver seqno) + * @dccpav_ack_ptr - the value of buf_head at the time of acknowledgement. + * @dccpav_ack_ackno - the Acknowledgement Number used for the packet + * (HC-Sender seqno) + * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. + * + * @dccpav_buf_len - circular buffer length + * @dccpav_time - the time in usecs + * @dccpav_buf - circular buffer of acknowledgeable packets + */ +struct dccp_ackvec { + unsigned int dccpav_buf_head; + unsigned int dccpav_buf_tail; + u64 dccpav_buf_ackno; + u64 dccpav_ack_seqno; + u64 dccpav_ack_ackno; + unsigned int dccpav_ack_ptr; + unsigned int dccpav_sent_len; + unsigned int dccpav_vec_len; + unsigned int dccpav_buf_len; + struct timeval dccpav_time; + u8 dccpav_buf_nonce; + u8 dccpav_ack_nonce; + u8 dccpav_buf[0]; +}; + +struct sock; +struct sk_buff; + +#ifdef CONFIG_IP_DCCP_ACKVEC +extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, + const unsigned int __nocast priority); +extern void dccp_ackvec_free(struct dccp_ackvec *av); + +extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, + const u64 ackno, const u8 state); + +extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, + struct sock *sk, const u64 ackno); +extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, + const u8 opt, const u8 *value, const u8 len); + +extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); + +static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) +{ + return av->dccpav_sent_len != av->dccpav_vec_len; +} +#else /* CONFIG_IP_DCCP_ACKVEC */ +static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, + const unsigned int __nocast priority) +{ + return NULL; +} + +static inline void dccp_ackvec_free(struct dccp_ackvec *av) +{ +} + +static inline int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, + const u64 ackno, const u8 state) +{ + return -1; +} + +static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, + struct sock *sk, const u64 ackno) +{ +} + +static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, + const u8 opt, const u8 *value, const u8 len) +{ + return -1; +} + +static inline int dccp_insert_option_ackvec(const struct sock *sk, + const struct sk_buff *skb) +{ + return -1; +} + +static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) +{ + return 0; +} +#endif /* CONFIG_IP_DCCP_ACKVEC */ +#endif /* _ACKVEC_H */ 
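Each byte of dccpav_buf above packs a 2-bit receive state in the top bits (DCCP_ACKVEC_STATE_MASK) and a 6-bit run length in the low bits (DCCP_ACKVEC_LEN_MASK); a run length of N covers N+1 consecutive sequence numbers, newest first from dccpav_buf_head. A minimal standalone sketch of how such a vector decodes — userspace code with an invented three-byte buffer, not kernel code and not part of the patch:

/* Decode an Ack Vector option body: each byte is a 2-bit state plus a
 * 6-bit run length, walked from the newest acknowledged packet down,
 * mirroring the loop in dccp_ackvec_check_rcv_ackvector() above. */
#include <stdint.h>
#include <stdio.h>

#define EX_STATE_MASK 0xC0	/* same layout as DCCP_ACKVEC_STATE_MASK */
#define EX_LEN_MASK   0x3F	/* same layout as DCCP_ACKVEC_LEN_MASK */

int main(void)
{
	/* Invented example: 3 received, 1 not received, 2 received. */
	const uint8_t vec[] = { 0x02, 0xc0, 0x01 };
	unsigned long long ackno = 1000;	/* seqno of the newest packet */
	size_t i;

	for (i = 0; i < sizeof(vec); i++) {
		const unsigned int state = (vec[i] & EX_STATE_MASK) >> 6;
		const unsigned int rl = vec[i] & EX_LEN_MASK;

		printf("seq %llu..%llu: state %u\n",
		       ackno - rl, ackno, state);
		ackno -= rl + 1;
	}
	return 0;
}

Run length 0 therefore still denotes a single packet, which is why the duplicate check in dccp_ackvec_add() above uses delta < len + 1.
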
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index be7a660b6b24..5871c027f9dc 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -17,6 +17,7 @@ #include #include #include +#include "ackvec.h" #ifdef CONFIG_IP_DCCP_DEBUG extern int dccp_debug; @@ -358,6 +359,17 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq) (dp->dccps_gss - dp->dccps_options.dccpo_sequence_window + 1)); } + +static inline int dccp_ack_pending(const struct sock *sk) +{ + const struct dccp_sock *dp = dccp_sk(sk); + return dp->dccps_timestamp_echo != 0 || +#ifdef CONFIG_IP_DCCP_ACKVEC + (dp->dccps_options.dccpo_send_ack_vector && + dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || +#endif + inet_csk_ack_scheduled(sk); +} extern void dccp_insert_options(struct sock *sk, struct sk_buff *skb); extern void dccp_insert_option_elapsed_time(struct sock *sk, @@ -371,65 +383,6 @@ extern void dccp_insert_option(struct sock *sk, struct sk_buff *skb, extern struct socket *dccp_ctl_socket; -#define DCCP_ACKPKTS_STATE_RECEIVED 0 -#define DCCP_ACKPKTS_STATE_ECN_MARKED (1 << 6) -#define DCCP_ACKPKTS_STATE_NOT_RECEIVED (3 << 6) - -#define DCCP_ACKPKTS_STATE_MASK 0xC0 /* 11000000 */ -#define DCCP_ACKPKTS_LEN_MASK 0x3F /* 00111111 */ - -/** struct dccp_ackpkts - acknowledgeable packets - * - * This data structure is the one defined in the DCCP draft - * Appendix A. - * - * @dccpap_buf_head - circular buffer head - * @dccpap_buf_tail - circular buffer tail - * @dccpap_buf_ackno - ack # of the most recent packet acknowledgeable in the - * buffer (i.e. %dccpap_buf_head) - * @dccpap_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked - * by the buffer with State 0 - * - * Additionally, the HC-Receiver must keep some information about the - * Ack Vectors it has recently sent. For each packet sent carrying an - * Ack Vector, it remembers four variables: - * - * @dccpap_ack_seqno - the Sequence Number used for the packet - * (HC-Receiver seqno) - * @dccpap_ack_ptr - the value of buf_head at the time of acknowledgement. - * @dccpap_ack_ackno - the Acknowledgement Number used for the packet - * (HC-Sender seqno) - * @dccpap_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. 
- * - * @dccpap_buf_len - circular buffer length - * @dccpap_time - the time in usecs - * @dccpap_buf - circular buffer of acknowledgeable packets - */ -struct dccp_ackpkts { - unsigned int dccpap_buf_head; - unsigned int dccpap_buf_tail; - u64 dccpap_buf_ackno; - u64 dccpap_ack_seqno; - u64 dccpap_ack_ackno; - unsigned int dccpap_ack_ptr; - unsigned int dccpap_buf_vector_len; - unsigned int dccpap_ack_vector_len; - unsigned int dccpap_buf_len; - struct timeval dccpap_time; - u8 dccpap_buf_nonce; - u8 dccpap_ack_nonce; - u8 dccpap_buf[0]; -}; - -extern struct dccp_ackpkts * - dccp_ackpkts_alloc(unsigned int len, - const unsigned int __nocast priority); -extern void dccp_ackpkts_free(struct dccp_ackpkts *ap); -extern int dccp_ackpkts_add(struct dccp_ackpkts *ap, const struct sock *sk, - u64 ackno, u8 state); -extern void dccp_ackpkts_check_rcv_ackno(struct dccp_ackpkts *ap, - struct sock *sk, u64 ackno); - extern void dccp_timestamp(const struct sock *sk, struct timeval *tv); static inline suseconds_t timeval_usecs(const struct timeval *tv) @@ -470,15 +423,4 @@ static inline void timeval_sub_usecs(struct timeval *tv, } } -#ifdef CONFIG_IP_DCCP_DEBUG -extern void dccp_ackvector_print(const u64 ackno, - const unsigned char *vector, int len); -extern void dccp_ackpkts_print(const struct dccp_ackpkts *ap); -#else -static inline void dccp_ackvector_print(const u64 ackno, - const unsigned char *vector, - int len) { } -static inline void dccp_ackpkts_print(const struct dccp_ackpkts *ap) { } -#endif - #endif /* _DCCP_H */ diff --git a/net/dccp/input.c b/net/dccp/input.c index 062e9f8359d0..1b6b2cb12376 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -16,6 +16,7 @@ #include +#include "ackvec.h" #include "ccid.h" #include "dccp.h" @@ -60,8 +61,8 @@ static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) struct dccp_sock *dp = dccp_sk(sk); if (dp->dccps_options.dccpo_send_ack_vector) - dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk, - DCCP_SKB_CB(skb)->dccpd_ack_seq); + dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk, + DCCP_SKB_CB(skb)->dccpd_ack_seq); } static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) @@ -164,37 +165,11 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) dccp_event_ack_recv(sk, skb); - /* - * FIXME: check ECN to see if we should use - * DCCP_ACKPKTS_STATE_ECN_MARKED - */ - if (dp->dccps_options.dccpo_send_ack_vector) { - struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts; - - if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk, - DCCP_SKB_CB(skb)->dccpd_seq, - DCCP_ACKPKTS_STATE_RECEIVED)) { - LIMIT_NETDEBUG(KERN_WARNING "DCCP: acknowledgeable " - "packets buffer full!\n"); - ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1; - inet_csk_schedule_ack(sk); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - TCP_DELACK_MIN, - DCCP_RTO_MAX); - goto discard; - } - - /* - * FIXME: this activation is probably wrong, have to study more - * TCP delack machinery and how it fits into DCCP draft, but - * for now it kinda "works" 8) - */ - if (!inet_csk_ack_scheduled(sk)) { - inet_csk_schedule_ack(sk); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ, - DCCP_RTO_MAX); - } - } + if (dp->dccps_options.dccpo_send_ack_vector && + dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, + DCCP_SKB_CB(skb)->dccpd_seq, + DCCP_ACKVEC_STATE_RECEIVED)) + goto discard; ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); @@ -495,29 
+470,11 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); - /* - * FIXME: check ECN to see if we should use - * DCCP_ACKPKTS_STATE_ECN_MARKED - */ - if (dp->dccps_options.dccpo_send_ack_vector) { - if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts, sk, - dcb->dccpd_seq, - DCCP_ACKPKTS_STATE_RECEIVED)) - goto discard; - /* - * FIXME: this activation is probably wrong, have to - * study more TCP delack machinery and how it fits into - * DCCP draft, but for now it kinda "works" 8) - */ - if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno == - DCCP_MAX_SEQNO + 1) && - !inet_csk_ack_scheduled(sk)) { - inet_csk_schedule_ack(sk); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - TCP_DELACK_MIN, - DCCP_RTO_MAX); - } - } + if (dp->dccps_options.dccpo_send_ack_vector && + dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, + DCCP_SKB_CB(skb)->dccpd_seq, + DCCP_ACKVEC_STATE_RECEIVED)) + goto discard; } /* diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 94a440b2685b..82434e4a42df 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -23,6 +23,7 @@ #include #include +#include "ackvec.h" #include "ccid.h" #include "dccp.h" @@ -1112,45 +1113,7 @@ int dccp_v4_rcv(struct sk_buff *skb) goto discard_it; dh = dccp_hdr(skb); -#if 0 - /* - * Use something like this to simulate some DATA/DATAACK loss to test - * dccp_ackpkts_add, you'll get something like this on a session that - * sends 10 DATA/DATAACK packets: - * - * ackpkts_print: 281473596467422 |0,0|3,0|0,0|3,0|0,0|3,0|0,0|3,0|0,1| - * - * 0, 0 means: DCCP_ACKPKTS_STATE_RECEIVED, RLE == just this packet - * 0, 1 means: DCCP_ACKPKTS_STATE_RECEIVED, RLE == two adjacent packets - * with the same state - * 3, 0 means: DCCP_ACKPKTS_STATE_NOT_RECEIVED, RLE == just this packet - * - * So... 
- * - * 281473596467422 was received - * 281473596467421 was not received - * 281473596467420 was received - * 281473596467419 was not received - * 281473596467418 was received - * 281473596467417 was not received - * 281473596467416 was received - * 281473596467415 was not received - * 281473596467414 was received - * 281473596467413 was received (this one was the 3way handshake - * RESPONSE) - * - */ - if (dh->dccph_type == DCCP_PKT_DATA || - dh->dccph_type == DCCP_PKT_DATAACK) { - static int discard = 0; - if (discard) { - discard = 0; - goto discard_it; - } - discard = 1; - } -#endif DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; @@ -1264,11 +1227,9 @@ static int dccp_v4_init_sock(struct sock *sk) do_gettimeofday(&dp->dccps_epoch); if (dp->dccps_options.dccpo_send_ack_vector) { - dp->dccps_hc_rx_ackpkts = - dccp_ackpkts_alloc(DCCP_MAX_ACK_VECTOR_LEN, - GFP_KERNEL); - - if (dp->dccps_hc_rx_ackpkts == NULL) + dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(DCCP_MAX_ACKVEC_LEN, + GFP_KERNEL); + if (dp->dccps_hc_rx_ackvec == NULL) return -ENOMEM; } @@ -1288,8 +1249,10 @@ static int dccp_v4_init_sock(struct sock *sk) dp->dccps_hc_tx_ccid == NULL) { ccid_exit(dp->dccps_hc_rx_ccid, sk); ccid_exit(dp->dccps_hc_tx_ccid, sk); - dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts); - dp->dccps_hc_rx_ackpkts = NULL; + if (dp->dccps_options.dccpo_send_ack_vector) { + dccp_ackvec_free(dp->dccps_hc_rx_ackvec); + dp->dccps_hc_rx_ackvec = NULL; + } dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; return -ENOMEM; } @@ -1331,8 +1294,10 @@ static int dccp_v4_destroy_sock(struct sock *sk) ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk); ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk); - dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts); - dp->dccps_hc_rx_ackpkts = NULL; + if (dp->dccps_options.dccpo_send_ack_vector) { + dccp_ackvec_free(dp->dccps_hc_rx_ackvec); + dp->dccps_hc_rx_ackvec = NULL; + } ccid_exit(dp->dccps_hc_rx_ccid, sk); ccid_exit(dp->dccps_hc_tx_ccid, sk); dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 933e10db1789..1393461898bb 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -19,6 +19,7 @@ #include #include +#include "ackvec.h" #include "ccid.h" #include "dccp.h" @@ -94,23 +95,23 @@ struct sock *dccp_create_openreq_child(struct sock *sk, struct dccp_sock *newdp = dccp_sk(newsk); newdp->dccps_role = DCCP_ROLE_SERVER; - newdp->dccps_hc_rx_ackpkts = NULL; + newdp->dccps_hc_rx_ackvec = NULL; newdp->dccps_service_list = NULL; newdp->dccps_service = dreq->dreq_service; newicsk->icsk_rto = DCCP_TIMEOUT_INIT; do_gettimeofday(&newdp->dccps_epoch); if (newdp->dccps_options.dccpo_send_ack_vector) { - newdp->dccps_hc_rx_ackpkts = - dccp_ackpkts_alloc(DCCP_MAX_ACK_VECTOR_LEN, - GFP_ATOMIC); + newdp->dccps_hc_rx_ackvec = + dccp_ackvec_alloc(DCCP_MAX_ACKVEC_LEN, + GFP_ATOMIC); /* * XXX: We're using the same CCIDs set on the parent, * i.e. sk_clone copied the master sock and left the * CCID pointers for this child, that is why we do the * __ccid_get calls. 
*/ - if (unlikely(newdp->dccps_hc_rx_ackpkts == NULL)) + if (unlikely(newdp->dccps_hc_rx_ackvec == NULL)) goto out_free; } @@ -118,7 +119,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk, newsk) != 0 || ccid_hc_tx_init(newdp->dccps_hc_tx_ccid, newsk) != 0)) { - dccp_ackpkts_free(newdp->dccps_hc_rx_ackpkts); + dccp_ackvec_free(newdp->dccps_hc_rx_ackvec); ccid_hc_rx_exit(newdp->dccps_hc_rx_ccid, newsk); ccid_hc_tx_exit(newdp->dccps_hc_tx_ccid, newsk); out_free: diff --git a/net/dccp/options.c b/net/dccp/options.c index d4c4242d8dd7..c480c506a4a4 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -18,15 +18,10 @@ #include #include +#include "ackvec.h" #include "ccid.h" #include "dccp.h" -static void dccp_ackpkts_check_rcv_ackvector(struct dccp_ackpkts *ap, - struct sock *sk, - const u64 ackno, - const unsigned char len, - const unsigned char *vector); - /* stores the default values for new connection. may be changed with sysctl */ static const struct dccp_options dccpo_default_values = { .dccpo_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW, @@ -113,25 +108,13 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) opt_recv->dccpor_ndp); break; case DCCPO_ACK_VECTOR_0: - if (len > DCCP_MAX_ACK_VECTOR_LEN) - goto out_invalid_option; - + case DCCPO_ACK_VECTOR_1: if (pkt_type == DCCP_PKT_DATA) continue; - opt_recv->dccpor_ack_vector_len = len; - opt_recv->dccpor_ack_vector_idx = value - options; - - dccp_pr_debug("%sACK vector 0, len=%d, ack_ackno=%llu\n", - debug_prefix, len, - (unsigned long long) - DCCP_SKB_CB(skb)->dccpd_ack_seq); - dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, - value, len); - dccp_ackpkts_check_rcv_ackvector(dp->dccps_hc_rx_ackpkts, - sk, - DCCP_SKB_CB(skb)->dccpd_ack_seq, - len, value); + if (dp->dccps_options.dccpo_send_ack_vector && + dccp_ackvec_parse(sk, skb, opt, value, len)) + goto out_invalid_option; break; case DCCPO_TIMESTAMP: if (len != 4) @@ -352,86 +335,6 @@ void dccp_insert_option_elapsed_time(struct sock *sk, EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time); -static void dccp_insert_option_ack_vector(struct sock *sk, struct sk_buff *skb) -{ - struct dccp_sock *dp = dccp_sk(sk); -#ifdef CONFIG_IP_DCCP_DEBUG - const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? - "CLIENT TX opt: " : "server TX opt: "; -#endif - struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts; - int len = ap->dccpap_buf_vector_len + 2; - struct timeval now; - u32 elapsed_time; - unsigned char *to, *from; - - dccp_timestamp(sk, &now); - elapsed_time = timeval_delta(&now, &ap->dccpap_time) / 10; - - if (elapsed_time != 0) - dccp_insert_option_elapsed_time(sk, skb, elapsed_time); - - if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) { - LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to " - "insert ACK Vector!\n"); - return; - } - - /* - * XXX: now we have just one ack vector sent record, so - * we have to wait for it to be cleared. - * - * Of course this is not acceptable, but this is just for - * basic testing now. 
- */ - if (ap->dccpap_ack_seqno != DCCP_MAX_SEQNO + 1) - return; - - DCCP_SKB_CB(skb)->dccpd_opt_len += len; - - to = skb_push(skb, len); - *to++ = DCCPO_ACK_VECTOR_0; - *to++ = len; - - len = ap->dccpap_buf_vector_len; - from = ap->dccpap_buf + ap->dccpap_buf_head; - - /* Check if buf_head wraps */ - if (ap->dccpap_buf_head + len > ap->dccpap_buf_len) { - const unsigned int tailsize = (ap->dccpap_buf_len - - ap->dccpap_buf_head); - - memcpy(to, from, tailsize); - to += tailsize; - len -= tailsize; - from = ap->dccpap_buf; - } - - memcpy(to, from, len); - /* - * From draft-ietf-dccp-spec-11.txt: - * - * For each acknowledgement it sends, the HC-Receiver will add an - * acknowledgement record. ack_seqno will equal the HC-Receiver - * sequence number it used for the ack packet; ack_ptr will equal - * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will - * equal buf_nonce. - * - * This implemention uses just one ack record for now. - */ - ap->dccpap_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq; - ap->dccpap_ack_ptr = ap->dccpap_buf_head; - ap->dccpap_ack_ackno = ap->dccpap_buf_ackno; - ap->dccpap_ack_nonce = ap->dccpap_buf_nonce; - ap->dccpap_ack_vector_len = ap->dccpap_buf_vector_len; - - dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, " - "ack_ackno=%llu\n", - debug_prefix, ap->dccpap_ack_vector_len, - (unsigned long long) ap->dccpap_ack_seqno, - (unsigned long long) ap->dccpap_ack_ackno); -} - void dccp_timestamp(const struct sock *sk, struct timeval *tv) { const struct dccp_sock *dp = dccp_sk(sk); @@ -528,9 +431,8 @@ void dccp_insert_options(struct sock *sk, struct sk_buff *skb) if (!dccp_packet_without_ack(skb)) { if (dp->dccps_options.dccpo_send_ack_vector && - (dp->dccps_hc_rx_ackpkts->dccpap_buf_ackno != - DCCP_MAX_SEQNO + 1)) - dccp_insert_option_ack_vector(sk, skb); + dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) + dccp_insert_option_ackvec(sk, skb); if (dp->dccps_timestamp_echo != 0) dccp_insert_option_timestamp_echo(sk, skb); } @@ -557,331 +459,3 @@ void dccp_insert_options(struct sock *sk, struct sk_buff *skb) } } } - -struct dccp_ackpkts *dccp_ackpkts_alloc(const unsigned int len, - const unsigned int __nocast priority) -{ - struct dccp_ackpkts *ap = kmalloc(sizeof(*ap) + len, priority); - - if (ap != NULL) { -#ifdef CONFIG_IP_DCCP_DEBUG - memset(ap->dccpap_buf, 0xFF, len); -#endif - ap->dccpap_buf_len = len; - ap->dccpap_buf_head = - ap->dccpap_buf_tail = - ap->dccpap_buf_len - 1; - ap->dccpap_buf_ackno = - ap->dccpap_ack_ackno = - ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1; - ap->dccpap_buf_nonce = ap->dccpap_buf_nonce = 0; - ap->dccpap_ack_ptr = 0; - ap->dccpap_time.tv_sec = 0; - ap->dccpap_time.tv_usec = 0; - ap->dccpap_buf_vector_len = ap->dccpap_ack_vector_len = 0; - } - - return ap; -} - -void dccp_ackpkts_free(struct dccp_ackpkts *ap) -{ - if (ap != NULL) { -#ifdef CONFIG_IP_DCCP_DEBUG - memset(ap, 0xFF, sizeof(*ap) + ap->dccpap_buf_len); -#endif - kfree(ap); - } -} - -static inline u8 dccp_ackpkts_state(const struct dccp_ackpkts *ap, - const unsigned int index) -{ - return ap->dccpap_buf[index] & DCCP_ACKPKTS_STATE_MASK; -} - -static inline u8 dccp_ackpkts_len(const struct dccp_ackpkts *ap, - const unsigned int index) -{ - return ap->dccpap_buf[index] & DCCP_ACKPKTS_LEN_MASK; -} - -/* - * If several packets are missing, the HC-Receiver may prefer to enter multiple - * bytes with run length 0, rather than a single byte with a larger run length; - * this simplifies table updates if one of the missing packets arrives. 
- */ -static inline int dccp_ackpkts_set_buf_head_state(struct dccp_ackpkts *ap, - const unsigned int packets, - const unsigned char state) -{ - unsigned int gap; - signed long new_head; - - if (ap->dccpap_buf_vector_len + packets > ap->dccpap_buf_len) - return -ENOBUFS; - - gap = packets - 1; - new_head = ap->dccpap_buf_head - packets; - - if (new_head < 0) { - if (gap > 0) { - memset(ap->dccpap_buf, DCCP_ACKPKTS_STATE_NOT_RECEIVED, - gap + new_head + 1); - gap = -new_head; - } - new_head += ap->dccpap_buf_len; - } - - ap->dccpap_buf_head = new_head; - - if (gap > 0) - memset(ap->dccpap_buf + ap->dccpap_buf_head + 1, - DCCP_ACKPKTS_STATE_NOT_RECEIVED, gap); - - ap->dccpap_buf[ap->dccpap_buf_head] = state; - ap->dccpap_buf_vector_len += packets; - return 0; -} - -/* - * Implements the draft-ietf-dccp-spec-11.txt Appendix A - */ -int dccp_ackpkts_add(struct dccp_ackpkts *ap, const struct sock *sk, - u64 ackno, u8 state) -{ - /* - * Check at the right places if the buffer is full, if it is, tell the - * caller to start dropping packets till the HC-Sender acks our ACK - * vectors, when we will free up space in dccpap_buf. - * - * We may well decide to do buffer compression, etc, but for now lets - * just drop. - * - * From Appendix A: - * - * Of course, the circular buffer may overflow, either when the - * HC-Sender is sending data at a very high rate, when the - * HC-Receiver's acknowledgements are not reaching the HC-Sender, - * or when the HC-Sender is forgetting to acknowledge those acks - * (so the HC-Receiver is unable to clean up old state). In this - * case, the HC-Receiver should either compress the buffer (by - * increasing run lengths when possible), transfer its state to - * a larger buffer, or, as a last resort, drop all received - * packets, without processing them whatsoever, until its buffer - * shrinks again. - */ - - /* See if this is the first ackno being inserted */ - if (ap->dccpap_buf_vector_len == 0) { - ap->dccpap_buf[ap->dccpap_buf_head] = state; - ap->dccpap_buf_vector_len = 1; - } else if (after48(ackno, ap->dccpap_buf_ackno)) { - const u64 delta = dccp_delta_seqno(ap->dccpap_buf_ackno, - ackno); - - /* - * Look if the state of this packet is the same as the - * previous ackno and if so if we can bump the head len. - */ - if (delta == 1 && - dccp_ackpkts_state(ap, ap->dccpap_buf_head) == state && - (dccp_ackpkts_len(ap, ap->dccpap_buf_head) < - DCCP_ACKPKTS_LEN_MASK)) - ap->dccpap_buf[ap->dccpap_buf_head]++; - else if (dccp_ackpkts_set_buf_head_state(ap, delta, state)) - return -ENOBUFS; - } else { - /* - * A.1.2. Old Packets - * - * When a packet with Sequence Number S arrives, and - * S <= buf_ackno, the HC-Receiver will scan the table - * for the byte corresponding to S. (Indexing structures - * could reduce the complexity of this scan.) - */ - u64 delta = dccp_delta_seqno(ackno, ap->dccpap_buf_ackno); - unsigned int index = ap->dccpap_buf_head; - - while (1) { - const u8 len = dccp_ackpkts_len(ap, index); - const u8 state = dccp_ackpkts_state(ap, index); - /* - * valid packets not yet in dccpap_buf have a reserved - * entry, with a len equal to 0. - */ - if (state == DCCP_ACKPKTS_STATE_NOT_RECEIVED && - len == 0 && delta == 0) { /* Found our - reserved seat! 
*/ - dccp_pr_debug("Found %llu reserved seat!\n", - (unsigned long long) ackno); - ap->dccpap_buf[index] = state; - goto out; - } - /* len == 0 means one packet */ - if (delta < len + 1) - goto out_duplicate; - - delta -= len + 1; - if (++index == ap->dccpap_buf_len) - index = 0; - } - } - - ap->dccpap_buf_ackno = ackno; - dccp_timestamp(sk, &ap->dccpap_time); -out: - dccp_pr_debug(""); - dccp_ackpkts_print(ap); - return 0; - -out_duplicate: - /* Duplicate packet */ - dccp_pr_debug("Received a dup or already considered lost " - "packet: %llu\n", (unsigned long long) ackno); - return -EILSEQ; -} - -#ifdef CONFIG_IP_DCCP_DEBUG -void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, - int len) -{ - if (!dccp_debug) - return; - - printk("ACK vector len=%d, ackno=%llu |", len, - (unsigned long long) ackno); - - while (len--) { - const u8 state = (*vector & DCCP_ACKPKTS_STATE_MASK) >> 6; - const u8 rl = (*vector & DCCP_ACKPKTS_LEN_MASK); - - printk("%d,%d|", state, rl); - ++vector; - } - - printk("\n"); -} - -void dccp_ackpkts_print(const struct dccp_ackpkts *ap) -{ - dccp_ackvector_print(ap->dccpap_buf_ackno, - ap->dccpap_buf + ap->dccpap_buf_head, - ap->dccpap_buf_vector_len); -} -#endif - -static void dccp_ackpkts_trow_away_ack_record(struct dccp_ackpkts *ap) -{ - /* - * As we're keeping track of the ack vector size - * (dccpap_buf_vector_len) and the sent ack vector size - * (dccpap_ack_vector_len) we don't need dccpap_buf_tail at all, but - * keep this code here as in the future we'll implement a vector of - * ack records, as suggested in draft-ietf-dccp-spec-11.txt - * Appendix A. -acme - */ -#if 0 - ap->dccpap_buf_tail = ap->dccpap_ack_ptr + 1; - if (ap->dccpap_buf_tail >= ap->dccpap_buf_len) - ap->dccpap_buf_tail -= ap->dccpap_buf_len; -#endif - ap->dccpap_buf_vector_len -= ap->dccpap_ack_vector_len; -} - -void dccp_ackpkts_check_rcv_ackno(struct dccp_ackpkts *ap, struct sock *sk, - u64 ackno) -{ - /* Check if we actually sent an ACK vector */ - if (ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1) - return; - - if (ackno == ap->dccpap_ack_seqno) { -#ifdef CONFIG_IP_DCCP_DEBUG - struct dccp_sock *dp = dccp_sk(sk); - const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? - "CLIENT rx ack: " : "server rx ack: "; -#endif - dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, " - "ack_ackno=%llu, ACKED!\n", - debug_prefix, 1, - (unsigned long long) ap->dccpap_ack_seqno, - (unsigned long long) ap->dccpap_ack_ackno); - dccp_ackpkts_trow_away_ack_record(ap); - ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1; - } -} - -static void dccp_ackpkts_check_rcv_ackvector(struct dccp_ackpkts *ap, - struct sock *sk, u64 ackno, - const unsigned char len, - const unsigned char *vector) -{ - unsigned char i; - - /* Check if we actually sent an ACK vector */ - if (ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1) - return; - /* - * We're in the receiver half connection, so if the received an ACK - * vector ackno (e.g. 50) before dccpap_ack_seqno (e.g. 52), we're - * not interested. - * - * Extra explanation with example: - * - * if we received an ACK vector with ackno 50, it can only be acking - * 50, 49, 48, etc, not 52 (the seqno for the ACK vector we sent). - */ - /* dccp_pr_debug("is %llu < %llu? 
", ackno, ap->dccpap_ack_seqno); */ - if (before48(ackno, ap->dccpap_ack_seqno)) { - /* dccp_pr_debug_cat("yes\n"); */ - return; - } - /* dccp_pr_debug_cat("no\n"); */ - - i = len; - while (i--) { - const u8 rl = (*vector & DCCP_ACKPKTS_LEN_MASK); - u64 ackno_end_rl; - - dccp_set_seqno(&ackno_end_rl, ackno - rl); - - /* - * dccp_pr_debug("is %llu <= %llu <= %llu? ", ackno_end_rl, - * ap->dccpap_ack_seqno, ackno); - */ - if (between48(ap->dccpap_ack_seqno, ackno_end_rl, ackno)) { - const u8 state = (*vector & - DCCP_ACKPKTS_STATE_MASK) >> 6; - /* dccp_pr_debug_cat("yes\n"); */ - - if (state != DCCP_ACKPKTS_STATE_NOT_RECEIVED) { -#ifdef CONFIG_IP_DCCP_DEBUG - struct dccp_sock *dp = dccp_sk(sk); - const char *debug_prefix = - dp->dccps_role == DCCP_ROLE_CLIENT ? - "CLIENT rx ack: " : "server rx ack: "; -#endif - dccp_pr_debug("%sACK vector 0, len=%d, " - "ack_seqno=%llu, ack_ackno=%llu, " - "ACKED!\n", - debug_prefix, len, - (unsigned long long) - ap->dccpap_ack_seqno, - (unsigned long long) - ap->dccpap_ack_ackno); - dccp_ackpkts_trow_away_ack_record(ap); - } - /* - * If dccpap_ack_seqno was not received, no problem - * we'll send another ACK vector. - */ - ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1; - break; - } - /* dccp_pr_debug_cat("no\n"); */ - - dccp_set_seqno(&ackno, ackno_end_rl - 1); - ++vector; - } -} diff --git a/net/dccp/output.c b/net/dccp/output.c index 156b1d29a156..4786bdcddcc9 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -16,6 +16,7 @@ #include +#include "ackvec.h" #include "ccid.h" #include "dccp.h" @@ -225,7 +226,6 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo) err = dccp_wait_for_ccid(sk, skb, timeo); if (err == 0) { - const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); const int len = skb->len; @@ -236,15 +236,7 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo) inet_csk(sk)->icsk_rto, DCCP_RTO_MAX); dcb->dccpd_type = DCCP_PKT_DATAACK; - /* - * FIXME: we really should have a - * dccps_ack_pending or use icsk. - */ - } else if (inet_csk_ack_scheduled(sk) || - dp->dccps_timestamp_echo != 0 || - (dp->dccps_options.dccpo_send_ack_vector && - ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 && - ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1)) + } else if (dccp_ack_pending(sk)) dcb->dccpd_type = DCCP_PKT_DATAACK; else dcb->dccpd_type = DCCP_PKT_DATA; -- cgit v1.2.3 From 65299d6c3cfb49cc3eee4fc483e7edd23ea7b2ed Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 18 Sep 2005 00:18:32 -0700 Subject: [CCID3]: Introduce include/linux/tfrc.h Moving the TFRC sender and receiver variables to separate structs, so that we can copy these structs to userspace thru getsockopt, dccp_diag, etc. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/tfrc.h | 35 +++++++++++++++++++++++++++++++++++ net/dccp/ccids/ccid3.h | 23 +++++++++++++---------- 2 files changed, 48 insertions(+), 10 deletions(-) create mode 100644 include/linux/tfrc.h (limited to 'include/linux') diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h new file mode 100644 index 000000000000..7dab7831c3cb --- /dev/null +++ b/include/linux/tfrc.h @@ -0,0 +1,35 @@ +#ifndef _LINUX_TFRC_H_ +#define _LINUX_TFRC_H_ +/* + * include/linux/tfrc.h + * + * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
+ * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Arnaldo Carvalho de Melo + * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +struct tfrc_rx_info { + __u32 tfrcrx_x_recv; + __u32 tfrcrx_rtt; + __u32 tfrcrx_p; +}; + +struct tfrc_tx_info { + __u32 tfrctx_x; + __u32 tfrctx_x_recv; + __u32 tfrctx_x_calc; + __u32 tfrctx_rtt; + __u32 tfrctx_p; + __u32 tfrctx_rto; + __u32 tfrctx_ipi; +}; + +#endif /* _LINUX_TFRC_H_ */ diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index eb248778eea3..0bde4583d091 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h @@ -40,6 +40,7 @@ #include #include #include +#include #define TFRC_MIN_PACKET_SIZE 16 #define TFRC_STD_PACKET_SIZE 256 @@ -93,12 +94,15 @@ struct ccid3_options_received { * @ccid3hctx_hist - Packet history */ struct ccid3_hc_tx_sock { - u32 ccid3hctx_x; - u32 ccid3hctx_x_recv; - u32 ccid3hctx_x_calc; + struct tfrc_tx_info ccid3hctx_tfrc; +#define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x +#define ccid3hctx_x_recv ccid3hctx_tfrc.tfrctx_x_recv +#define ccid3hctx_x_calc ccid3hctx_tfrc.tfrctx_x_calc +#define ccid3hctx_rtt ccid3hctx_tfrc.tfrctx_rtt +#define ccid3hctx_p ccid3hctx_tfrc.tfrctx_p +#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto +#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi u16 ccid3hctx_s; - u32 ccid3hctx_rtt; - u32 ccid3hctx_p; u8 ccid3hctx_state; u8 ccid3hctx_last_win_count; u8 ccid3hctx_idle; @@ -106,19 +110,19 @@ struct ccid3_hc_tx_sock { struct timer_list ccid3hctx_no_feedback_timer; struct timeval ccid3hctx_t_ld; struct timeval ccid3hctx_t_nom; - u32 ccid3hctx_t_rto; - u32 ccid3hctx_t_ipi; u32 ccid3hctx_delta; struct list_head ccid3hctx_hist; struct ccid3_options_received ccid3hctx_options_received; }; struct ccid3_hc_rx_sock { + struct tfrc_rx_info ccid3hcrx_tfrc; +#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv +#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt +#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p u64 ccid3hcrx_seqno_last_counter:48, ccid3hcrx_state:8, ccid3hcrx_last_counter:4; - u32 ccid3hcrx_rtt; - u32 ccid3hcrx_p; u32 ccid3hcrx_bytes_recv; struct timeval ccid3hcrx_tstamp_last_feedback; struct timeval ccid3hcrx_tstamp_last_ack; @@ -127,7 +131,6 @@ struct ccid3_hc_rx_sock { u16 ccid3hcrx_s; u32 ccid3hcrx_pinv; u32 ccid3hcrx_elapsed_time; - u32 ccid3hcrx_x_recv; }; static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) -- cgit v1.2.3 From 561713cf475de1f671cc89c437927ec008a20209 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 18 Sep 2005 00:18:52 -0700 Subject: [DCCP]: Don't use necessarily the same CCID for tx and rx Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/dccp.h | 3 ++- net/dccp/ipv4.c | 4 ++-- net/dccp/options.c | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 8c8e029095a5..13f9b78483fc 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -330,7 +330,8 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) */ struct dccp_options { __u64 dccpo_sequence_window; - __u8 dccpo_ccid; + __u8 dccpo_rx_ccid; + __u8 dccpo_tx_ccid; __u8 dccpo_send_ack_vector; __u8 dccpo_send_ndp_count; }; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 82434e4a42df..40fe6afacde6 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -1241,9 +1241,9 @@ static int dccp_v4_init_sock(struct sock *sk) * setsockopt(CCIDs-I-want/accept). -acme */ if (likely(!dccp_ctl_socket_init)) { - dp->dccps_hc_rx_ccid = ccid_init(dp->dccps_options.dccpo_ccid, + dp->dccps_hc_rx_ccid = ccid_init(dp->dccps_options.dccpo_rx_ccid, sk); - dp->dccps_hc_tx_ccid = ccid_init(dp->dccps_options.dccpo_ccid, + dp->dccps_hc_tx_ccid = ccid_init(dp->dccps_options.dccpo_tx_ccid, sk); if (dp->dccps_hc_rx_ccid == NULL || dp->dccps_hc_tx_ccid == NULL) { diff --git a/net/dccp/options.c b/net/dccp/options.c index c480c506a4a4..0a76426c9aea 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -25,7 +25,8 @@ /* stores the default values for new connection. may be changed with sysctl */ static const struct dccp_options dccpo_default_values = { .dccpo_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW, - .dccpo_ccid = DCCPF_INITIAL_CCID, + .dccpo_rx_ccid = DCCPF_INITIAL_CCID, + .dccpo_tx_ccid = DCCPF_INITIAL_CCID, .dccpo_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR, .dccpo_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT, }; -- cgit v1.2.3 From 88f964db6ef728982734356bf4c406270ea29c1d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 18 Sep 2005 00:19:32 -0700 Subject: [DCCP]: Introduce CCID getsockopt for the CCIDs Allocation for the optnames is similar to the DCCP options, with a range for rx and tx half connection CCIDs. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/dccp.h | 2 ++ net/dccp/ccid.h | 31 ++++++++++++++++++++++++++++ net/dccp/ccids/ccid3.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++ net/dccp/proto.c | 17 +++++++++------ 4 files changed, 100 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 13f9b78483fc..71fab4311e92 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -179,6 +179,8 @@ enum { /* DCCP socket options */ #define DCCP_SOCKOPT_PACKET_SIZE 1 #define DCCP_SOCKOPT_SERVICE 2 +#define DCCP_SOCKOPT_CCID_RX_INFO 128 +#define DCCP_SOCKOPT_CCID_TX_INFO 192 #define DCCP_SERVICE_LIST_MAX_LEN 32 diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 962f1e9e2f7e..21e55142dcd3 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -14,6 +14,7 @@ */ #include +#include #include #include #include @@ -54,6 +55,14 @@ struct ccid { struct tcp_info *info); void (*ccid_hc_tx_get_info)(struct sock *sk, struct tcp_info *info); + int (*ccid_hc_rx_getsockopt)(struct sock *sk, + const int optname, int len, + u32 __user *optval, + int __user *optlen); + int (*ccid_hc_tx_getsockopt)(struct sock *sk, + const int optname, int len, + u32 __user *optval, + int __user *optlen); }; extern int ccid_register(struct ccid *ccid); @@ -177,4 +186,26 @@ static inline void ccid_hc_tx_get_info(struct ccid *ccid, struct sock *sk, if (ccid->ccid_hc_tx_get_info != NULL) ccid->ccid_hc_tx_get_info(sk, info); } + +static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, + const int optname, int len, + u32 __user *optval, int __user *optlen) +{ + int rc = -ENOPROTOOPT; + if (ccid->ccid_hc_rx_getsockopt != NULL) + rc = ccid->ccid_hc_rx_getsockopt(sk, optname, len, + optval, optlen); + return rc; +} + +static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk, + const int optname, int len, + u32 __user *optval, int __user *optlen) +{ + int rc = -ENOPROTOOPT; + if (ccid->ccid_hc_tx_getsockopt != NULL) + rc = ccid->ccid_hc_tx_getsockopt(sk, optname, len, + optval, optlen); + return rc; +} #endif /* _CCID_H */ diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 38aa84986118..aa68e0ab274d 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -1120,6 +1120,60 @@ static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_rtt = hctx->ccid3hctx_rtt; } +static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, + u32 __user *optval, int __user *optlen) +{ + const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); + const void *val; + + /* Listen socks doesn't have a private CCID block */ + if (sk->sk_state == DCCP_LISTEN) + return -EINVAL; + + switch (optname) { + case DCCP_SOCKOPT_CCID_RX_INFO: + if (len < sizeof(hcrx->ccid3hcrx_tfrc)) + return -EINVAL; + len = sizeof(hcrx->ccid3hcrx_tfrc); + val = &hcrx->ccid3hcrx_tfrc; + break; + default: + return -ENOPROTOOPT; + } + + if (put_user(len, optlen) || copy_to_user(optval, val, len)) + return -EFAULT; + + return 0; +} + +static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, + u32 __user *optval, int __user *optlen) +{ + const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); + const void *val; + + /* Listen socks doesn't have a private CCID block */ + if (sk->sk_state == DCCP_LISTEN) + return -EINVAL; + + switch (optname) { + case DCCP_SOCKOPT_CCID_TX_INFO: + if (len < sizeof(hctx->ccid3hctx_tfrc)) + return -EINVAL; + len = sizeof(hctx->ccid3hctx_tfrc); + val = &hctx->ccid3hctx_tfrc; + 
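		/*
		 * The tfrc_tx_info block selected here is handed as a whole to
		 * the copy_to_user() call further down.  A minimal sketch of
		 * the userspace consumer side (assumptions, none of which are
		 * shown in this hunk: a connected DCCP socket "fd", the
		 * SOL_DCCP socket level, plus the usual <sys/socket.h>,
		 * <stdio.h>, <linux/dccp.h> and <linux/tfrc.h> includes):
		 *
		 *	struct tfrc_tx_info info;
		 *	socklen_t len = sizeof(info);
		 *
		 *	if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID_TX_INFO,
		 *		       &info, &len) == 0)
		 *		printf("rtt=%u x_calc=%u\n",
		 *		       info.tfrctx_rtt, info.tfrctx_x_calc);
		 */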
break; + default: + return -ENOPROTOOPT; + } + + if (put_user(len, optlen) || copy_to_user(optval, val, len)) + return -EFAULT; + + return 0; +} + static struct ccid ccid3 = { .ccid_id = 3, .ccid_name = "ccid3", @@ -1139,6 +1193,8 @@ static struct ccid ccid3 = { .ccid_hc_rx_packet_recv = ccid3_hc_rx_packet_recv, .ccid_hc_rx_get_info = ccid3_hc_rx_get_info, .ccid_hc_tx_get_info = ccid3_hc_tx_get_info, + .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt, + .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, }; module_param(ccid3_debug, int, 0444); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9bda2868eba6..a1cfd0e9e3bc 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -325,12 +325,7 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; - if (optname == DCCP_SOCKOPT_SERVICE) - return dccp_getsockopt_service(sk, len, - (u32 __user *)optval, optlen); - - len = min_t(unsigned int, len, sizeof(int)); - if (len < 0) + if (len < sizeof(int)) return -EINVAL; dp = dccp_sk(sk); @@ -338,7 +333,17 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, switch (optname) { case DCCP_SOCKOPT_PACKET_SIZE: val = dp->dccps_packet_size; + len = sizeof(dp->dccps_packet_size); break; + case DCCP_SOCKOPT_SERVICE: + return dccp_getsockopt_service(sk, len, + (u32 __user *)optval, optlen); + case 128 ... 191: + return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, + len, (u32 __user *)optval, optlen); + case 192 ... 255: + return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname, + len, (u32 __user *)optval, optlen); default: return -ENOPROTOOPT; } -- cgit v1.2.3 From 926b50f92a30090da2c1a8675de954c2d9b09732 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Mon, 19 Sep 2005 15:33:08 -0700 Subject: [NETFILTER]: Add new PPTP conntrack and NAT helper This new "version 3" PPTP conntrack/nat helper is finally ready for mainline inclusion. Special thanks to lots of last-minute bugfixing by Patric McHardy. Signed-off-by: Harald Welte Signed-off-by: David S. 
Miller --- include/linux/netfilter_ipv4/ip_conntrack.h | 12 + include/linux/netfilter_ipv4/ip_conntrack_pptp.h | 332 +++++++++ .../linux/netfilter_ipv4/ip_conntrack_proto_gre.h | 114 +++ include/linux/netfilter_ipv4/ip_conntrack_tuple.h | 7 + include/linux/netfilter_ipv4/ip_nat_pptp.h | 11 + net/ipv4/netfilter/Kconfig | 22 + net/ipv4/netfilter/Makefile | 5 + net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 805 +++++++++++++++++++++ net/ipv4/netfilter/ip_conntrack_proto_gre.c | 327 +++++++++ net/ipv4/netfilter/ip_nat_helper_pptp.c | 401 ++++++++++ net/ipv4/netfilter/ip_nat_proto_gre.c | 214 ++++++ 11 files changed, 2250 insertions(+) create mode 100644 include/linux/netfilter_ipv4/ip_conntrack_pptp.h create mode 100644 include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h create mode 100644 include/linux/netfilter_ipv4/ip_nat_pptp.h create mode 100644 net/ipv4/netfilter/ip_conntrack_helper_pptp.c create mode 100644 net/ipv4/netfilter/ip_conntrack_proto_gre.c create mode 100644 net/ipv4/netfilter/ip_nat_helper_pptp.c create mode 100644 net/ipv4/netfilter/ip_nat_proto_gre.c (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index 7e033e9271a8..2df446c952ef 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h @@ -133,11 +133,13 @@ enum ip_conntrack_expect_events { #include #include +#include #include /* per conntrack: protocol private data */ union ip_conntrack_proto { /* insert conntrack proto private data here */ + struct ip_ct_gre gre; struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; struct ip_ct_icmp icmp; @@ -148,6 +150,7 @@ union ip_conntrack_expect_proto { }; /* Add protocol helper include file here */ +#include #include #include #include @@ -155,12 +158,20 @@ union ip_conntrack_expect_proto { /* per conntrack: application helper private data */ union ip_conntrack_help { /* insert conntrack helper private data (master) here */ + struct ip_ct_pptp_master ct_pptp_info; struct ip_ct_ftp_master ct_ftp_info; struct ip_ct_irc_master ct_irc_info; }; #ifdef CONFIG_IP_NF_NAT_NEEDED #include +#include + +/* per conntrack: nat application helper private data */ +union ip_conntrack_nat_help { + /* insert nat helper private data here */ + struct ip_nat_pptp nat_pptp_info; +}; #endif #include @@ -223,6 +234,7 @@ struct ip_conntrack #ifdef CONFIG_IP_NF_NAT_NEEDED struct { struct ip_nat_info info; + union ip_conntrack_nat_help help; #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) int masq_index; diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h new file mode 100644 index 000000000000..389e3851d52f --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h @@ -0,0 +1,332 @@ +/* PPTP constants and structs */ +#ifndef _CONNTRACK_PPTP_H +#define _CONNTRACK_PPTP_H + +/* state of the control session */ +enum pptp_ctrlsess_state { + PPTP_SESSION_NONE, /* no session present */ + PPTP_SESSION_ERROR, /* some session error */ + PPTP_SESSION_STOPREQ, /* stop_sess request seen */ + PPTP_SESSION_REQUESTED, /* start_sess request seen */ + PPTP_SESSION_CONFIRMED, /* session established */ +}; + +/* state of the call inside the control session */ +enum pptp_ctrlcall_state { + PPTP_CALL_NONE, + PPTP_CALL_ERROR, + PPTP_CALL_OUT_REQ, + PPTP_CALL_OUT_CONF, + PPTP_CALL_IN_REQ, + PPTP_CALL_IN_REP, + PPTP_CALL_IN_CONF, + PPTP_CALL_CLEAR_REQ, +}; + + +/* conntrack private data */ 
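/*
 * ip_ct_pptp_master is embedded in union ip_conntrack_help (see the
 * ip_conntrack.h hunk above), i.e. it lives in the conntrack entry of the
 * TCP/1723 control connection.  The helper records the session/call state
 * machine and the PNS/PAC call IDs seen on the control channel; exp_gre()
 * later uses those call IDs as the GRE tuple keys of the two expected data
 * channels, and the keymap pointers let the GRE protocol tracker recover the
 * source key that enhanced-GRE packets do not carry themselves.
 */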
+struct ip_ct_pptp_master { + enum pptp_ctrlsess_state sstate; /* session state */ + + /* everything below is going to be per-expectation in newnat, + * since there could be more than one call within one session */ + enum pptp_ctrlcall_state cstate; /* call state */ + u_int16_t pac_call_id; /* call id of PAC, host byte order */ + u_int16_t pns_call_id; /* call id of PNS, host byte order */ + + /* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack + * and therefore imposes a fixed limit on the number of maps */ + struct ip_ct_gre_keymap *keymap_orig, *keymap_reply; +}; + +/* conntrack_expect private member */ +struct ip_ct_pptp_expect { + enum pptp_ctrlcall_state cstate; /* call state */ + u_int16_t pac_call_id; /* call id of PAC */ + u_int16_t pns_call_id; /* call id of PNS */ +}; + + +#ifdef __KERNEL__ + +#define IP_CONNTR_PPTP PPTP_CONTROL_PORT + +#define PPTP_CONTROL_PORT 1723 + +#define PPTP_PACKET_CONTROL 1 +#define PPTP_PACKET_MGMT 2 + +#define PPTP_MAGIC_COOKIE 0x1a2b3c4d + +struct pptp_pkt_hdr { + __u16 packetLength; + __u16 packetType; + __u32 magicCookie; +}; + +/* PptpControlMessageType values */ +#define PPTP_START_SESSION_REQUEST 1 +#define PPTP_START_SESSION_REPLY 2 +#define PPTP_STOP_SESSION_REQUEST 3 +#define PPTP_STOP_SESSION_REPLY 4 +#define PPTP_ECHO_REQUEST 5 +#define PPTP_ECHO_REPLY 6 +#define PPTP_OUT_CALL_REQUEST 7 +#define PPTP_OUT_CALL_REPLY 8 +#define PPTP_IN_CALL_REQUEST 9 +#define PPTP_IN_CALL_REPLY 10 +#define PPTP_IN_CALL_CONNECT 11 +#define PPTP_CALL_CLEAR_REQUEST 12 +#define PPTP_CALL_DISCONNECT_NOTIFY 13 +#define PPTP_WAN_ERROR_NOTIFY 14 +#define PPTP_SET_LINK_INFO 15 + +#define PPTP_MSG_MAX 15 + +/* PptpGeneralError values */ +#define PPTP_ERROR_CODE_NONE 0 +#define PPTP_NOT_CONNECTED 1 +#define PPTP_BAD_FORMAT 2 +#define PPTP_BAD_VALUE 3 +#define PPTP_NO_RESOURCE 4 +#define PPTP_BAD_CALLID 5 +#define PPTP_REMOVE_DEVICE_ERROR 6 + +struct PptpControlHeader { + __u16 messageType; + __u16 reserved; +}; + +/* FramingCapability Bitmap Values */ +#define PPTP_FRAME_CAP_ASYNC 0x1 +#define PPTP_FRAME_CAP_SYNC 0x2 + +/* BearerCapability Bitmap Values */ +#define PPTP_BEARER_CAP_ANALOG 0x1 +#define PPTP_BEARER_CAP_DIGITAL 0x2 + +struct PptpStartSessionRequest { + __u16 protocolVersion; + __u8 reserved1; + __u8 reserved2; + __u32 framingCapability; + __u32 bearerCapability; + __u16 maxChannels; + __u16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStartSessionResultCode Values */ +#define PPTP_START_OK 1 +#define PPTP_START_GENERAL_ERROR 2 +#define PPTP_START_ALREADY_CONNECTED 3 +#define PPTP_START_NOT_AUTHORIZED 4 +#define PPTP_START_UNKNOWN_PROTOCOL 5 + +struct PptpStartSessionReply { + __u16 protocolVersion; + __u8 resultCode; + __u8 generalErrorCode; + __u32 framingCapability; + __u32 bearerCapability; + __u16 maxChannels; + __u16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStopReasons */ +#define PPTP_STOP_NONE 1 +#define PPTP_STOP_PROTOCOL 2 +#define PPTP_STOP_LOCAL_SHUTDOWN 3 + +struct PptpStopSessionRequest { + __u8 reason; +}; + +/* PptpStopSessionResultCode */ +#define PPTP_STOP_OK 1 +#define PPTP_STOP_GENERAL_ERROR 2 + +struct PptpStopSessionReply { + __u8 resultCode; + __u8 generalErrorCode; +}; + +struct PptpEchoRequest { + __u32 identNumber; +}; + +/* PptpEchoReplyResultCode */ +#define PPTP_ECHO_OK 1 +#define PPTP_ECHO_GENERAL_ERROR 2 + +struct PptpEchoReply { + __u32 identNumber; + __u8 resultCode; + __u8 generalErrorCode; + __u16 reserved; +}; + +/* PptpFramingType */ 
+#define PPTP_ASYNC_FRAMING 1 +#define PPTP_SYNC_FRAMING 2 +#define PPTP_DONT_CARE_FRAMING 3 + +/* PptpCallBearerType */ +#define PPTP_ANALOG_TYPE 1 +#define PPTP_DIGITAL_TYPE 2 +#define PPTP_DONT_CARE_BEARER_TYPE 3 + +struct PptpOutCallRequest { + __u16 callID; + __u16 callSerialNumber; + __u32 minBPS; + __u32 maxBPS; + __u32 bearerType; + __u32 framingType; + __u16 packetWindow; + __u16 packetProcDelay; + __u16 reserved1; + __u16 phoneNumberLength; + __u16 reserved2; + __u8 phoneNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpCallResultCode */ +#define PPTP_OUTCALL_CONNECT 1 +#define PPTP_OUTCALL_GENERAL_ERROR 2 +#define PPTP_OUTCALL_NO_CARRIER 3 +#define PPTP_OUTCALL_BUSY 4 +#define PPTP_OUTCALL_NO_DIAL_TONE 5 +#define PPTP_OUTCALL_TIMEOUT 6 +#define PPTP_OUTCALL_DONT_ACCEPT 7 + +struct PptpOutCallReply { + __u16 callID; + __u16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __u16 causeCode; + __u32 connectSpeed; + __u16 packetWindow; + __u16 packetProcDelay; + __u32 physChannelID; +}; + +struct PptpInCallRequest { + __u16 callID; + __u16 callSerialNumber; + __u32 callBearerType; + __u32 physChannelID; + __u16 dialedNumberLength; + __u16 dialingNumberLength; + __u8 dialedNumber[64]; + __u8 dialingNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpInCallResultCode */ +#define PPTP_INCALL_ACCEPT 1 +#define PPTP_INCALL_GENERAL_ERROR 2 +#define PPTP_INCALL_DONT_ACCEPT 3 + +struct PptpInCallReply { + __u16 callID; + __u16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __u16 packetWindow; + __u16 packetProcDelay; + __u16 reserved; +}; + +struct PptpInCallConnected { + __u16 peersCallID; + __u16 reserved; + __u32 connectSpeed; + __u16 packetWindow; + __u16 packetProcDelay; + __u32 callFramingType; +}; + +struct PptpClearCallRequest { + __u16 callID; + __u16 reserved; +}; + +struct PptpCallDisconnectNotify { + __u16 callID; + __u8 resultCode; + __u8 generalErrorCode; + __u16 causeCode; + __u16 reserved; + __u8 callStatistics[128]; +}; + +struct PptpWanErrorNotify { + __u16 peersCallID; + __u16 reserved; + __u32 crcErrors; + __u32 framingErrors; + __u32 hardwareOverRuns; + __u32 bufferOverRuns; + __u32 timeoutErrors; + __u32 alignmentErrors; +}; + +struct PptpSetLinkInfo { + __u16 peersCallID; + __u16 reserved; + __u32 sendAccm; + __u32 recvAccm; +}; + + +struct pptp_priv_data { + __u16 call_id; + __u16 mcall_id; + __u16 pcall_id; +}; + +union pptp_ctrl_union { + struct PptpStartSessionRequest sreq; + struct PptpStartSessionReply srep; + struct PptpStopSessionRequest streq; + struct PptpStopSessionReply strep; + struct PptpOutCallRequest ocreq; + struct PptpOutCallReply ocack; + struct PptpInCallRequest icreq; + struct PptpInCallReply icack; + struct PptpInCallConnected iccon; + struct PptpClearCallRequest clrreq; + struct PptpCallDisconnectNotify disc; + struct PptpWanErrorNotify wanerr; + struct PptpSetLinkInfo setlink; +}; + +extern int +(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +extern int +(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +extern int +(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *exp_orig, + struct ip_conntrack_expect *exp_reply); + +extern void +(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct, + struct ip_conntrack_expect *exp); +#endif /* __KERNEL__ */ +#endif /* 
_CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h new file mode 100644 index 000000000000..8d090ef82f5f --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h @@ -0,0 +1,114 @@ +#ifndef _CONNTRACK_PROTO_GRE_H +#define _CONNTRACK_PROTO_GRE_H +#include + +/* GRE PROTOCOL HEADER */ + +/* GRE Version field */ +#define GRE_VERSION_1701 0x0 +#define GRE_VERSION_PPTP 0x1 + +/* GRE Protocol field */ +#define GRE_PROTOCOL_PPTP 0x880B + +/* GRE Flags */ +#define GRE_FLAG_C 0x80 +#define GRE_FLAG_R 0x40 +#define GRE_FLAG_K 0x20 +#define GRE_FLAG_S 0x10 +#define GRE_FLAG_A 0x80 + +#define GRE_IS_C(f) ((f)&GRE_FLAG_C) +#define GRE_IS_R(f) ((f)&GRE_FLAG_R) +#define GRE_IS_K(f) ((f)&GRE_FLAG_K) +#define GRE_IS_S(f) ((f)&GRE_FLAG_S) +#define GRE_IS_A(f) ((f)&GRE_FLAG_A) + +/* GRE is a mess: Four different standards */ +struct gre_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 rec:3, + srr:1, + seq:1, + key:1, + routing:1, + csum:1, + version:3, + reserved:4, + ack:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 csum:1, + routing:1, + key:1, + seq:1, + srr:1, + rec:3, + ack:1, + reserved:4, + version:3; +#else +#error "Adjust your defines" +#endif + __u16 protocol; +}; + +/* modified GRE header for PPTP */ +struct gre_hdr_pptp { + __u8 flags; /* bitfield */ + __u8 version; /* should be GRE_VERSION_PPTP */ + __u16 protocol; /* should be GRE_PROTOCOL_PPTP */ + __u16 payload_len; /* size of ppp payload, not inc. gre header */ + __u16 call_id; /* peer's call_id for this session */ + __u32 seq; /* sequence number. Present if S==1 */ + __u32 ack; /* seq number of highest packet recieved by */ + /* sender in this session */ +}; + + +/* this is part of ip_conntrack */ +struct ip_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +#ifdef __KERNEL__ +struct ip_conntrack_expect; +struct ip_conntrack; + +/* structure for original <-> reply keymap */ +struct ip_ct_gre_keymap { + struct list_head list; + + struct ip_conntrack_tuple tuple; +}; + +/* add new tuple->key_reply pair to keymap */ +int ip_ct_gre_keymap_add(struct ip_conntrack *ct, + struct ip_conntrack_tuple *t, + int reply); + +/* delete keymap entries */ +void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct); + + +/* get pointer to gre key, if present */ +static inline u_int32_t *gre_key(struct gre_hdr *greh) +{ + if (!greh->key) + return NULL; + if (greh->csum || greh->routing) + return (u_int32_t *) (greh+sizeof(*greh)+4); + return (u_int32_t *) (greh+sizeof(*greh)); +} + +/* get pointer ot gre csum, if present */ +static inline u_int16_t *gre_csum(struct gre_hdr *greh) +{ + if (!greh->csum) + return NULL; + return (u_int16_t *) (greh+sizeof(*greh)); +} + +#endif /* __KERNEL__ */ + +#endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h index c33f0b5e0d0a..14dc0f7b6556 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h @@ -28,6 +28,9 @@ union ip_conntrack_manip_proto struct { u_int16_t port; } sctp; + struct { + u_int16_t key; /* key is 32bit, pptp only uses 16 */ + } gre; }; /* The manipulable part of the tuple. */ @@ -61,6 +64,10 @@ struct ip_conntrack_tuple struct { u_int16_t port; } sctp; + struct { + u_int16_t key; /* key is 32bit, + * pptp only uses 16 */ + } gre; } u; /* The protocol. 
*/ diff --git a/include/linux/netfilter_ipv4/ip_nat_pptp.h b/include/linux/netfilter_ipv4/ip_nat_pptp.h new file mode 100644 index 000000000000..eaf66c2e8f93 --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_nat_pptp.h @@ -0,0 +1,11 @@ +/* PPTP constants and structs */ +#ifndef _NAT_PPTP_H +#define _NAT_PPTP_H + +/* conntrack private data */ +struct ip_nat_pptp { + u_int16_t pns_call_id; /* NAT'ed PNS call id */ + u_int16_t pac_call_id; /* NAT'ed PAC call id */ +}; + +#endif /* _NAT_PPTP_H */ diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index e2162d270073..3cf9b451675c 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -137,6 +137,22 @@ config IP_NF_AMANDA To compile it as a module, choose M here. If unsure, say Y. +config IP_NF_PPTP + tristate 'PPTP protocol support' + help + This module adds support for PPTP (Point to Point Tunnelling + Protocol, RFC2637) conncection tracking and NAT. + + If you are running PPTP sessions over a stateful firewall or NAT + box, you may want to enable this feature. + + Please note that not all PPTP modes of operation are supported yet. + For more info, read top of the file + net/ipv4/netfilter/ip_conntrack_pptp.c + + If you want to compile it as a module, say M here and read + Documentation/modules.txt. If unsure, say `N'. + config IP_NF_QUEUE tristate "IP Userspace queueing via NETLINK (OBSOLETE)" help @@ -621,6 +637,12 @@ config IP_NF_NAT_AMANDA default IP_NF_NAT if IP_NF_AMANDA=y default m if IP_NF_AMANDA=m +config IP_NF_NAT_PPTP + tristate + depends on IP_NF_NAT!=n && IP_NF_PPTP!=n + default IP_NF_NAT if IP_NF_PPTP=y + default m if IP_NF_PPTP=m + # mangle + specific targets config IP_NF_MANGLE tristate "Packet mangling" diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 1ba0db746817..3d45d3c0283c 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -6,6 +6,9 @@ ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o +ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o +ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o + # connection tracking obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o @@ -17,6 +20,7 @@ obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o # connection tracking helpers +obj-$(CONFIG_IP_NF_PPTP) += ip_conntrack_pptp.o obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o @@ -24,6 +28,7 @@ obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o # NAT helpers +obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o obj-$(CONFIG_IP_NF_NAT_TFTP) += ip_nat_tftp.o obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c new file mode 100644 index 000000000000..79db5b70d5f6 --- /dev/null +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c @@ -0,0 +1,805 @@ +/* + * ip_conntrack_pptp.c - Version 3.0 + * + * Connection tracking support for PPTP (Point to Point Tunneling Protocol). 
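 *
 * (In short: the helper attaches to the TCP/1723 control connection, follows
 *  the call setup and teardown messages to learn the PNS and PAC call IDs,
 *  and registers conntrack expectations plus GRE keymap entries so that the
 *  corresponding GRE data channel is tracked automatically; the NAT side,
 *  ip_nat_helper_pptp.c, plugs into the ip_nat_pptp_hook_* pointers exported
 *  at the bottom of this file.)
 *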
+ * PPTP is a a protocol for creating virtual private networks. + * It is a specification defined by Microsoft and some vendors + * working with Microsoft. PPTP is built on top of a modified + * version of the Internet Generic Routing Encapsulation Protocol. + * GRE is defined in RFC 1701 and RFC 1702. Documentation of + * PPTP can be found in RFC 2637 + * + * (C) 2000-2005 by Harald Welte + * + * Development of this code funded by Astaro AG (http://www.astaro.com/) + * + * Limitations: + * - We blindly assume that control connections are always + * established in PNS->PAC direction. This is a violation + * of RFFC2673 + * - We can only support one single call within each session + * + * TODO: + * - testing of incoming PPTP calls + * + * Changes: + * 2002-02-05 - Version 1.3 + * - Call ip_conntrack_unexpect_related() from + * pptp_destroy_siblings() to destroy expectations in case + * CALL_DISCONNECT_NOTIFY or tcp fin packet was seen + * (Philip Craig ) + * - Add Version information at module loadtime + * 2002-02-10 - Version 1.6 + * - move to C99 style initializers + * - remove second expectation if first arrives + * 2004-10-22 - Version 2.0 + * - merge Mandrake's 2.6.x port with recent 2.6.x API changes + * - fix lots of linear skb assumptions from Mandrake's port + * 2005-06-10 - Version 2.1 + * - use ip_conntrack_expect_free() instead of kfree() on the + * expect's (which are from the slab for quite some time) + * 2005-06-10 - Version 3.0 + * - port helper to post-2.6.11 API changes, + * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/) + * 2005-07-30 - Version 3.1 + * - port helper to 2.6.13 API changes + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define IP_CT_PPTP_VERSION "3.1" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Harald Welte "); +MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP"); + +static DEFINE_SPINLOCK(ip_pptp_lock); + +int +(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +int +(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +int +(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *expect_orig, + struct ip_conntrack_expect *expect_reply); + +void +(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct, + struct ip_conntrack_expect *exp); + +#if 0 +/* PptpControlMessageType names */ +const char *pptp_msg_name[] = { + "UNKNOWN_MESSAGE", + "START_SESSION_REQUEST", + "START_SESSION_REPLY", + "STOP_SESSION_REQUEST", + "STOP_SESSION_REPLY", + "ECHO_REQUEST", + "ECHO_REPLY", + "OUT_CALL_REQUEST", + "OUT_CALL_REPLY", + "IN_CALL_REQUEST", + "IN_CALL_REPLY", + "IN_CALL_CONNECT", + "CALL_CLEAR_REQUEST", + "CALL_DISCONNECT_NOTIFY", + "WAN_ERROR_NOTIFY", + "SET_LINK_INFO" +}; +EXPORT_SYMBOL(pptp_msg_name); +#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args) +#else +#define DEBUGP(format, args...) 
+#endif + +#define SECS *HZ +#define MINS * 60 SECS +#define HOURS * 60 MINS + +#define PPTP_GRE_TIMEOUT (10 MINS) +#define PPTP_GRE_STREAM_TIMEOUT (5 HOURS) + +static void pptp_expectfn(struct ip_conntrack *ct, + struct ip_conntrack_expect *exp) +{ + DEBUGP("increasing timeouts\n"); + + /* increase timeout of GRE data channel conntrack entry */ + ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; + ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; + + /* Can you see how rusty this code is, compared with the pre-2.6.11 + * one? That's what happened to my shiny newnat of 2002 ;( -HW */ + + if (!ip_nat_pptp_hook_expectfn) { + struct ip_conntrack_tuple inv_t; + struct ip_conntrack_expect *exp_other; + + /* obviously this tuple inversion only works until you do NAT */ + invert_tuplepr(&inv_t, &exp->tuple); + DEBUGP("trying to unexpect other dir: "); + DUMP_TUPLE(&inv_t); + + exp_other = ip_conntrack_expect_find(&inv_t); + if (exp_other) { + /* delete other expectation. */ + DEBUGP("found\n"); + ip_conntrack_unexpect_related(exp_other); + ip_conntrack_expect_put(exp_other); + } else { + DEBUGP("not found\n"); + } + } else { + /* we need more than simple inversion */ + ip_nat_pptp_hook_expectfn(ct, exp); + } +} + +static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) +{ + struct ip_conntrack_tuple_hash *h; + struct ip_conntrack_expect *exp; + + DEBUGP("trying to timeout ct or exp for tuple "); + DUMP_TUPLE(t); + + h = ip_conntrack_find_get(t, NULL); + if (h) { + struct ip_conntrack *sibling = tuplehash_to_ctrack(h); + DEBUGP("setting timeout of conntrack %p to 0\n", sibling); + sibling->proto.gre.timeout = 0; + sibling->proto.gre.stream_timeout = 0; + /* refresh_acct will not modify counters if skb == NULL */ + if (del_timer(&sibling->timeout)) + sibling->timeout.function((unsigned long)sibling); + ip_conntrack_put(sibling); + return 1; + } else { + exp = ip_conntrack_expect_find(t); + if (exp) { + DEBUGP("unexpect_related of expect %p\n", exp); + ip_conntrack_unexpect_related(exp); + ip_conntrack_expect_put(exp); + return 1; + } + } + + return 0; +} + + +/* timeout GRE data connections */ +static void pptp_destroy_siblings(struct ip_conntrack *ct) +{ + struct ip_conntrack_tuple t; + + /* Since ct->sibling_list has literally rusted away in 2.6.11, + * we now need another way to find out about our sibling + * contrack and expects... 
-HW */ + + /* try original (pns->pac) tuple */ + memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); + t.dst.protonum = IPPROTO_GRE; + t.src.u.gre.key = htons(ct->help.ct_pptp_info.pns_call_id); + t.dst.u.gre.key = htons(ct->help.ct_pptp_info.pac_call_id); + + if (!destroy_sibling_or_exp(&t)) + DEBUGP("failed to timeout original pns->pac ct/exp\n"); + + /* try reply (pac->pns) tuple */ + memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); + t.dst.protonum = IPPROTO_GRE; + t.src.u.gre.key = htons(ct->help.ct_pptp_info.pac_call_id); + t.dst.u.gre.key = htons(ct->help.ct_pptp_info.pns_call_id); + + if (!destroy_sibling_or_exp(&t)) + DEBUGP("failed to timeout reply pac->pns ct/exp\n"); +} + +/* expect GRE connections (PNS->PAC and PAC->PNS direction) */ +static inline int +exp_gre(struct ip_conntrack *master, + u_int32_t seq, + u_int16_t callid, + u_int16_t peer_callid) +{ + struct ip_conntrack_tuple inv_tuple; + struct ip_conntrack_tuple exp_tuples[] = { + /* tuple in original direction, PNS->PAC */ + { .src = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip, + .u = { .gre = { .key = peer_callid } } + }, + .dst = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip, + .u = { .gre = { .key = callid } }, + .protonum = IPPROTO_GRE + }, + }, + /* tuple in reply direction, PAC->PNS */ + { .src = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip, + .u = { .gre = { .key = callid } } + }, + .dst = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip, + .u = { .gre = { .key = peer_callid } }, + .protonum = IPPROTO_GRE + }, + } + }; + struct ip_conntrack_expect *exp_orig, *exp_reply; + int ret = 1; + + exp_orig = ip_conntrack_expect_alloc(master); + if (exp_orig == NULL) + goto out; + + exp_reply = ip_conntrack_expect_alloc(master); + if (exp_reply == NULL) + goto out_put_orig; + + memcpy(&exp_orig->tuple, &exp_tuples[0], sizeof(exp_orig->tuple)); + + exp_orig->mask.src.ip = 0xffffffff; + exp_orig->mask.src.u.all = 0; + exp_orig->mask.dst.u.all = 0; + exp_orig->mask.dst.u.gre.key = 0xffff; + exp_orig->mask.dst.ip = 0xffffffff; + exp_orig->mask.dst.protonum = 0xff; + + exp_orig->master = master; + exp_orig->expectfn = pptp_expectfn; + exp_orig->flags = 0; + + exp_orig->dir = IP_CT_DIR_ORIGINAL; + + /* both expectations are identical apart from tuple */ + memcpy(exp_reply, exp_orig, sizeof(*exp_reply)); + memcpy(&exp_reply->tuple, &exp_tuples[1], sizeof(exp_reply->tuple)); + + exp_reply->dir = !exp_orig->dir; + + if (ip_nat_pptp_hook_exp_gre) + ret = ip_nat_pptp_hook_exp_gre(exp_orig, exp_reply); + else { + + DEBUGP("calling expect_related PNS->PAC"); + DUMP_TUPLE(&exp_orig->tuple); + + if (ip_conntrack_expect_related(exp_orig) != 0) { + DEBUGP("cannot expect_related()\n"); + goto out_put_both; + } + + DEBUGP("calling expect_related PAC->PNS"); + DUMP_TUPLE(&exp_reply->tuple); + + if (ip_conntrack_expect_related(exp_reply) != 0) { + DEBUGP("cannot expect_related()\n"); + goto out_unexpect_orig; + } + + /* Add GRE keymap entries */ + if (ip_ct_gre_keymap_add(master, &exp_reply->tuple, 0) != 0) { + DEBUGP("cannot keymap_add() exp\n"); + goto out_unexpect_both; + } + + invert_tuplepr(&inv_tuple, &exp_reply->tuple); + if (ip_ct_gre_keymap_add(master, &inv_tuple, 1) != 0) { + ip_ct_gre_keymap_destroy(master); + DEBUGP("cannot keymap_add() exp_inv\n"); + goto out_unexpect_both; + } + ret = 0; + } + +out_put_both: + ip_conntrack_expect_put(exp_reply); +out_put_orig: + ip_conntrack_expect_put(exp_orig); +out: + return ret; + +out_unexpect_both: + 
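	/*
	 * Error unwind: cancel whatever expectations were already registered
	 * above, then fall back to out_put_both to drop both references.
	 */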
ip_conntrack_unexpect_related(exp_reply); +out_unexpect_orig: + ip_conntrack_unexpect_related(exp_orig); + goto out_put_both; +} + +static inline int +pptp_inbound_pkt(struct sk_buff **pskb, + struct tcphdr *tcph, + unsigned int nexthdr_off, + unsigned int datalen, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo) +{ + struct PptpControlHeader _ctlh, *ctlh; + unsigned int reqlen; + union pptp_ctrl_union _pptpReq, *pptpReq; + struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; + u_int16_t msg, *cid, *pcid; + u_int32_t seq; + + ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); + if (!ctlh) { + DEBUGP("error during skb_header_pointer\n"); + return NF_ACCEPT; + } + nexthdr_off += sizeof(_ctlh); + datalen -= sizeof(_ctlh); + + reqlen = datalen; + if (reqlen > sizeof(*pptpReq)) + reqlen = sizeof(*pptpReq); + pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq); + if (!pptpReq) { + DEBUGP("error during skb_header_pointer\n"); + return NF_ACCEPT; + } + + msg = ntohs(ctlh->messageType); + DEBUGP("inbound control message %s\n", pptp_msg_name[msg]); + + switch (msg) { + case PPTP_START_SESSION_REPLY: + if (reqlen < sizeof(_pptpReq.srep)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* server confirms new control session */ + if (info->sstate < PPTP_SESSION_REQUESTED) { + DEBUGP("%s without START_SESS_REQUEST\n", + pptp_msg_name[msg]); + break; + } + if (pptpReq->srep.resultCode == PPTP_START_OK) + info->sstate = PPTP_SESSION_CONFIRMED; + else + info->sstate = PPTP_SESSION_ERROR; + break; + + case PPTP_STOP_SESSION_REPLY: + if (reqlen < sizeof(_pptpReq.strep)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* server confirms end of control session */ + if (info->sstate > PPTP_SESSION_STOPREQ) { + DEBUGP("%s without STOP_SESS_REQUEST\n", + pptp_msg_name[msg]); + break; + } + if (pptpReq->strep.resultCode == PPTP_STOP_OK) + info->sstate = PPTP_SESSION_NONE; + else + info->sstate = PPTP_SESSION_ERROR; + break; + + case PPTP_OUT_CALL_REPLY: + if (reqlen < sizeof(_pptpReq.ocack)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* server accepted call, we now expect GRE frames */ + if (info->sstate != PPTP_SESSION_CONFIRMED) { + DEBUGP("%s but no session\n", pptp_msg_name[msg]); + break; + } + if (info->cstate != PPTP_CALL_OUT_REQ && + info->cstate != PPTP_CALL_OUT_CONF) { + DEBUGP("%s without OUTCALL_REQ\n", pptp_msg_name[msg]); + break; + } + if (pptpReq->ocack.resultCode != PPTP_OUTCALL_CONNECT) { + info->cstate = PPTP_CALL_NONE; + break; + } + + cid = &pptpReq->ocack.callID; + pcid = &pptpReq->ocack.peersCallID; + + info->pac_call_id = ntohs(*cid); + + if (htons(info->pns_call_id) != *pcid) { + DEBUGP("%s for unknown callid %u\n", + pptp_msg_name[msg], ntohs(*pcid)); + break; + } + + DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], + ntohs(*cid), ntohs(*pcid)); + + info->cstate = PPTP_CALL_OUT_CONF; + + seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr) + + sizeof(struct PptpControlHeader) + + ((void *)pcid - (void *)pptpReq); + + if (exp_gre(ct, seq, *cid, *pcid) != 0) + printk("ip_conntrack_pptp: error during exp_gre\n"); + break; + + case PPTP_IN_CALL_REQUEST: + if (reqlen < sizeof(_pptpReq.icack)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* server tells us about incoming call request */ + if (info->sstate != PPTP_SESSION_CONFIRMED) { + DEBUGP("%s but no session\n", pptp_msg_name[msg]); + break; + } + pcid = &pptpReq->icack.peersCallID; + 
DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(*pcid)); + info->cstate = PPTP_CALL_IN_REQ; + info->pac_call_id = ntohs(*pcid); + break; + + case PPTP_IN_CALL_CONNECT: + if (reqlen < sizeof(_pptpReq.iccon)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* server tells us about incoming call established */ + if (info->sstate != PPTP_SESSION_CONFIRMED) { + DEBUGP("%s but no session\n", pptp_msg_name[msg]); + break; + } + if (info->sstate != PPTP_CALL_IN_REP + && info->sstate != PPTP_CALL_IN_CONF) { + DEBUGP("%s but never sent IN_CALL_REPLY\n", + pptp_msg_name[msg]); + break; + } + + pcid = &pptpReq->iccon.peersCallID; + cid = &info->pac_call_id; + + if (info->pns_call_id != ntohs(*pcid)) { + DEBUGP("%s for unknown CallID %u\n", + pptp_msg_name[msg], ntohs(*cid)); + break; + } + + DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(*pcid)); + info->cstate = PPTP_CALL_IN_CONF; + + /* we expect a GRE connection from PAC to PNS */ + seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr) + + sizeof(struct PptpControlHeader) + + ((void *)pcid - (void *)pptpReq); + + if (exp_gre(ct, seq, *cid, *pcid) != 0) + printk("ip_conntrack_pptp: error during exp_gre\n"); + + break; + + case PPTP_CALL_DISCONNECT_NOTIFY: + if (reqlen < sizeof(_pptpReq.disc)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* server confirms disconnect */ + cid = &pptpReq->disc.callID; + DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*cid)); + info->cstate = PPTP_CALL_NONE; + + /* untrack this call id, unexpect GRE packets */ + pptp_destroy_siblings(ct); + break; + + case PPTP_WAN_ERROR_NOTIFY: + break; + + case PPTP_ECHO_REQUEST: + case PPTP_ECHO_REPLY: + /* I don't have to explain these ;) */ + break; + default: + DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX) + ? 
pptp_msg_name[msg]:pptp_msg_name[0], msg); + break; + } + + + if (ip_nat_pptp_hook_inbound) + return ip_nat_pptp_hook_inbound(pskb, ct, ctinfo, ctlh, + pptpReq); + + return NF_ACCEPT; + +} + +static inline int +pptp_outbound_pkt(struct sk_buff **pskb, + struct tcphdr *tcph, + unsigned int nexthdr_off, + unsigned int datalen, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo) +{ + struct PptpControlHeader _ctlh, *ctlh; + unsigned int reqlen; + union pptp_ctrl_union _pptpReq, *pptpReq; + struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; + u_int16_t msg, *cid, *pcid; + + ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); + if (!ctlh) + return NF_ACCEPT; + nexthdr_off += sizeof(_ctlh); + datalen -= sizeof(_ctlh); + + reqlen = datalen; + if (reqlen > sizeof(*pptpReq)) + reqlen = sizeof(*pptpReq); + pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq); + if (!pptpReq) + return NF_ACCEPT; + + msg = ntohs(ctlh->messageType); + DEBUGP("outbound control message %s\n", pptp_msg_name[msg]); + + switch (msg) { + case PPTP_START_SESSION_REQUEST: + /* client requests for new control session */ + if (info->sstate != PPTP_SESSION_NONE) { + DEBUGP("%s but we already have one", + pptp_msg_name[msg]); + } + info->sstate = PPTP_SESSION_REQUESTED; + break; + case PPTP_STOP_SESSION_REQUEST: + /* client requests end of control session */ + info->sstate = PPTP_SESSION_STOPREQ; + break; + + case PPTP_OUT_CALL_REQUEST: + if (reqlen < sizeof(_pptpReq.ocreq)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + /* FIXME: break; */ + } + + /* client initiating connection to server */ + if (info->sstate != PPTP_SESSION_CONFIRMED) { + DEBUGP("%s but no session\n", + pptp_msg_name[msg]); + break; + } + info->cstate = PPTP_CALL_OUT_REQ; + /* track PNS call id */ + cid = &pptpReq->ocreq.callID; + DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*cid)); + info->pns_call_id = ntohs(*cid); + break; + case PPTP_IN_CALL_REPLY: + if (reqlen < sizeof(_pptpReq.icack)) { + DEBUGP("%s: short packet\n", pptp_msg_name[msg]); + break; + } + + /* client answers incoming call */ + if (info->cstate != PPTP_CALL_IN_REQ + && info->cstate != PPTP_CALL_IN_REP) { + DEBUGP("%s without incall_req\n", + pptp_msg_name[msg]); + break; + } + if (pptpReq->icack.resultCode != PPTP_INCALL_ACCEPT) { + info->cstate = PPTP_CALL_NONE; + break; + } + pcid = &pptpReq->icack.peersCallID; + if (info->pac_call_id != ntohs(*pcid)) { + DEBUGP("%s for unknown call %u\n", + pptp_msg_name[msg], ntohs(*pcid)); + break; + } + DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*pcid)); + /* part two of the three-way handshake */ + info->cstate = PPTP_CALL_IN_REP; + info->pns_call_id = ntohs(pptpReq->icack.callID); + break; + + case PPTP_CALL_CLEAR_REQUEST: + /* client requests hangup of call */ + if (info->sstate != PPTP_SESSION_CONFIRMED) { + DEBUGP("CLEAR_CALL but no session\n"); + break; + } + /* FUTURE: iterate over all calls and check if + * call ID is valid. We don't do this without newnat, + * because we only know about last call */ + info->cstate = PPTP_CALL_CLEAR_REQ; + break; + case PPTP_SET_LINK_INFO: + break; + case PPTP_ECHO_REQUEST: + case PPTP_ECHO_REPLY: + /* I don't have to explain these ;) */ + break; + default: + DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)? 
+ pptp_msg_name[msg]:pptp_msg_name[0], msg); + /* unknown: no need to create GRE masq table entry */ + break; + } + + if (ip_nat_pptp_hook_outbound) + return ip_nat_pptp_hook_outbound(pskb, ct, ctinfo, ctlh, + pptpReq); + + return NF_ACCEPT; +} + + +/* track caller id inside control connection, call expect_related */ +static int +conntrack_pptp_help(struct sk_buff **pskb, + struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) + +{ + struct pptp_pkt_hdr _pptph, *pptph; + struct tcphdr _tcph, *tcph; + u_int32_t tcplen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4; + u_int32_t datalen; + int dir = CTINFO2DIR(ctinfo); + struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; + unsigned int nexthdr_off; + + int oldsstate, oldcstate; + int ret; + + /* don't do any tracking before tcp handshake complete */ + if (ctinfo != IP_CT_ESTABLISHED + && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) { + DEBUGP("ctinfo = %u, skipping\n", ctinfo); + return NF_ACCEPT; + } + + nexthdr_off = (*pskb)->nh.iph->ihl*4; + tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); + BUG_ON(!tcph); + nexthdr_off += tcph->doff * 4; + datalen = tcplen - tcph->doff * 4; + + if (tcph->fin || tcph->rst) { + DEBUGP("RST/FIN received, timeouting GRE\n"); + /* can't do this after real newnat */ + info->cstate = PPTP_CALL_NONE; + + /* untrack this call id, unexpect GRE packets */ + pptp_destroy_siblings(ct); + } + + pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); + if (!pptph) { + DEBUGP("no full PPTP header, can't track\n"); + return NF_ACCEPT; + } + nexthdr_off += sizeof(_pptph); + datalen -= sizeof(_pptph); + + /* if it's not a control message we can't do anything with it */ + if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL || + ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) { + DEBUGP("not a control packet\n"); + return NF_ACCEPT; + } + + oldsstate = info->sstate; + oldcstate = info->cstate; + + spin_lock_bh(&ip_pptp_lock); + + /* FIXME: We just blindly assume that the control connection is always + * established from PNS->PAC. 
However, RFC makes no guarantee */ + if (dir == IP_CT_DIR_ORIGINAL) + /* client -> server (PNS -> PAC) */ + ret = pptp_outbound_pkt(pskb, tcph, nexthdr_off, datalen, ct, + ctinfo); + else + /* server -> client (PAC -> PNS) */ + ret = pptp_inbound_pkt(pskb, tcph, nexthdr_off, datalen, ct, + ctinfo); + DEBUGP("sstate: %d->%d, cstate: %d->%d\n", + oldsstate, info->sstate, oldcstate, info->cstate); + spin_unlock_bh(&ip_pptp_lock); + + return ret; +} + +/* control protocol helper */ +static struct ip_conntrack_helper pptp = { + .list = { NULL, NULL }, + .name = "pptp", + .me = THIS_MODULE, + .max_expected = 2, + .timeout = 5 * 60, + .tuple = { .src = { .ip = 0, + .u = { .tcp = { .port = + __constant_htons(PPTP_CONTROL_PORT) } } + }, + .dst = { .ip = 0, + .u = { .all = 0 }, + .protonum = IPPROTO_TCP + } + }, + .mask = { .src = { .ip = 0, + .u = { .tcp = { .port = 0xffff } } + }, + .dst = { .ip = 0, + .u = { .all = 0 }, + .protonum = 0xff + } + }, + .help = conntrack_pptp_help +}; + +extern void __exit ip_ct_proto_gre_fini(void); +extern int __init ip_ct_proto_gre_init(void); + +/* ip_conntrack_pptp initialization */ +static int __init init(void) +{ + int retcode; + + retcode = ip_ct_proto_gre_init(); + if (retcode < 0) + return retcode; + + DEBUGP(" registering helper\n"); + if ((retcode = ip_conntrack_helper_register(&pptp))) { + printk(KERN_ERR "Unable to register conntrack application " + "helper for pptp: %d\n", retcode); + ip_ct_proto_gre_fini(); + return retcode; + } + + printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION); + return 0; +} + +static void __exit fini(void) +{ + ip_conntrack_helper_unregister(&pptp); + ip_ct_proto_gre_fini(); + printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION); +} + +module_init(init); +module_exit(fini); + +EXPORT_SYMBOL(ip_nat_pptp_hook_outbound); +EXPORT_SYMBOL(ip_nat_pptp_hook_inbound); +EXPORT_SYMBOL(ip_nat_pptp_hook_exp_gre); +EXPORT_SYMBOL(ip_nat_pptp_hook_expectfn); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c new file mode 100644 index 000000000000..de3cb9db6f85 --- /dev/null +++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c @@ -0,0 +1,327 @@ +/* + * ip_conntrack_proto_gre.c - Version 3.0 + * + * Connection tracking protocol helper module for GRE. + * + * GRE is a generic encapsulation protocol, which is generally not very + * suited for NAT, as it has no protocol-specific part as port numbers. + * + * It has an optional key field, which may help us distinguishing two + * connections between the same two hosts. + * + * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 + * + * PPTP is built on top of a modified version of GRE, and has a mandatory + * field called "CallID", which serves us for the same purpose as the key + * field in plain GRE. 
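 *
 * (Keying, concretely: gre_pkt_to_tuple() below uses the call_id of an
 *  enhanced-GRE packet as the destination key of the tuple; the packet does
 *  not carry the sender's own call ID, so the source key is recovered from
 *  the keymap that the PPTP control-channel helper fills in via
 *  ip_ct_gre_keymap_add().)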
+ * + * Documentation about PPTP can be found in RFC 2637 + * + * (C) 2000-2005 by Harald Welte + * + * Development of this code funded by Astaro AG (http://www.astaro.com/) + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_RWLOCK(ip_ct_gre_lock); +#define ASSERT_READ_LOCK(x) +#define ASSERT_WRITE_LOCK(x) + +#include +#include +#include +#include + +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Harald Welte "); +MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE"); + +/* shamelessly stolen from ip_conntrack_proto_udp.c */ +#define GRE_TIMEOUT (30*HZ) +#define GRE_STREAM_TIMEOUT (180*HZ) + +#if 0 +#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args) +#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \ + NIPQUAD((x)->src.ip), ntohs((x)->src.u.gre.key), \ + NIPQUAD((x)->dst.ip), ntohs((x)->dst.u.gre.key)) +#else +#define DEBUGP(x, args...) +#define DUMP_TUPLE_GRE(x) +#endif + +/* GRE KEYMAP HANDLING FUNCTIONS */ +static LIST_HEAD(gre_keymap_list); + +static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km, + const struct ip_conntrack_tuple *t) +{ + return ((km->tuple.src.ip == t->src.ip) && + (km->tuple.dst.ip == t->dst.ip) && + (km->tuple.dst.protonum == t->dst.protonum) && + (km->tuple.dst.u.all == t->dst.u.all)); +} + +/* look up the source key for a given tuple */ +static u_int32_t gre_keymap_lookup(struct ip_conntrack_tuple *t) +{ + struct ip_ct_gre_keymap *km; + u_int32_t key = 0; + + read_lock_bh(&ip_ct_gre_lock); + km = LIST_FIND(&gre_keymap_list, gre_key_cmpfn, + struct ip_ct_gre_keymap *, t); + if (km) + key = km->tuple.src.u.gre.key; + read_unlock_bh(&ip_ct_gre_lock); + + DEBUGP("lookup src key 0x%x up key for ", key); + DUMP_TUPLE_GRE(t); + + return key; +} + +/* add a single keymap entry, associate with specified master ct */ +int +ip_ct_gre_keymap_add(struct ip_conntrack *ct, + struct ip_conntrack_tuple *t, int reply) +{ + struct ip_ct_gre_keymap **exist_km, *km, *old; + + if (!ct->helper || strcmp(ct->helper->name, "pptp")) { + DEBUGP("refusing to add GRE keymap to non-pptp session\n"); + return -1; + } + + if (!reply) + exist_km = &ct->help.ct_pptp_info.keymap_orig; + else + exist_km = &ct->help.ct_pptp_info.keymap_reply; + + if (*exist_km) { + /* check whether it's a retransmission */ + old = LIST_FIND(&gre_keymap_list, gre_key_cmpfn, + struct ip_ct_gre_keymap *, t); + if (old == *exist_km) { + DEBUGP("retransmission\n"); + return 0; + } + + DEBUGP("trying to override keymap_%s for ct %p\n", + reply? 
"reply":"orig", ct); + return -EEXIST; + } + + km = kmalloc(sizeof(*km), GFP_ATOMIC); + if (!km) + return -ENOMEM; + + memcpy(&km->tuple, t, sizeof(*t)); + *exist_km = km; + + DEBUGP("adding new entry %p: ", km); + DUMP_TUPLE_GRE(&km->tuple); + + write_lock_bh(&ip_ct_gre_lock); + list_append(&gre_keymap_list, km); + write_unlock_bh(&ip_ct_gre_lock); + + return 0; +} + +/* destroy the keymap entries associated with specified master ct */ +void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct) +{ + DEBUGP("entering for ct %p\n", ct); + + if (!ct->helper || strcmp(ct->helper->name, "pptp")) { + DEBUGP("refusing to destroy GRE keymap to non-pptp session\n"); + return; + } + + write_lock_bh(&ip_ct_gre_lock); + if (ct->help.ct_pptp_info.keymap_orig) { + DEBUGP("removing %p from list\n", + ct->help.ct_pptp_info.keymap_orig); + list_del(&ct->help.ct_pptp_info.keymap_orig->list); + kfree(ct->help.ct_pptp_info.keymap_orig); + ct->help.ct_pptp_info.keymap_orig = NULL; + } + if (ct->help.ct_pptp_info.keymap_reply) { + DEBUGP("removing %p from list\n", + ct->help.ct_pptp_info.keymap_reply); + list_del(&ct->help.ct_pptp_info.keymap_reply->list); + kfree(ct->help.ct_pptp_info.keymap_reply); + ct->help.ct_pptp_info.keymap_reply = NULL; + } + write_unlock_bh(&ip_ct_gre_lock); +} + + +/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ + +/* invert gre part of tuple */ +static int gre_invert_tuple(struct ip_conntrack_tuple *tuple, + const struct ip_conntrack_tuple *orig) +{ + tuple->dst.u.gre.key = orig->src.u.gre.key; + tuple->src.u.gre.key = orig->dst.u.gre.key; + + return 1; +} + +/* gre hdr info to tuple */ +static int gre_pkt_to_tuple(const struct sk_buff *skb, + unsigned int dataoff, + struct ip_conntrack_tuple *tuple) +{ + struct gre_hdr_pptp _pgrehdr, *pgrehdr; + u_int32_t srckey; + struct gre_hdr _grehdr, *grehdr; + + /* first only delinearize old RFC1701 GRE header */ + grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr); + if (!grehdr || grehdr->version != GRE_VERSION_PPTP) { + /* try to behave like "ip_conntrack_proto_generic" */ + tuple->src.u.all = 0; + tuple->dst.u.all = 0; + return 1; + } + + /* PPTP header is variable length, only need up to the call_id field */ + pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr); + if (!pgrehdr) + return 1; + + if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) { + DEBUGP("GRE_VERSION_PPTP but unknown proto\n"); + return 0; + } + + tuple->dst.u.gre.key = pgrehdr->call_id; + srckey = gre_keymap_lookup(tuple); + tuple->src.u.gre.key = srckey; + + return 1; +} + +/* print gre part of tuple */ +static int gre_print_tuple(struct seq_file *s, + const struct ip_conntrack_tuple *tuple) +{ + return seq_printf(s, "srckey=0x%x dstkey=0x%x ", + ntohs(tuple->src.u.gre.key), + ntohs(tuple->dst.u.gre.key)); +} + +/* print private data for conntrack */ +static int gre_print_conntrack(struct seq_file *s, + const struct ip_conntrack *ct) +{ + return seq_printf(s, "timeout=%u, stream_timeout=%u ", + (ct->proto.gre.timeout / HZ), + (ct->proto.gre.stream_timeout / HZ)); +} + +/* Returns verdict for packet, and may modify conntrack */ +static int gre_packet(struct ip_conntrack *ct, + const struct sk_buff *skb, + enum ip_conntrack_info conntrackinfo) +{ + /* If we've seen traffic both ways, this is a GRE connection. + * Extend timeout. */ + if (ct->status & IPS_SEEN_REPLY) { + ip_ct_refresh_acct(ct, conntrackinfo, skb, + ct->proto.gre.stream_timeout); + /* Also, more likely to be important, and not a probe. 
*/ + set_bit(IPS_ASSURED_BIT, &ct->status); + } else + ip_ct_refresh_acct(ct, conntrackinfo, skb, + ct->proto.gre.timeout); + + return NF_ACCEPT; +} + +/* Called when a new connection for this protocol found. */ +static int gre_new(struct ip_conntrack *ct, + const struct sk_buff *skb) +{ + DEBUGP(": "); + DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + + /* initialize to sane value. Ideally a conntrack helper + * (e.g. in case of pptp) is increasing them */ + ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT; + ct->proto.gre.timeout = GRE_TIMEOUT; + + return 1; +} + +/* Called when a conntrack entry has already been removed from the hashes + * and is about to be deleted from memory */ +static void gre_destroy(struct ip_conntrack *ct) +{ + struct ip_conntrack *master = ct->master; + DEBUGP(" entering\n"); + + if (!master) + DEBUGP("no master !?!\n"); + else + ip_ct_gre_keymap_destroy(master); +} + +/* protocol helper struct */ +static struct ip_conntrack_protocol gre = { + .proto = IPPROTO_GRE, + .name = "gre", + .pkt_to_tuple = gre_pkt_to_tuple, + .invert_tuple = gre_invert_tuple, + .print_tuple = gre_print_tuple, + .print_conntrack = gre_print_conntrack, + .packet = gre_packet, + .new = gre_new, + .destroy = gre_destroy, + .me = THIS_MODULE, +#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ + defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) + .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, + .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, +#endif +}; + +/* ip_conntrack_proto_gre initialization */ +int __init ip_ct_proto_gre_init(void) +{ + return ip_conntrack_protocol_register(&gre); +} + +void __exit ip_ct_proto_gre_fini(void) +{ + struct list_head *pos, *n; + + /* delete all keymap entries */ + write_lock_bh(&ip_ct_gre_lock); + list_for_each_safe(pos, n, &gre_keymap_list) { + DEBUGP("deleting keymap %p at module unload time\n", pos); + list_del(pos); + kfree(pos); + } + write_unlock_bh(&ip_ct_gre_lock); + + ip_conntrack_protocol_unregister(&gre); +} + +EXPORT_SYMBOL(ip_ct_gre_keymap_add); +EXPORT_SYMBOL(ip_ct_gre_keymap_destroy); diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c new file mode 100644 index 000000000000..3cdd0684d30d --- /dev/null +++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c @@ -0,0 +1,401 @@ +/* + * ip_nat_pptp.c - Version 3.0 + * + * NAT support for PPTP (Point to Point Tunneling Protocol). + * PPTP is a a protocol for creating virtual private networks. + * It is a specification defined by Microsoft and some vendors + * working with Microsoft. PPTP is built on top of a modified + * version of the Internet Generic Routing Encapsulation Protocol. + * GRE is defined in RFC 1701 and RFC 1702. 
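 * (The NAT part rewrites the call IDs carried in the PPTP control messages:
 *  pptp_outbound_pkt() below reuses the already-SNATed TCP source port as
 *  the translated PNS call ID, and the companion ip_nat_proto_gre module,
 *  linked into the same ip_nat_pptp object per the Makefile hunk above,
 *  takes care of the call ID in the GRE data packets.)
 *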
Documentation of + * PPTP can be found in RFC 2637 + * + * (C) 2000-2005 by Harald Welte + * + * Development of this code funded by Astaro AG (http://www.astaro.com/) + * + * TODO: - NAT to a unique tuple, not to TCP source port + * (needs netfilter tuple reservation) + * + * Changes: + * 2002-02-10 - Version 1.3 + * - Use ip_nat_mangle_tcp_packet() because of cloned skb's + * in local connections (Philip Craig ) + * - add checks for magicCookie and pptp version + * - make argument list of pptp_{out,in}bound_packet() shorter + * - move to C99 style initializers + * - print version number at module loadtime + * 2003-09-22 - Version 1.5 + * - use SNATed tcp sourceport as callid, since we get called before + * TCP header is mangled (Philip Craig ) + * 2004-10-22 - Version 2.0 + * - kernel 2.6.x version + * 2005-06-10 - Version 3.0 + * - kernel >= 2.6.11 version, + * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/) + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define IP_NAT_PPTP_VERSION "3.0" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Harald Welte "); +MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); + + +#if 0 +extern const char *pptp_msg_name[]; +#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ + __FUNCTION__, ## args) +#else +#define DEBUGP(format, args...) +#endif + +static void pptp_nat_expected(struct ip_conntrack *ct, + struct ip_conntrack_expect *exp) +{ + struct ip_conntrack *master = ct->master; + struct ip_conntrack_expect *other_exp; + struct ip_conntrack_tuple t; + struct ip_ct_pptp_master *ct_pptp_info; + struct ip_nat_pptp *nat_pptp_info; + + ct_pptp_info = &master->help.ct_pptp_info; + nat_pptp_info = &master->nat.help.nat_pptp_info; + + /* And here goes the grand finale of corrosion... 
*/ + + if (exp->dir == IP_CT_DIR_ORIGINAL) { + DEBUGP("we are PNS->PAC\n"); + /* therefore, build tuple for PAC->PNS */ + t.src.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip; + t.src.u.gre.key = htons(master->help.ct_pptp_info.pac_call_id); + t.dst.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip; + t.dst.u.gre.key = htons(master->help.ct_pptp_info.pns_call_id); + t.dst.protonum = IPPROTO_GRE; + } else { + DEBUGP("we are PAC->PNS\n"); + /* build tuple for PNS->PAC */ + t.src.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; + t.src.u.gre.key = + htons(master->nat.help.nat_pptp_info.pns_call_id); + t.dst.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip; + t.dst.u.gre.key = + htons(master->nat.help.nat_pptp_info.pac_call_id); + t.dst.protonum = IPPROTO_GRE; + } + + DEBUGP("trying to unexpect other dir: "); + DUMP_TUPLE(&t); + other_exp = ip_conntrack_expect_find(&t); + if (other_exp) { + ip_conntrack_unexpect_related(other_exp); + ip_conntrack_expect_put(other_exp); + DEBUGP("success\n"); + } else { + DEBUGP("not found!\n"); + } + + ip_nat_follow_master(ct, exp); +} + +/* outbound packets == from PNS to PAC */ +static int +pptp_outbound_pkt(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq) + +{ + struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info; + struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info; + + u_int16_t msg, *cid = NULL, new_callid; + + new_callid = htons(ct_pptp_info->pns_call_id); + + switch (msg = ntohs(ctlh->messageType)) { + case PPTP_OUT_CALL_REQUEST: + cid = &pptpReq->ocreq.callID; + /* FIXME: ideally we would want to reserve a call ID + * here. current netfilter NAT core is not able to do + * this :( For now we use TCP source port. This breaks + * multiple calls within one control session */ + + /* save original call ID in nat_info */ + nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id; + + /* don't use tcph->source since we are at a DSTmanip + * hook (e.g. PREROUTING) and pkt is not mangled yet */ + new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port; + + /* save new call ID in ct info */ + ct_pptp_info->pns_call_id = ntohs(new_callid); + break; + case PPTP_IN_CALL_REPLY: + cid = &pptpReq->icreq.callID; + break; + case PPTP_CALL_CLEAR_REQUEST: + cid = &pptpReq->clrreq.callID; + break; + default: + DEBUGP("unknown outbound packet 0x%04x:%s\n", msg, + (msg <= PPTP_MSG_MAX)? 
+ pptp_msg_name[msg]:pptp_msg_name[0]); + /* fall through */ + + case PPTP_SET_LINK_INFO: + /* only need to NAT in case PAC is behind NAT box */ + case PPTP_START_SESSION_REQUEST: + case PPTP_START_SESSION_REPLY: + case PPTP_STOP_SESSION_REQUEST: + case PPTP_STOP_SESSION_REPLY: + case PPTP_ECHO_REQUEST: + case PPTP_ECHO_REPLY: + /* no need to alter packet */ + return NF_ACCEPT; + } + + /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass + * down to here */ + + IP_NF_ASSERT(cid); + + DEBUGP("altering call id from 0x%04x to 0x%04x\n", + ntohs(*cid), ntohs(new_callid)); + + /* mangle packet */ + if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, + (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)), + sizeof(new_callid), + (char *)&new_callid, + sizeof(new_callid)) == 0) + return NF_DROP; + + return NF_ACCEPT; +} + +static int +pptp_exp_gre(struct ip_conntrack_expect *expect_orig, + struct ip_conntrack_expect *expect_reply) +{ + struct ip_ct_pptp_master *ct_pptp_info = + &expect_orig->master->help.ct_pptp_info; + struct ip_nat_pptp *nat_pptp_info = + &expect_orig->master->nat.help.nat_pptp_info; + + struct ip_conntrack *ct = expect_orig->master; + + struct ip_conntrack_tuple inv_t; + struct ip_conntrack_tuple *orig_t, *reply_t; + + /* save original PAC call ID in nat_info */ + nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id; + + /* alter expectation */ + orig_t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; + reply_t = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; + + /* alter expectation for PNS->PAC direction */ + invert_tuplepr(&inv_t, &expect_orig->tuple); + expect_orig->saved_proto.gre.key = htons(nat_pptp_info->pac_call_id); + expect_orig->tuple.src.u.gre.key = htons(nat_pptp_info->pns_call_id); + expect_orig->tuple.dst.u.gre.key = htons(ct_pptp_info->pac_call_id); + inv_t.src.ip = reply_t->src.ip; + inv_t.dst.ip = reply_t->dst.ip; + inv_t.src.u.gre.key = htons(nat_pptp_info->pac_call_id); + inv_t.dst.u.gre.key = htons(ct_pptp_info->pns_call_id); + + if (!ip_conntrack_expect_related(expect_orig)) { + DEBUGP("successfully registered expect\n"); + } else { + DEBUGP("can't expect_related(expect_orig)\n"); + return 1; + } + + /* alter expectation for PAC->PNS direction */ + invert_tuplepr(&inv_t, &expect_reply->tuple); + expect_reply->saved_proto.gre.key = htons(nat_pptp_info->pns_call_id); + expect_reply->tuple.src.u.gre.key = htons(nat_pptp_info->pac_call_id); + expect_reply->tuple.dst.u.gre.key = htons(ct_pptp_info->pns_call_id); + inv_t.src.ip = orig_t->src.ip; + inv_t.dst.ip = orig_t->dst.ip; + inv_t.src.u.gre.key = htons(nat_pptp_info->pns_call_id); + inv_t.dst.u.gre.key = htons(ct_pptp_info->pac_call_id); + + if (!ip_conntrack_expect_related(expect_reply)) { + DEBUGP("successfully registered expect\n"); + } else { + DEBUGP("can't expect_related(expect_reply)\n"); + ip_conntrack_unexpect_related(expect_orig); + return 1; + } + + if (ip_ct_gre_keymap_add(ct, &expect_reply->tuple, 0) < 0) { + DEBUGP("can't register original keymap\n"); + ip_conntrack_unexpect_related(expect_orig); + ip_conntrack_unexpect_related(expect_reply); + return 1; + } + + if (ip_ct_gre_keymap_add(ct, &inv_t, 1) < 0) { + DEBUGP("can't register reply keymap\n"); + ip_conntrack_unexpect_related(expect_orig); + ip_conntrack_unexpect_related(expect_reply); + ip_ct_gre_keymap_destroy(ct); + return 1; + } + + return 0; +} + +/* inbound packets == from PAC to PNS */ +static int +pptp_inbound_pkt(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + struct PptpControlHeader 
*ctlh, + union pptp_ctrl_union *pptpReq) +{ + struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info; + u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL; + + int ret = NF_ACCEPT, rv; + + new_pcid = htons(nat_pptp_info->pns_call_id); + + switch (msg = ntohs(ctlh->messageType)) { + case PPTP_OUT_CALL_REPLY: + pcid = &pptpReq->ocack.peersCallID; + cid = &pptpReq->ocack.callID; + break; + case PPTP_IN_CALL_CONNECT: + pcid = &pptpReq->iccon.peersCallID; + break; + case PPTP_IN_CALL_REQUEST: + /* only need to nat in case PAC is behind NAT box */ + break; + case PPTP_WAN_ERROR_NOTIFY: + pcid = &pptpReq->wanerr.peersCallID; + break; + case PPTP_CALL_DISCONNECT_NOTIFY: + pcid = &pptpReq->disc.callID; + break; + case PPTP_SET_LINK_INFO: + pcid = &pptpReq->setlink.peersCallID; + break; + + default: + DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)? + pptp_msg_name[msg]:pptp_msg_name[0]); + /* fall through */ + + case PPTP_START_SESSION_REQUEST: + case PPTP_START_SESSION_REPLY: + case PPTP_STOP_SESSION_REQUEST: + case PPTP_STOP_SESSION_REPLY: + case PPTP_ECHO_REQUEST: + case PPTP_ECHO_REPLY: + /* no need to alter packet */ + return NF_ACCEPT; + } + + /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST, + * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ + + /* mangle packet */ + IP_NF_ASSERT(pcid); + DEBUGP("altering peer call id from 0x%04x to 0x%04x\n", + ntohs(*pcid), ntohs(new_pcid)); + + rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, + (void *)pcid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)), + sizeof(new_pcid), (char *)&new_pcid, + sizeof(new_pcid)); + if (rv != NF_ACCEPT) + return rv; + + if (new_cid) { + IP_NF_ASSERT(cid); + DEBUGP("altering call id from 0x%04x to 0x%04x\n", + ntohs(*cid), ntohs(new_cid)); + rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, + (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)), + sizeof(new_cid), + (char *)&new_cid, + sizeof(new_cid)); + if (rv != NF_ACCEPT) + return rv; + } + + /* check for earlier return value of 'switch' above */ + if (ret != NF_ACCEPT) + return ret; + + /* great, at least we don't need to resize packets */ + return NF_ACCEPT; +} + + +extern int __init ip_nat_proto_gre_init(void); +extern void __exit ip_nat_proto_gre_fini(void); + +static int __init init(void) +{ + int ret; + + DEBUGP("%s: registering NAT helper\n", __FILE__); + + ret = ip_nat_proto_gre_init(); + if (ret < 0) + return ret; + + BUG_ON(ip_nat_pptp_hook_outbound); + ip_nat_pptp_hook_outbound = &pptp_outbound_pkt; + + BUG_ON(ip_nat_pptp_hook_inbound); + ip_nat_pptp_hook_inbound = &pptp_inbound_pkt; + + BUG_ON(ip_nat_pptp_hook_exp_gre); + ip_nat_pptp_hook_exp_gre = &pptp_exp_gre; + + BUG_ON(ip_nat_pptp_hook_expectfn); + ip_nat_pptp_hook_expectfn = &pptp_nat_expected; + + printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION); + return 0; +} + +static void __exit fini(void) +{ + DEBUGP("cleanup_module\n" ); + + ip_nat_pptp_hook_expectfn = NULL; + ip_nat_pptp_hook_exp_gre = NULL; + ip_nat_pptp_hook_inbound = NULL; + ip_nat_pptp_hook_outbound = NULL; + + ip_nat_proto_gre_fini(); + /* Make sure noone calls it, meanwhile */ + synchronize_net(); + + printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION); +} + +module_init(init); +module_exit(fini); diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c new file mode 100644 index 000000000000..7c1285401672 --- /dev/null +++ b/net/ipv4/netfilter/ip_nat_proto_gre.c @@ -0,0 +1,214 @@ +/* + * ip_nat_proto_gre.c - Version 2.0 + 
* + * NAT protocol helper module for GRE. + * + * GRE is a generic encapsulation protocol, which is generally not very + * suited for NAT, as it has no protocol-specific part as port numbers. + * + * It has an optional key field, which may help us distinguishing two + * connections between the same two hosts. + * + * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 + * + * PPTP is built on top of a modified version of GRE, and has a mandatory + * field called "CallID", which serves us for the same purpose as the key + * field in plain GRE. + * + * Documentation about PPTP can be found in RFC 2637 + * + * (C) 2000-2005 by Harald Welte + * + * Development of this code funded by Astaro AG (http://www.astaro.com/) + * + */ + +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Harald Welte "); +MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); + +#if 0 +#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ + __FUNCTION__, ## args) +#else +#define DEBUGP(x, args...) +#endif + +/* is key in given range between min and max */ +static int +gre_in_range(const struct ip_conntrack_tuple *tuple, + enum ip_nat_manip_type maniptype, + const union ip_conntrack_manip_proto *min, + const union ip_conntrack_manip_proto *max) +{ + u_int32_t key; + + if (maniptype == IP_NAT_MANIP_SRC) + key = tuple->src.u.gre.key; + else + key = tuple->dst.u.gre.key; + + return ntohl(key) >= ntohl(min->gre.key) + && ntohl(key) <= ntohl(max->gre.key); +} + +/* generate unique tuple ... */ +static int +gre_unique_tuple(struct ip_conntrack_tuple *tuple, + const struct ip_nat_range *range, + enum ip_nat_manip_type maniptype, + const struct ip_conntrack *conntrack) +{ + static u_int16_t key; + u_int16_t *keyptr; + unsigned int min, i, range_size; + + if (maniptype == IP_NAT_MANIP_SRC) + keyptr = &tuple->src.u.gre.key; + else + keyptr = &tuple->dst.u.gre.key; + + if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { + DEBUGP("%p: NATing GRE PPTP\n", conntrack); + min = 1; + range_size = 0xffff; + } else { + min = ntohl(range->min.gre.key); + range_size = ntohl(range->max.gre.key) - min + 1; + } + + DEBUGP("min = %u, range_size = %u\n", min, range_size); + + for (i = 0; i < range_size; i++, key++) { + *keyptr = htonl(min + key % range_size); + if (!ip_nat_used_tuple(tuple, conntrack)) + return 1; + } + + DEBUGP("%p: no NAT mapping\n", conntrack); + + return 0; +} + +/* manipulate a GRE packet according to maniptype */ +static int +gre_manip_pkt(struct sk_buff **pskb, + unsigned int iphdroff, + const struct ip_conntrack_tuple *tuple, + enum ip_nat_manip_type maniptype) +{ + struct gre_hdr *greh; + struct gre_hdr_pptp *pgreh; + struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); + unsigned int hdroff = iphdroff + iph->ihl*4; + + /* pgreh includes two optional 32bit fields which are not required + * to be there. That's where the magic '8' comes from */ + if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh)-8)) + return 0; + + greh = (void *)(*pskb)->data + hdroff; + pgreh = (struct gre_hdr_pptp *) greh; + + /* we only have destination manip of a packet, since 'source key' + * is not present in the packet itself */ + if (maniptype == IP_NAT_MANIP_DST) { + /* key manipulation is always dest */ + switch (greh->version) { + case 0: + if (!greh->key) { + DEBUGP("can't nat GRE w/o key\n"); + break; + } + if (greh->csum) { + /* FIXME: Never tested this code... 
*/ + *(gre_csum(greh)) = + ip_nat_cheat_check(~*(gre_key(greh)), + tuple->dst.u.gre.key, + *(gre_csum(greh))); + } + *(gre_key(greh)) = tuple->dst.u.gre.key; + break; + case GRE_VERSION_PPTP: + DEBUGP("call_id -> 0x%04x\n", + ntohl(tuple->dst.u.gre.key)); + pgreh->call_id = htons(ntohl(tuple->dst.u.gre.key)); + break; + default: + DEBUGP("can't nat unknown GRE version\n"); + return 0; + break; + } + } + return 1; +} + +/* print out a nat tuple */ +static unsigned int +gre_print(char *buffer, + const struct ip_conntrack_tuple *match, + const struct ip_conntrack_tuple *mask) +{ + unsigned int len = 0; + + if (mask->src.u.gre.key) + len += sprintf(buffer + len, "srckey=0x%x ", + ntohl(match->src.u.gre.key)); + + if (mask->dst.u.gre.key) + len += sprintf(buffer + len, "dstkey=0x%x ", + ntohl(match->src.u.gre.key)); + + return len; +} + +/* print a range of keys */ +static unsigned int +gre_print_range(char *buffer, const struct ip_nat_range *range) +{ + if (range->min.gre.key != 0 + || range->max.gre.key != 0xFFFF) { + if (range->min.gre.key == range->max.gre.key) + return sprintf(buffer, "key 0x%x ", + ntohl(range->min.gre.key)); + else + return sprintf(buffer, "keys 0x%u-0x%u ", + ntohl(range->min.gre.key), + ntohl(range->max.gre.key)); + } else + return 0; +} + +/* nat helper struct */ +static struct ip_nat_protocol gre = { + .name = "GRE", + .protonum = IPPROTO_GRE, + .manip_pkt = gre_manip_pkt, + .in_range = gre_in_range, + .unique_tuple = gre_unique_tuple, + .print = gre_print, + .print_range = gre_print_range, +#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ + defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) + .range_to_nfattr = ip_nat_port_range_to_nfattr, + .nfattr_to_range = ip_nat_port_nfattr_to_range, +#endif +}; + +int __init ip_nat_proto_gre_init(void) +{ + return ip_nat_protocol_register(&gre); +} + +void __exit ip_nat_proto_gre_fini(void) +{ + ip_nat_protocol_unregister(&gre); +} -- cgit v1.2.3 From e674d0f38de6109b59dbe30fba8b296a03229b8e Mon Sep 17 00:00:00 2001 From: Yasuyuki Kozakai Date: Mon, 19 Sep 2005 15:34:40 -0700 Subject: [NETFILTER] ip6tables: remove duplicate code Some IPv6 matches have very similar loops to find IPv6 extension header and we can unify them. This patch introduces ipv6_find_hdr() to do it. I just checked that it can find the target headers in the packet which has dst,hbh,rt,frag,ah,esp headers. Signed-off-by: Yasuyuki Kozakai Signed-off-by: Harald Welte Signed-off-by: David S. 
Miller --- include/linux/netfilter_ipv6/ip6_tables.h | 3 ++ net/ipv6/netfilter/ip6_tables.c | 52 ++++++++++++++++++ net/ipv6/netfilter/ip6t_ah.c | 81 ++-------------------------- net/ipv6/netfilter/ip6t_dst.c | 88 +++--------------------------- net/ipv6/netfilter/ip6t_esp.c | 73 ++----------------------- net/ipv6/netfilter/ip6t_frag.c | 90 ++++--------------------------- net/ipv6/netfilter/ip6t_hbh.c | 88 +++--------------------------- net/ipv6/netfilter/ip6t_rt.c | 83 +++------------------------- 8 files changed, 94 insertions(+), 464 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 58c72a52dc65..59f70b34e029 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -455,6 +455,9 @@ extern unsigned int ip6t_do_table(struct sk_buff **pskb, /* Check for an extension */ extern int ip6t_ext_hdr(u8 nexthdr); +/* find specified header and get offset to it */ +extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + u8 target); #define IP6T_ALIGN(s) (((s) + (__alignof__(struct ip6t_entry)-1)) & ~(__alignof__(struct ip6t_entry)-1)) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1cb8adb2787f..2da514b16d95 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1955,6 +1955,57 @@ static void __exit fini(void) #endif } +/* + * find specified header up to transport protocol header. + * If found target header, the offset to the header is set to *offset + * and return 0. otherwise, return -1. + * + * Notes: - non-1st Fragment Header isn't skipped. + * - ESP header isn't skipped. + * - The target header may be trancated. + */ +int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, u8 target) +{ + unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data; + u8 nexthdr = skb->nh.ipv6h->nexthdr; + unsigned int len = skb->len - start; + + while (nexthdr != target) { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) + return -1; + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (hp == NULL) + return -1; + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off, *fp; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (fp == NULL) + return -1; + + if (ntohs(*fp) & ~0x7) + return -1; + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) + hdrlen = (hp->hdrlen + 2) << 2; + else + hdrlen = ipv6_optlen(hp); + + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + + *offset = start; + return 0; +} + EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); @@ -1963,6 +2014,7 @@ EXPORT_SYMBOL(ip6t_unregister_match); EXPORT_SYMBOL(ip6t_register_target); EXPORT_SYMBOL(ip6t_unregister_target); EXPORT_SYMBOL(ip6t_ext_hdr); +EXPORT_SYMBOL(ipv6_find_hdr); module_init(init); module_exit(fini); diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c index d5b94f142bba..dde37793d20b 100644 --- a/net/ipv6/netfilter/ip6t_ah.c +++ b/net/ipv6/netfilter/ip6t_ah.c @@ -48,92 +48,21 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct ip_auth_hdr *ah = NULL, _ah; + struct ip_auth_hdr *ah, _ah; const struct ip6t_ah *ahinfo = matchinfo; - unsigned int temp; - int len; - u8 nexthdr; unsigned int ptr; unsigned int hdrlen = 0; - 
/*DEBUGP("IPv6 AH entered\n");*/ - /* if (opt->auth == 0) return 0; - * It does not filled on output */ - - /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; - /* pointer to the 1st exthdr */ - ptr = sizeof(struct ipv6hdr); - /* available length */ - len = skb->len - ptr; - temp = 0; - - while (ip6t_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - DEBUGP("ipv6_ah header iteration \n"); - - /* Is there enough space for the next ext header? */ - if (len < sizeof(struct ipv6_opt_hdr)) - return 0; - /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) - break; - /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) - break; - - hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); - - /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) - hdrlen = 8; - else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - /* AH -> evaluate */ - if (nexthdr == NEXTHDR_AUTH) { - temp |= MASK_AH; - break; - } - - - /* set the flag */ - switch (nexthdr) { - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_FRAGMENT: - case NEXTHDR_AUTH: - case NEXTHDR_DEST: - break; - default: - DEBUGP("ipv6_ah match: unknown nextheader %u\n",nexthdr); - return 0; - } - - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; - if (ptr > skb->len) { - DEBUGP("ipv6_ah: new pointer too large! \n"); - break; - } - } - - /* AH header not found */ - if (temp != MASK_AH) + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH) < 0) return 0; - if (len < sizeof(struct ip_auth_hdr)){ + ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah); + if (ah == NULL) { *hotdrop = 1; return 0; } - ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah); - BUG_ON(ah == NULL); + hdrlen = (ah->hdrlen + 2) << 2; DEBUGP("IPv6 AH LEN %u %u ", hdrlen, ah->hdrlen); DEBUGP("RES %04X ", ah->reserved); diff --git a/net/ipv6/netfilter/ip6t_dst.c b/net/ipv6/netfilter/ip6t_dst.c index 540925e4a7a8..c450a635e54b 100644 --- a/net/ipv6/netfilter/ip6t_dst.c +++ b/net/ipv6/netfilter/ip6t_dst.c @@ -63,8 +63,6 @@ match(const struct sk_buff *skb, struct ipv6_opt_hdr _optsh, *oh; const struct ip6t_opts *optinfo = matchinfo; unsigned int temp; - unsigned int len; - u8 nexthdr; unsigned int ptr; unsigned int hdrlen = 0; unsigned int ret = 0; @@ -72,97 +70,25 @@ match(const struct sk_buff *skb, u8 _optlen, *lp = NULL; unsigned int optlen; - /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; - /* pointer to the 1st exthdr */ - ptr = sizeof(struct ipv6hdr); - /* available length */ - len = skb->len - ptr; - temp = 0; - - while (ip6t_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - DEBUGP("ipv6_opts header iteration \n"); - - /* Is there enough space for the next ext header? 
*/ - if (len < (int)sizeof(struct ipv6_opt_hdr)) - return 0; - /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) { - break; - } - /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) { - break; - } - - hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); - - /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) { - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - /* OPTS -> evaluate */ #if HOPBYHOP - if (nexthdr == NEXTHDR_HOP) { - temp |= MASK_HOPOPTS; + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP) < 0) #else - if (nexthdr == NEXTHDR_DEST) { - temp |= MASK_DSTOPTS; + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_DEST) < 0) #endif - break; - } - + return 0; - /* set the flag */ - switch (nexthdr){ - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_FRAGMENT: - case NEXTHDR_AUTH: - case NEXTHDR_DEST: - break; - default: - DEBUGP("ipv6_opts match: unknown nextheader %u\n",nexthdr); - return 0; - break; - } - - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; - if ( ptr > skb->len ) { - DEBUGP("ipv6_opts: new pointer is too large! \n"); - break; - } - } - - /* OPTIONS header not found */ -#if HOPBYHOP - if ( temp != MASK_HOPOPTS ) return 0; -#else - if ( temp != MASK_DSTOPTS ) return 0; -#endif - - if (len < (int)sizeof(struct ipv6_opt_hdr)){ + oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); + if (oh == NULL){ *hotdrop = 1; return 0; } - if (len < hdrlen){ + hdrlen = ipv6_optlen(oh); + if (skb->len - ptr < hdrlen){ /* Packet smaller than it's length field */ return 0; } - oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); - BUG_ON(oh == NULL); - DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); DEBUGP("len %02X %04X %02X ", diff --git a/net/ipv6/netfilter/ip6t_esp.c b/net/ipv6/netfilter/ip6t_esp.c index e39dd236fd8e..24bc0cde43a1 100644 --- a/net/ipv6/netfilter/ip6t_esp.c +++ b/net/ipv6/netfilter/ip6t_esp.c @@ -48,87 +48,22 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct ip_esp_hdr _esp, *eh = NULL; + struct ip_esp_hdr _esp, *eh; const struct ip6t_esp *espinfo = matchinfo; - unsigned int temp; - int len; - u8 nexthdr; unsigned int ptr; /* Make sure this isn't an evil packet */ /*DEBUGP("ipv6_esp entered \n");*/ - /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; - /* pointer to the 1st exthdr */ - ptr = sizeof(struct ipv6hdr); - /* available length */ - len = skb->len - ptr; - temp = 0; - - while (ip6t_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - int hdrlen; - - DEBUGP("ipv6_esp header iteration \n"); - - /* Is there enough space for the next ext header? 
*/ - if (len < sizeof(struct ipv6_opt_hdr)) - return 0; - /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) - break; - /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) { - temp |= MASK_ESP; - break; - } - - hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); - - /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) - hdrlen = 8; - else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - /* set the flag */ - switch (nexthdr) { - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_FRAGMENT: - case NEXTHDR_AUTH: - case NEXTHDR_DEST: - break; - default: - DEBUGP("ipv6_esp match: unknown nextheader %u\n",nexthdr); - return 0; - } - - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; - if (ptr > skb->len) { - DEBUGP("ipv6_esp: new pointer too large! \n"); - break; - } - } - - /* ESP header not found */ - if (temp != MASK_ESP) + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ESP) < 0) return 0; - if (len < sizeof(struct ip_esp_hdr)) { + eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp); + if (eh == NULL) { *hotdrop = 1; return 0; } - eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp); - BUG_ON(eh == NULL); - DEBUGP("IPv6 ESP SPI %u %08X\n", ntohl(eh->spi), ntohl(eh->spi)); return (eh != NULL) diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index 4bfa30a9bc80..085d5f8eea29 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c @@ -48,90 +48,18 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct frag_hdr _frag, *fh = NULL; + struct frag_hdr _frag, *fh; const struct ip6t_frag *fraginfo = matchinfo; - unsigned int temp; - int len; - u8 nexthdr; unsigned int ptr; - unsigned int hdrlen = 0; - - /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; - /* pointer to the 1st exthdr */ - ptr = sizeof(struct ipv6hdr); - /* available length */ - len = skb->len - ptr; - temp = 0; - - while (ip6t_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - DEBUGP("ipv6_frag header iteration \n"); - - /* Is there enough space for the next ext header? */ - if (len < (int)sizeof(struct ipv6_opt_hdr)) - return 0; - /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) { - break; - } - /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) { - break; - } - - hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); - - /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) { - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - /* FRAG -> evaluate */ - if (nexthdr == NEXTHDR_FRAGMENT) { - temp |= MASK_FRAGMENT; - break; - } - - - /* set the flag */ - switch (nexthdr){ - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_FRAGMENT: - case NEXTHDR_AUTH: - case NEXTHDR_DEST: - break; - default: - DEBUGP("ipv6_frag match: unknown nextheader %u\n",nexthdr); - return 0; - break; - } - - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; - if ( ptr > skb->len ) { - DEBUGP("ipv6_frag: new pointer too large! 
\n"); - break; - } - } - - /* FRAG header not found */ - if ( temp != MASK_FRAGMENT ) return 0; - - if (len < sizeof(struct frag_hdr)){ - *hotdrop = 1; - return 0; - } - fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); - BUG_ON(fh == NULL); + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT) < 0) + return 0; + + fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); + if (fh == NULL){ + *hotdrop = 1; + return 0; + } DEBUGP("INFO %04X ", fh->frag_off); DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 27f3650d127e..1d09485111d0 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c @@ -63,8 +63,6 @@ match(const struct sk_buff *skb, struct ipv6_opt_hdr _optsh, *oh; const struct ip6t_opts *optinfo = matchinfo; unsigned int temp; - unsigned int len; - u8 nexthdr; unsigned int ptr; unsigned int hdrlen = 0; unsigned int ret = 0; @@ -72,97 +70,25 @@ match(const struct sk_buff *skb, u8 _optlen, *lp = NULL; unsigned int optlen; - /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; - /* pointer to the 1st exthdr */ - ptr = sizeof(struct ipv6hdr); - /* available length */ - len = skb->len - ptr; - temp = 0; - - while (ip6t_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - DEBUGP("ipv6_opts header iteration \n"); - - /* Is there enough space for the next ext header? */ - if (len < (int)sizeof(struct ipv6_opt_hdr)) - return 0; - /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) { - break; - } - /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) { - break; - } - - hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); - - /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) { - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - /* OPTS -> evaluate */ #if HOPBYHOP - if (nexthdr == NEXTHDR_HOP) { - temp |= MASK_HOPOPTS; + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP) < 0) #else - if (nexthdr == NEXTHDR_DEST) { - temp |= MASK_DSTOPTS; + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_DEST) < 0) #endif - break; - } - + return 0; - /* set the flag */ - switch (nexthdr){ - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_FRAGMENT: - case NEXTHDR_AUTH: - case NEXTHDR_DEST: - break; - default: - DEBUGP("ipv6_opts match: unknown nextheader %u\n",nexthdr); - return 0; - break; - } - - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; - if ( ptr > skb->len ) { - DEBUGP("ipv6_opts: new pointer is too large! 
\n"); - break; - } - } - - /* OPTIONS header not found */ -#if HOPBYHOP - if ( temp != MASK_HOPOPTS ) return 0; -#else - if ( temp != MASK_DSTOPTS ) return 0; -#endif - - if (len < (int)sizeof(struct ipv6_opt_hdr)){ + oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); + if (oh == NULL){ *hotdrop = 1; return 0; } - if (len < hdrlen){ + hdrlen = ipv6_optlen(oh); + if (skb->len - ptr < hdrlen){ /* Packet smaller than it's length field */ return 0; } - oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); - BUG_ON(oh == NULL); - DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); DEBUGP("len %02X %04X %02X ", diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index 2bb670037df3..beb2fd5cebbb 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c @@ -50,98 +50,29 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct ipv6_rt_hdr _route, *rh = NULL; + struct ipv6_rt_hdr _route, *rh; const struct ip6t_rt *rtinfo = matchinfo; unsigned int temp; - unsigned int len; - u8 nexthdr; unsigned int ptr; unsigned int hdrlen = 0; unsigned int ret = 0; struct in6_addr *ap, _addr; - /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; - /* pointer to the 1st exthdr */ - ptr = sizeof(struct ipv6hdr); - /* available length */ - len = skb->len - ptr; - temp = 0; + if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING) < 0) + return 0; - while (ip6t_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - DEBUGP("ipv6_rt header iteration \n"); - - /* Is there enough space for the next ext header? */ - if (len < (int)sizeof(struct ipv6_opt_hdr)) - return 0; - /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) { - break; - } - /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) { - break; - } - - hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); - BUG_ON(hp == NULL); - - /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) { - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - /* ROUTING -> evaluate */ - if (nexthdr == NEXTHDR_ROUTING) { - temp |= MASK_ROUTING; - break; - } - - - /* set the flag */ - switch (nexthdr){ - case NEXTHDR_HOP: - case NEXTHDR_ROUTING: - case NEXTHDR_FRAGMENT: - case NEXTHDR_AUTH: - case NEXTHDR_DEST: - break; - default: - DEBUGP("ipv6_rt match: unknown nextheader %u\n",nexthdr); - return 0; - break; - } - - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; - if ( ptr > skb->len ) { - DEBUGP("ipv6_rt: new pointer is too large! 
\n"); - break; - } - } - - /* ROUTING header not found */ - if ( temp != MASK_ROUTING ) return 0; - - if (len < (int)sizeof(struct ipv6_rt_hdr)){ + rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); + if (rh == NULL){ *hotdrop = 1; return 0; } - if (len < hdrlen){ + hdrlen = ipv6_optlen(rh); + if (skb->len - ptr < hdrlen){ /* Pcket smaller than its length field */ return 0; } - rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); - BUG_ON(rh == NULL); - DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); DEBUGP("TYPE %04X ", rh->type); DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); -- cgit v1.2.3 From a41bc00234a0a2ccaa99a194341ae108ae17ddc8 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 19 Sep 2005 15:35:31 -0700 Subject: [NETFILTER]: Rename misnamed function Both __ip_conntrack_expect_find and ip_conntrack_expect_find_get take a reference to the expectation, the difference is that callers of __ip_conntrack_expect_find must hold ip_conntrack_lock. Signed-off-by: Patrick McHardy Signed-off-by: Harald Welte Signed-off-by: David S. Miller --- include/linux/netfilter_ipv4/ip_conntrack.h | 2 +- net/ipv4/netfilter/ip_conntrack_core.c | 2 +- net/ipv4/netfilter/ip_conntrack_netlink.c | 4 ++-- net/ipv4/netfilter/ip_conntrack_standalone.c | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index 2df446c952ef..bace72a76cc4 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h @@ -384,7 +384,7 @@ extern struct ip_conntrack_expect * __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple); extern struct ip_conntrack_expect * -ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple); +ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple); extern struct ip_conntrack_tuple_hash * __ip_conntrack_find(const struct ip_conntrack_tuple *tuple, diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index f8cd8e42961e..c1f82e0c81cf 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -233,7 +233,7 @@ __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) /* Just find a expectation corresponding to a tuple. 
*/ struct ip_conntrack_expect * -ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple) +ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) { struct ip_conntrack_expect *i; diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index 15aef3564742..b08a432efcf8 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c @@ -1270,7 +1270,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, if (err < 0) return err; - exp = ip_conntrack_expect_find_get(&tuple); + exp = ip_conntrack_expect_find(&tuple); if (!exp) return -ENOENT; @@ -1318,7 +1318,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, return err; /* bump usage count to 2 */ - exp = ip_conntrack_expect_find_get(&tuple); + exp = ip_conntrack_expect_find(&tuple); if (!exp) return -ENOENT; diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index ae3e3e655db5..d3c7808010ec 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -993,11 +993,11 @@ EXPORT_SYMBOL(ip_ct_refresh_acct); EXPORT_SYMBOL(ip_conntrack_expect_alloc); EXPORT_SYMBOL(ip_conntrack_expect_put); -EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get); +EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find); +EXPORT_SYMBOL_GPL(ip_conntrack_expect_find); EXPORT_SYMBOL(ip_conntrack_expect_related); EXPORT_SYMBOL(ip_conntrack_unexpect_related); EXPORT_SYMBOL_GPL(ip_conntrack_expect_list); -EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find); EXPORT_SYMBOL_GPL(ip_ct_unlink_expect); EXPORT_SYMBOL(ip_conntrack_tuple_taken); -- cgit v1.2.3 From 3c3f8f25c177e4f9e4e00bcc1b90b28b1be37937 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 19 Sep 2005 15:41:28 -0700 Subject: [8021Q]: Add endian annotations. Signed-off-by: Alexey Dobriyan Signed-off-by: David S. 
Miller --- include/linux/if_vlan.h | 8 ++++---- net/8021q/vlan_dev.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 17d0c0d40b0e..eef0876d8307 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -42,8 +42,8 @@ struct hlist_node; struct vlan_ethhdr { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_source[ETH_ALEN]; /* source ether addr */ - unsigned short h_vlan_proto; /* Should always be 0x8100 */ - unsigned short h_vlan_TCI; /* Encapsulates priority and VLAN ID */ + __be16 h_vlan_proto; /* Should always be 0x8100 */ + __be16 h_vlan_TCI; /* Encapsulates priority and VLAN ID */ unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */ }; @@ -55,8 +55,8 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) } struct vlan_hdr { - unsigned short h_vlan_TCI; /* Encapsulates priority and VLAN ID */ - unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */ + __be16 h_vlan_TCI; /* Encapsulates priority and VLAN ID */ + __be16 h_vlan_encapsulated_proto; /* packet type ID field (or len) */ }; #define VLAN_VID_MASK 0xfff diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 145f5cde96cf..b74864889670 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -120,7 +120,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, unsigned short vid; struct net_device_stats *stats; unsigned short vlan_TCI; - unsigned short proto; + __be16 proto; /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */ vlan_TCI = ntohs(vhdr->h_vlan_TCI); -- cgit v1.2.3 From e0487992ce1dd7ae7da9c6aabdb19570bb95432b Mon Sep 17 00:00:00 2001 From: "Ed L. Cashin" Date: Mon, 19 Sep 2005 19:57:36 -0700 Subject: [BYTEORDER]: Document alignment and byteorder macros This patch comments the fact that although passing le64_to_cpup et al. is within the intended use of the byteorder macros, using get_unaligned is the recommended way to go. Signed-off-by: Ed L. Cashin Signed-off-by: David S. Miller --- include/linux/byteorder/generic.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index 5fde6f4d6c1e..04bd756efc67 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h @@ -5,6 +5,10 @@ * linux/byteorder_generic.h * Generic Byte-reordering support * + * The "... p" macros, like le64_to_cpup, can be used with pointers + * to unaligned data, but there will be a performance penalty on + * some architectures. Use get_unaligned for unaligned data. + * * Francois-Rene Rideau 19970707 * gathered all the good ideas from all asm-foo/byteorder.h into one file, * cleaned them up. -- cgit v1.2.3 From 7e871b6c8f1f4fda41e51ef86147facecac3be9f Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Wed, 21 Sep 2005 09:55:38 -0700 Subject: [PATCH] mm: update stale comment for removal of page->list Update comment for the 2.6.6-rc1 conversion from page->list and address_space->{clean,dirty,locked}_pages to radix tree tagging and ->lru. I've mostly avoided to mention page lists (at least I've shortened the comment). 
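As a rough illustration (hypothetical snippet, not taken from this patch), code that used to walk mapping->dirty_pages now finds dirty pages through the radix tree tag lookups, much as the generic writeback path does:

	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned nr;

	pagevec_init(&pvec, 0);
	/* gather up to PAGEVEC_SIZE pages tagged dirty, starting at 'index' */
	nr = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE);

Here 'mapping' stands for the address_space being written back; locking and the per-page work are left out.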
Signed-off-by: Paolo 'Blaisorblade' Giarrusso Acked-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 82d7024f0765..0d94c94d9d81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -350,7 +350,8 @@ static inline void put_page(struct page *page) * only one copy in memory, at most, normally. * * For the non-reserved pages, page_count(page) denotes a reference count. - * page_count() == 0 means the page is free. + * page_count() == 0 means the page is free. page->lru is then used for + * freelist management in the buddy allocator. * page_count() == 1 means the page is used for exactly one purpose * (e.g. a private data page of one process). * @@ -376,10 +377,8 @@ static inline void put_page(struct page *page) * attaches, plus 1 if `private' contains something, plus one for * the page cache itself. * - * All pages belonging to an inode are in these doubly linked lists: - * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages; - * using the page->list list_head. These fields are also used for - * freelist managemet (when page_count()==0). + * Instead of keeping dirty/clean pages in per address-space lists, we instead + * now tag pages as dirty/under writeback in the radix tree. * * There is also a per-mapping radix tree mapping index to the page * in memory if present. The tree is rooted at mapping->root. -- cgit v1.2.3 From 7e2cff42cfac27c25202648c5c89f9171e5bc085 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Wed, 21 Sep 2005 09:55:39 -0700 Subject: [PATCH] mm: add a note about partially hardcoded VM_* flags Hugh made me note this line for permission checking in mprotect(): if ((newflags & ~(newflags >> 4)) & 0xf) { after figuring out what's that about, I decided it's nasty enough. Btw Hugh itself didn't like the 0xf. We can safely change it to VM_READ|VM_WRITE|VM_EXEC because we never change VM_SHARED, so no need to check that. Signed-off-by: Paolo 'Blaisorblade' Giarrusso Acked-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/mprotect.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d94c94d9d81..097b3a3c693d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -136,6 +136,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_EXEC 0x00000004 #define VM_SHARED 0x00000008 +/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. 
*/ #define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ #define VM_MAYWRITE 0x00000020 #define VM_MAYEXEC 0x00000040 diff --git a/mm/mprotect.c b/mm/mprotect.c index e9fbd013ad9a..57577f63b305 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot) newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); - if ((newflags & ~(newflags >> 4)) & 0xf) { + /* newflags >> 4 shift VM_MAY% in place of VM_% */ + if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { error = -EACCES; goto out; } -- cgit v1.2.3 From 7980cbbb30bf044e6f40912a3f6456204ddfc27e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 21 Sep 2005 09:55:43 -0700 Subject: [PATCH] Adds sys_set_mempolicy() in include/linux/syscalls.h Signed-off-by: Eric Dumazet Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/syscalls.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 425f58c8ea4a..a6f03e473737 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -508,5 +508,7 @@ asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, asmlinkage long sys_ioprio_set(int which, int who, int ioprio); asmlinkage long sys_ioprio_get(int which, int who); +asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, + unsigned long maxnode); #endif -- cgit v1.2.3 From e86ee6682b649183c11013a98be02f25e9ae399d Mon Sep 17 00:00:00 2001 From: Andy Currid Date: Mon, 19 Sep 2005 06:17:52 -0700 Subject: [PATCH] Add NVIDIA device ID in sata_nv Signed-off-by: Andy Currid Signed-off-by: Jeff Garzik --- drivers/scsi/sata_nv.c | 2 ++ include/linux/pci_ids.h | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index a1d62dee3be6..c05653c7779d 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c @@ -158,6 +158,8 @@ static struct pci_device_id nv_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f6c1a142286a..cb414ea42f02 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1267,7 +1267,8 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E -#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F #define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 #define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 #define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B -- cgit v1.2.3 From 590232a7150674b2036291eaefce085f3f9659c8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 22 Sep 2005 04:30:44 -0300 Subject: [LLC]: Add sysctl support for the LLC timeouts Signed-off-by: Jochen Friedrich Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/sysctl.h | 26 ++++++++- include/net/llc.h | 7 +++ include/net/llc_conn.h | 10 ++-- net/llc/Makefile | 1 + net/llc/af_llc.c | 
47 +++++++++++----- net/llc/llc_c_ac.c | 12 ++--- net/llc/llc_conn.c | 13 +++-- net/llc/llc_station.c | 11 ++-- net/llc/sysctl_net_llc.c | 136 +++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 232 insertions(+), 31 deletions(-) create mode 100644 net/llc/sysctl_net_llc.c (limited to 'include/linux') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 3a29a9f9b451..fc8e367f671e 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -202,7 +202,8 @@ enum NET_TR=14, NET_DECNET=15, NET_ECONET=16, - NET_SCTP=17, + NET_SCTP=17, + NET_LLC=18, }; /* /proc/sys/kernel/random */ @@ -522,6 +523,29 @@ enum { NET_IPX_FORWARDING=2 }; +/* /proc/sys/net/llc */ +enum { + NET_LLC2=1, + NET_LLC_STATION=2, +}; + +/* /proc/sys/net/llc/llc2 */ +enum { + NET_LLC2_TIMEOUT=1, +}; + +/* /proc/sys/net/llc/station */ +enum { + NET_LLC_STATION_ACK_TIMEOUT=1, +}; + +/* /proc/sys/net/llc/llc2/timeout */ +enum { + NET_LLC2_ACK_TIMEOUT=1, + NET_LLC2_P_TIMEOUT=2, + NET_LLC2_REJ_TIMEOUT=3, + NET_LLC2_BUSY_TIMEOUT=4, +}; /* /proc/sys/net/appletalk */ enum { diff --git a/include/net/llc.h b/include/net/llc.h index 71769a5aeef3..8b8e2be289b1 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -98,4 +98,11 @@ extern void llc_proc_exit(void); #define llc_proc_init() (0) #define llc_proc_exit() do { } while(0) #endif /* CONFIG_PROC_FS */ +#ifdef CONFIG_SYSCTL +extern int llc_sysctl_init(void); +extern void llc_sysctl_exit(void); +#else +#define llc_sysctl_init() (0) +#define llc_sysctl_exit() do { } while(0) +#endif /* CONFIG_SYSCTL */ #endif /* LLC_H */ diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index 8ad3bc2c23d7..8a8ff4810135 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -19,14 +19,14 @@ #define LLC_EVENT 1 #define LLC_PACKET 2 -#define LLC_P_TIME 2 -#define LLC_ACK_TIME 1 -#define LLC_REJ_TIME 3 -#define LLC_BUSY_TIME 3 +#define LLC2_P_TIME 2 +#define LLC2_ACK_TIME 1 +#define LLC2_REJ_TIME 3 +#define LLC2_BUSY_TIME 3 struct llc_timer { struct timer_list timer; - u16 expire; /* timer expire time */ + unsigned long expire; /* timer expire time */ }; struct llc_sock { diff --git a/net/llc/Makefile b/net/llc/Makefile index 5ebd4ed2bd42..4e260cff3c5d 100644 --- a/net/llc/Makefile +++ b/net/llc/Makefile @@ -22,3 +22,4 @@ llc2-y := llc_if.o llc_c_ev.o llc_c_ac.o llc_conn.o llc_c_st.o llc_pdu.o \ llc_sap.o llc_s_ac.o llc_s_ev.o llc_s_st.o af_llc.o llc_station.o llc2-$(CONFIG_PROC_FS) += llc_proc.o +llc2-$(CONFIG_SYSCTL) += sysctl_net_llc.o diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 95444f227510..ef125345a2db 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -877,22 +877,22 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, case LLC_OPT_ACK_TMR_EXP: if (opt > LLC_OPT_MAX_ACK_TMR_EXP) goto out; - llc->ack_timer.expire = opt; + llc->ack_timer.expire = opt * HZ; break; case LLC_OPT_P_TMR_EXP: if (opt > LLC_OPT_MAX_P_TMR_EXP) goto out; - llc->pf_cycle_timer.expire = opt; + llc->pf_cycle_timer.expire = opt * HZ; break; case LLC_OPT_REJ_TMR_EXP: if (opt > LLC_OPT_MAX_REJ_TMR_EXP) goto out; - llc->rej_sent_timer.expire = opt; + llc->rej_sent_timer.expire = opt * HZ; break; case LLC_OPT_BUSY_TMR_EXP: if (opt > LLC_OPT_MAX_BUSY_TMR_EXP) goto out; - llc->busy_state_timer.expire = opt; + llc->busy_state_timer.expire = opt * HZ; break; case LLC_OPT_TX_WIN: if (opt > LLC_OPT_MAX_WIN) @@ -942,17 +942,17 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname, goto out; switch (optname) { case 
LLC_OPT_RETRY: - val = llc->n2; break; + val = llc->n2; break; case LLC_OPT_SIZE: - val = llc->n1; break; + val = llc->n1; break; case LLC_OPT_ACK_TMR_EXP: - val = llc->ack_timer.expire; break; + val = llc->ack_timer.expire / HZ; break; case LLC_OPT_P_TMR_EXP: - val = llc->pf_cycle_timer.expire; break; + val = llc->pf_cycle_timer.expire / HZ; break; case LLC_OPT_REJ_TMR_EXP: - val = llc->rej_sent_timer.expire; break; + val = llc->rej_sent_timer.expire / HZ; break; case LLC_OPT_BUSY_TMR_EXP: - val = llc->busy_state_timer.expire; break; + val = llc->busy_state_timer.expire / HZ; break; case LLC_OPT_TX_WIN: val = llc->k; break; case LLC_OPT_RX_WIN: @@ -999,6 +999,13 @@ static struct proto_ops llc_ui_ops = { extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); +static char llc_proc_err_msg[] __initdata = + KERN_CRIT "LLC: Unable to register the proc_fs entries\n"; +static char llc_sysctl_err_msg[] __initdata = + KERN_CRIT "LLC: Unable to register the sysctl entries\n"; +static char llc_sock_err_msg[] __initdata = + KERN_CRIT "LLC: Unable to register the network family\n"; + static int __init llc2_init(void) { int rc = proto_register(&llc_proto, 0); @@ -1010,13 +1017,28 @@ static int __init llc2_init(void) llc_station_init(); llc_ui_sap_last_autoport = LLC_SAP_DYN_START; rc = llc_proc_init(); - if (rc != 0) + if (rc != 0) { + printk(llc_proc_err_msg); goto out_unregister_llc_proto; - sock_register(&llc_ui_family_ops); + } + rc = llc_sysctl_init(); + if (rc) { + printk(llc_sysctl_err_msg); + goto out_proc; + } + rc = sock_register(&llc_ui_family_ops); + if (rc) { + printk(llc_sock_err_msg); + goto out_sysctl; + } llc_add_pack(LLC_DEST_SAP, llc_sap_handler); llc_add_pack(LLC_DEST_CONN, llc_conn_handler); out: return rc; +out_sysctl: + llc_sysctl_exit(); +out_proc: + llc_proc_exit(); out_unregister_llc_proto: proto_unregister(&llc_proto); goto out; @@ -1029,6 +1051,7 @@ static void __exit llc2_exit(void) llc_remove_pack(LLC_DEST_CONN); sock_unregister(PF_LLC); llc_proc_exit(); + llc_sysctl_exit(); proto_unregister(&llc_proto); } diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 50c827e13a9f..a4daa91003dd 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -620,7 +620,7 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) if (!llc->remote_busy_flag) { llc->remote_busy_flag = 1; mod_timer(&llc->busy_state_timer.timer, - jiffies + llc->busy_state_timer.expire * HZ); + jiffies + llc->busy_state_timer.expire); } return 0; } @@ -853,7 +853,7 @@ int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) llc_conn_set_p_flag(sk, 1); mod_timer(&llc->pf_cycle_timer.timer, - jiffies + llc->pf_cycle_timer.expire * HZ); + jiffies + llc->pf_cycle_timer.expire); return 0; } @@ -1131,7 +1131,7 @@ int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); - mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire * HZ); + mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } @@ -1140,7 +1140,7 @@ int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->rej_sent_timer.timer, - jiffies + llc->rej_sent_timer.expire * HZ); + jiffies + llc->rej_sent_timer.expire); return 0; } @@ -1151,7 +1151,7 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, if (!timer_pending(&llc->ack_timer.timer)) mod_timer(&llc->ack_timer.timer, - jiffies + 
llc->ack_timer.expire * HZ); + jiffies + llc->ack_timer.expire); return 0; } @@ -1199,7 +1199,7 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) } if (unacked) mod_timer(&llc->ack_timer.timer, - jiffies + llc->ack_timer.expire * HZ); + jiffies + llc->ack_timer.expire); } else if (llc->failed_data_req) { u8 f_bit; diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index ce7b893ed1ab..d3783f8ee481 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -40,6 +40,11 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, /* Offset table on connection states transition diagram */ static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV]; +int sysctl_llc2_ack_timeout = LLC2_ACK_TIME * HZ; +int sysctl_llc2_p_timeout = LLC2_P_TIME * HZ; +int sysctl_llc2_rej_timeout = LLC2_REJ_TIME * HZ; +int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ; + /** * llc_conn_state_process - sends event to connection state machine * @sk: connection @@ -799,22 +804,22 @@ static void llc_sk_init(struct sock* sk) llc->dec_step = llc->connect_step = 1; init_timer(&llc->ack_timer.timer); - llc->ack_timer.expire = LLC_ACK_TIME; + llc->ack_timer.expire = sysctl_llc2_ack_timeout; llc->ack_timer.timer.data = (unsigned long)sk; llc->ack_timer.timer.function = llc_conn_ack_tmr_cb; init_timer(&llc->pf_cycle_timer.timer); - llc->pf_cycle_timer.expire = LLC_P_TIME; + llc->pf_cycle_timer.expire = sysctl_llc2_p_timeout; llc->pf_cycle_timer.timer.data = (unsigned long)sk; llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb; init_timer(&llc->rej_sent_timer.timer); - llc->rej_sent_timer.expire = LLC_REJ_TIME; + llc->rej_sent_timer.expire = sysctl_llc2_rej_timeout; llc->rej_sent_timer.timer.data = (unsigned long)sk; llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb; init_timer(&llc->busy_state_timer.timer); - llc->busy_state_timer.expire = LLC_BUSY_TIME; + llc->busy_state_timer.expire = sysctl_llc2_busy_timeout; llc->busy_state_timer.timer.data = (unsigned long)sk; llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb; diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 85a7ac276141..2d764b0382ce 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -50,6 +50,10 @@ struct llc_station { struct sk_buff_head mac_pdu_q; }; +#define LLC_STATION_ACK_TIME (3 * HZ) + +int sysctl_llc_station_ack_timeout = LLC_STATION_ACK_TIME; + /* Types of events (possible values in 'ev->type') */ #define LLC_STATION_EV_TYPE_SIMPLE 1 #define LLC_STATION_EV_TYPE_CONDITION 2 @@ -218,7 +222,8 @@ static void llc_station_send_pdu(struct sk_buff *skb) static int llc_station_ac_start_ack_timer(struct sk_buff *skb) { - mod_timer(&llc_main_station.ack_timer, jiffies + LLC_ACK_TIME * HZ); + mod_timer(&llc_main_station.ack_timer, + jiffies + sysctl_llc_station_ack_timeout); return 0; } @@ -687,7 +692,8 @@ int __init llc_station_init(void) init_timer(&llc_main_station.ack_timer); llc_main_station.ack_timer.data = (unsigned long)&llc_main_station; llc_main_station.ack_timer.function = llc_station_ack_tmr_cb; - + llc_main_station.ack_timer.expires = jiffies + + sysctl_llc_station_ack_timeout; skb = alloc_skb(0, GFP_ATOMIC); if (!skb) goto out; @@ -695,7 +701,6 @@ int __init llc_station_init(void) llc_set_station_handler(llc_station_rcv); ev = llc_station_ev(skb); memset(ev, 0, sizeof(*ev)); - llc_main_station.ack_timer.expires = jiffies + 3 * HZ; llc_main_station.maximum_retry = 1; llc_main_station.state = LLC_STATION_STATE_DOWN; ev->type = LLC_STATION_EV_TYPE_SIMPLE; diff --git 
a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c new file mode 100644 index 000000000000..4d99d2f27a21 --- /dev/null +++ b/net/llc/sysctl_net_llc.c @@ -0,0 +1,136 @@ +/* + * sysctl_net_llc.c: sysctl interface to LLC net subsystem. + * + * Arnaldo Carvalho de Melo + */ + +#include +#include +#include +#include + +#ifndef CONFIG_SYSCTL +#error This file should not be compiled without CONFIG_SYSCTL defined +#endif + +extern int sysctl_llc2_ack_timeout; +extern int sysctl_llc2_busy_timeout; +extern int sysctl_llc2_p_timeout; +extern int sysctl_llc2_rej_timeout; +extern int sysctl_llc_station_ack_timeout; + +static struct ctl_table llc2_timeout_table[] = { + { + .ctl_name = NET_LLC2_ACK_TIMEOUT, + .procname = "ack", + .data = &sysctl_llc2_ack_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { + .ctl_name = NET_LLC2_BUSY_TIMEOUT, + .procname = "busy", + .data = &sysctl_llc2_busy_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { + .ctl_name = NET_LLC2_P_TIMEOUT, + .procname = "p", + .data = &sysctl_llc2_p_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { + .ctl_name = NET_LLC2_REJ_TIMEOUT, + .procname = "rej", + .data = &sysctl_llc2_rej_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { 0 }, +}; + +static struct ctl_table llc_station_table[] = { + { + .ctl_name = NET_LLC_STATION_ACK_TIMEOUT, + .procname = "ack_timeout", + .data = &sysctl_llc_station_ack_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { 0 }, +}; + +static struct ctl_table llc2_dir_timeout_table[] = { + { + .ctl_name = NET_LLC2, + .procname = "timeout", + .mode = 0555, + .child = llc2_timeout_table, + }, + { 0 }, +}; + +static struct ctl_table llc_table[] = { + { + .ctl_name = NET_LLC2, + .procname = "llc2", + .mode = 0555, + .child = llc2_dir_timeout_table, + }, + { + .ctl_name = NET_LLC_STATION, + .procname = "station", + .mode = 0555, + .child = llc_station_table, + }, + { 0 }, +}; + +static struct ctl_table llc_dir_table[] = { + { + .ctl_name = NET_LLC, + .procname = "llc", + .mode = 0555, + .child = llc_table, + }, + { 0 }, +}; + +static struct ctl_table llc_root_table[] = { + { + .ctl_name = CTL_NET, + .procname = "net", + .mode = 0555, + .child = llc_dir_table, + }, + { 0 }, +}; + +static struct ctl_table_header *llc_table_header; + +int __init llc_sysctl_init(void) +{ + llc_table_header = register_sysctl_table(llc_root_table, 1); + + return llc_table_header ? 0 : -ENOMEM; +} + +void llc_sysctl_exit(void) +{ + if (llc_table_header) { + unregister_sysctl_table(llc_table_header); + llc_table_header = NULL; + } +} -- cgit v1.2.3 From d305ef5d2a4e77bfa66160513f4a7494126a506b Mon Sep 17 00:00:00 2001 From: Daniel Ritz Date: Thu, 22 Sep 2005 00:47:24 -0700 Subject: [PATCH] driver core: add helper device_is_registered() add the helper and use it instead of open coding the klist_node_attached() check (which is a layering violation IMHO) idea by Alan Stern. 
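As an illustration of the intended usage (this snippet is not part of the patch; the usb_interface pointer, function name and error handling are hypothetical), a bus driver that previously open-coded the klist_node_attached() test would now simply ask the driver core:

#include <linux/device.h>
#include <linux/usb.h>

/*
 * Illustrative sketch only: unbind a driver from an interface, but only if
 * device_add() has already attached the device to its bus.  This mirrors
 * the usb_driver_release_interface() change in the patch above.
 */
static int example_release_if_registered(struct usb_interface *intf)
{
	struct device *dev = &intf->dev;

	/* replaces the open-coded klist_node_attached(&dev->knode_bus) check */
	if (!device_is_registered(dev))
		return -ENODEV;

	device_release_driver(dev);
	return 0;
}
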
Signed-off-by: Daniel Ritz Cc: Alan Stern Signed-off-by: Greg Kroah-Hartman Signed-off-by: Linus Torvalds --- drivers/s390/cio/ccwgroup.c | 2 +- drivers/usb/core/message.c | 2 +- drivers/usb/core/usb.c | 6 +++--- include/linux/device.h | 5 +++++ 4 files changed, 10 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 91ea8e4777f3..dbb3eb0e330b 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -437,7 +437,7 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) if (cdev->dev.driver_data) { gdev = (struct ccwgroup_device *)cdev->dev.driver_data; if (get_device(&gdev->dev)) { - if (klist_node_attached(&gdev->dev.knode_bus)) + if (device_is_registered(&gdev->dev)) return gdev; put_device(&gdev->dev); } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index c47c8052b486..f1fb67fe22a8 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -987,7 +987,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) /* remove this interface if it has been registered */ interface = dev->actconfig->interface[i]; - if (!klist_node_attached(&interface->dev.knode_bus)) + if (!device_is_registered(&interface->dev)) continue; dev_dbg (&dev->dev, "unregistering interface %s\n", interface->dev.bus_id); diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 087af73a59dd..7d131509e419 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -303,7 +303,7 @@ int usb_driver_claim_interface(struct usb_driver *driver, /* if interface was already added, bind now; else let * the future device_add() bind it, bypassing probe() */ - if (klist_node_attached(&dev->knode_bus)) + if (device_is_registered(dev)) device_bind_driver(dev); return 0; @@ -336,8 +336,8 @@ void usb_driver_release_interface(struct usb_driver *driver, if (iface->condition != USB_INTERFACE_BOUND) return; - /* release only after device_add() */ - if (klist_node_attached(&dev->knode_bus)) { + /* don't release if the interface hasn't been added yet */ + if (device_is_registered(dev)) { iface->condition = USB_INTERFACE_UNBINDING; device_release_driver(dev); } diff --git a/include/linux/device.h b/include/linux/device.h index 06e5d42f2c7b..95d607a48f06 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -317,6 +317,11 @@ dev_set_drvdata (struct device *dev, void *data) dev->driver_data = data; } +static inline int device_is_registered(struct device *dev) +{ + return klist_node_attached(&dev->knode_bus); +} + /* * High level routines for use by the bus drivers */ -- cgit v1.2.3 From e4c94330e3395ae87451bded2840a25d04f27902 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 22 Sep 2005 21:43:45 -0700 Subject: [PATCH] reboot: comment and factor the main reboot functions In the lead up to 2.6.13 I fixed a large number of reboot problems by making the calling conventions consistent. Despite checking and double checking my work it appears I missed an obvious one. This first patch simply refactors the reboot routines so all of the preparation for various kinds of reboots are in their own functions. Making it very hard to get the various kinds of reboot out of sync. Signed-off-by: Eric W. 
Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/reboot.h | 4 ++++ kernel/sys.c | 52 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 50 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 3b3266ff1a95..7ab2cdb83ef0 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -59,6 +59,10 @@ extern void machine_crash_shutdown(struct pt_regs *); * Architecture independent implemenations of sys_reboot commands. */ +extern void kernel_restart_prepare(char *cmd); +extern void kernel_halt_prepare(void); +extern void kernel_power_off_prepare(void); + extern void kernel_restart(char *cmd); extern void kernel_halt(void); extern void kernel_power_off(void); diff --git a/kernel/sys.c b/kernel/sys.c index f723522e6986..2fa1ed18123c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -361,17 +361,35 @@ out_unlock: return retval; } +/** + * emergency_restart - reboot the system + * + * Without shutting down any hardware or taking any locks + * reboot the system. This is called when we know we are in + * trouble so this is our best effort to reboot. This is + * safe to call in interrupt context. + */ void emergency_restart(void) { machine_emergency_restart(); } EXPORT_SYMBOL_GPL(emergency_restart); -void kernel_restart(char *cmd) +/** + * kernel_restart - reboot the system + * + * Shutdown everything and perform a clean reboot. + * This is not safe to call in interrupt context. + */ +void kernel_restart_prepare(char *cmd) { notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; device_shutdown(); +} +void kernel_restart(char *cmd) +{ + kernel_restart_prepare(cmd); if (!cmd) { printk(KERN_EMERG "Restarting system.\n"); } else { @@ -382,6 +400,12 @@ void kernel_restart(char *cmd) } EXPORT_SYMBOL_GPL(kernel_restart); +/** + * kernel_kexec - reboot the system + * + * Move into place and start executing a preloaded standalone + * executable. If nothing was preloaded return an error. + */ void kernel_kexec(void) { #ifdef CONFIG_KEXEC @@ -390,9 +414,7 @@ void kernel_kexec(void) if (!image) { return; } - notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); - system_state = SYSTEM_RESTART; - device_shutdown(); + kernel_restart_prepare(NULL); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); machine_kexec(image); @@ -400,21 +422,39 @@ void kernel_kexec(void) } EXPORT_SYMBOL_GPL(kernel_kexec); -void kernel_halt(void) +/** + * kernel_halt - halt the system + * + * Shutdown everything and perform a clean system halt. + */ +void kernel_halt_prepare(void) { notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); system_state = SYSTEM_HALT; device_shutdown(); +} +void kernel_halt(void) +{ + kernel_halt_prepare(); printk(KERN_EMERG "System halted.\n"); machine_halt(); } EXPORT_SYMBOL_GPL(kernel_halt); -void kernel_power_off(void) +/** + * kernel_power_off - power_off the system + * + * Shutdown everything and perform a clean system power_off. 
+ */ +void kernel_power_off_prepare(void) { notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); system_state = SYSTEM_POWER_OFF; device_shutdown(); +} +void kernel_power_off(void) +{ + kernel_power_off_prepare(); printk(KERN_EMERG "Power down.\n"); machine_power_off(); } -- cgit v1.2.3 From 67497205b12e3cb408259cc09b50c3a9d12cd935 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 22 Sep 2005 23:45:24 -0700 Subject: [NETFILTER] Fix sparse endian warnings in pptp helper Signed-off-by: Alexey Dobriyan Signed-off-by: Harald Welte Signed-off-by: David S. Miller --- include/linux/netfilter_ipv4/ip_conntrack_pptp.h | 118 +++++++++++----------- include/linux/netfilter_ipv4/ip_conntrack_tuple.h | 6 +- net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 14 +-- 3 files changed, 70 insertions(+), 68 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h index 389e3851d52f..50a761db5a04 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h @@ -60,8 +60,8 @@ struct ip_ct_pptp_expect { struct pptp_pkt_hdr { __u16 packetLength; - __u16 packetType; - __u32 magicCookie; + __be16 packetType; + __be32 magicCookie; }; /* PptpControlMessageType values */ @@ -93,7 +93,7 @@ struct pptp_pkt_hdr { #define PPTP_REMOVE_DEVICE_ERROR 6 struct PptpControlHeader { - __u16 messageType; + __be16 messageType; __u16 reserved; }; @@ -106,13 +106,13 @@ struct PptpControlHeader { #define PPTP_BEARER_CAP_DIGITAL 0x2 struct PptpStartSessionRequest { - __u16 protocolVersion; + __be16 protocolVersion; __u8 reserved1; __u8 reserved2; - __u32 framingCapability; - __u32 bearerCapability; - __u16 maxChannels; - __u16 firmwareRevision; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; __u8 hostName[64]; __u8 vendorString[64]; }; @@ -125,13 +125,13 @@ struct PptpStartSessionRequest { #define PPTP_START_UNKNOWN_PROTOCOL 5 struct PptpStartSessionReply { - __u16 protocolVersion; + __be16 protocolVersion; __u8 resultCode; __u8 generalErrorCode; - __u32 framingCapability; - __u32 bearerCapability; - __u16 maxChannels; - __u16 firmwareRevision; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; __u8 hostName[64]; __u8 vendorString[64]; }; @@ -155,7 +155,7 @@ struct PptpStopSessionReply { }; struct PptpEchoRequest { - __u32 identNumber; + __be32 identNumber; }; /* PptpEchoReplyResultCode */ @@ -163,7 +163,7 @@ struct PptpEchoRequest { #define PPTP_ECHO_GENERAL_ERROR 2 struct PptpEchoReply { - __u32 identNumber; + __be32 identNumber; __u8 resultCode; __u8 generalErrorCode; __u16 reserved; @@ -180,16 +180,16 @@ struct PptpEchoReply { #define PPTP_DONT_CARE_BEARER_TYPE 3 struct PptpOutCallRequest { - __u16 callID; - __u16 callSerialNumber; - __u32 minBPS; - __u32 maxBPS; - __u32 bearerType; - __u32 framingType; - __u16 packetWindow; - __u16 packetProcDelay; + __be16 callID; + __be16 callSerialNumber; + __be32 minBPS; + __be32 maxBPS; + __be32 bearerType; + __be32 framingType; + __be16 packetWindow; + __be16 packetProcDelay; __u16 reserved1; - __u16 phoneNumberLength; + __be16 phoneNumberLength; __u16 reserved2; __u8 phoneNumber[64]; __u8 subAddress[64]; @@ -205,24 +205,24 @@ struct PptpOutCallRequest { #define PPTP_OUTCALL_DONT_ACCEPT 7 struct PptpOutCallReply { - __u16 callID; - __u16 peersCallID; + __be16 callID; + __be16 peersCallID; __u8 resultCode; __u8 
generalErrorCode; - __u16 causeCode; - __u32 connectSpeed; - __u16 packetWindow; - __u16 packetProcDelay; - __u32 physChannelID; + __be16 causeCode; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 physChannelID; }; struct PptpInCallRequest { - __u16 callID; - __u16 callSerialNumber; - __u32 callBearerType; - __u32 physChannelID; - __u16 dialedNumberLength; - __u16 dialingNumberLength; + __be16 callID; + __be16 callSerialNumber; + __be32 callBearerType; + __be32 physChannelID; + __be16 dialedNumberLength; + __be16 dialingNumberLength; __u8 dialedNumber[64]; __u8 dialingNumber[64]; __u8 subAddress[64]; @@ -234,54 +234,54 @@ struct PptpInCallRequest { #define PPTP_INCALL_DONT_ACCEPT 3 struct PptpInCallReply { - __u16 callID; - __u16 peersCallID; + __be16 callID; + __be16 peersCallID; __u8 resultCode; __u8 generalErrorCode; - __u16 packetWindow; - __u16 packetProcDelay; + __be16 packetWindow; + __be16 packetProcDelay; __u16 reserved; }; struct PptpInCallConnected { - __u16 peersCallID; + __be16 peersCallID; __u16 reserved; - __u32 connectSpeed; - __u16 packetWindow; - __u16 packetProcDelay; - __u32 callFramingType; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 callFramingType; }; struct PptpClearCallRequest { - __u16 callID; + __be16 callID; __u16 reserved; }; struct PptpCallDisconnectNotify { - __u16 callID; + __be16 callID; __u8 resultCode; __u8 generalErrorCode; - __u16 causeCode; + __be16 causeCode; __u16 reserved; __u8 callStatistics[128]; }; struct PptpWanErrorNotify { - __u16 peersCallID; + __be16 peersCallID; __u16 reserved; - __u32 crcErrors; - __u32 framingErrors; - __u32 hardwareOverRuns; - __u32 bufferOverRuns; - __u32 timeoutErrors; - __u32 alignmentErrors; + __be32 crcErrors; + __be32 framingErrors; + __be32 hardwareOverRuns; + __be32 bufferOverRuns; + __be32 timeoutErrors; + __be32 alignmentErrors; }; struct PptpSetLinkInfo { - __u16 peersCallID; + __be16 peersCallID; __u16 reserved; - __u32 sendAccm; - __u32 recvAccm; + __be32 sendAccm; + __be32 recvAccm; }; diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h index 14dc0f7b6556..20e43f018b7c 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h @@ -17,7 +17,7 @@ union ip_conntrack_manip_proto u_int16_t all; struct { - u_int16_t port; + __be16 port; } tcp; struct { u_int16_t port; @@ -29,7 +29,7 @@ union ip_conntrack_manip_proto u_int16_t port; } sctp; struct { - u_int16_t key; /* key is 32bit, pptp only uses 16 */ + __be16 key; /* key is 32bit, pptp only uses 16 */ } gre; }; @@ -65,7 +65,7 @@ struct ip_conntrack_tuple u_int16_t port; } sctp; struct { - u_int16_t key; /* key is 32bit, + __be16 key; /* key is 32bit, * pptp only uses 16 */ } gre; } u; diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c index 117587978700..8236ee0fb090 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c @@ -223,8 +223,8 @@ static void pptp_destroy_siblings(struct ip_conntrack *ct) static inline int exp_gre(struct ip_conntrack *master, u_int32_t seq, - u_int16_t callid, - u_int16_t peer_callid) + __be16 callid, + __be16 peer_callid) { struct ip_conntrack_tuple inv_tuple; struct ip_conntrack_tuple exp_tuples[] = { @@ -263,7 +263,7 @@ exp_gre(struct ip_conntrack *master, exp_orig->mask.src.ip = 0xffffffff; exp_orig->mask.src.u.all = 0; 
exp_orig->mask.dst.u.all = 0; - exp_orig->mask.dst.u.gre.key = 0xffff; + exp_orig->mask.dst.u.gre.key = htons(0xffff); exp_orig->mask.dst.ip = 0xffffffff; exp_orig->mask.dst.protonum = 0xff; @@ -340,7 +340,8 @@ pptp_inbound_pkt(struct sk_buff **pskb, unsigned int reqlen; union pptp_ctrl_union _pptpReq, *pptpReq; struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - u_int16_t msg, *cid, *pcid; + u_int16_t msg; + __be16 *cid, *pcid; u_int32_t seq; ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); @@ -551,7 +552,8 @@ pptp_outbound_pkt(struct sk_buff **pskb, unsigned int reqlen; union pptp_ctrl_union _pptpReq, *pptpReq; struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - u_int16_t msg, *cid, *pcid; + u_int16_t msg; + __be16 *cid, *pcid; ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); if (!ctlh) @@ -755,7 +757,7 @@ static struct ip_conntrack_helper pptp = { } }, .mask = { .src = { .ip = 0, - .u = { .tcp = { .port = 0xffff } } + .u = { .tcp = { .port = __constant_htons(0xffff) } } }, .dst = { .ip = 0, .u = { .all = 0 }, -- cgit v1.2.3 From a82b748930fce0dab22c64075c38c830ae116904 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Thu, 22 Sep 2005 23:45:44 -0700 Subject: [NETFILTER] remove unneeded structure definition from conntrack helper Signed-off-by: Harald Welte Signed-off-by: David S. Miller --- include/linux/netfilter_ipv4/ip_conntrack_pptp.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h index 50a761db5a04..816144c75de0 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h @@ -284,13 +284,6 @@ struct PptpSetLinkInfo { __be32 recvAccm; }; - -struct pptp_priv_data { - __u16 call_id; - __u16 mcall_id; - __u16 pcall_id; -}; - union pptp_ctrl_union { struct PptpStartSessionRequest sreq; struct PptpStartSessionReply srep; -- cgit v1.2.3 From 1dfbab59498d6f227c91988bab6c71af049a5333 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Thu, 22 Sep 2005 23:46:57 -0700 Subject: [NETFILTER] Fix conntrack event cache deadlock/oops This patch fixes a number of bugs. It cannot be reasonably split up in multiple fixes, since all bugs interact with each other and affect the same function: Bug #1: The event cache code cannot be called while a lock is held. Therefore, the call to ip_conntrack_event_cache() within ip_ct_refresh_acct() needs to be moved outside of the locked section. This fixes a number of 2.6.14-rcX oops and deadlock reports. Bug #2: We used to call ct_add_counters() for unconfirmed connections without holding a lock. Since the add operations are not atomic, we could race with another CPU. Bug #3: ip_ct_refresh_acct() lost REFRESH events in some cases where refresh (and the corresponding event) are desired, but no accounting shall be performed. Both, evenst and accounting implicitly depended on the skb parameter bein non-null. We now re-introduce a non-accounting "ip_ct_refresh()" variant to explicitly state the desired behaviour. Signed-off-by: Harald Welte Signed-off-by: David S. 
Miller --- include/linux/netfilter_ipv4/ip_conntrack.h | 25 +++++++++++--- net/ipv4/netfilter/ip_conntrack_amanda.c | 2 +- net/ipv4/netfilter/ip_conntrack_core.c | 49 ++++++++++++++------------- net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 1 - net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 2 +- net/ipv4/netfilter/ip_conntrack_standalone.c | 2 +- 6 files changed, 49 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index bace72a76cc4..4ced38736813 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h @@ -332,11 +332,28 @@ extern void need_ip_conntrack(void); extern int invert_tuplepr(struct ip_conntrack_tuple *inverse, const struct ip_conntrack_tuple *orig); +extern void __ip_ct_refresh_acct(struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, + unsigned long extra_jiffies, + int do_acct); + +/* Refresh conntrack for this many jiffies and do accounting */ +static inline void ip_ct_refresh_acct(struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, + unsigned long extra_jiffies) +{ + __ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1); +} + /* Refresh conntrack for this many jiffies */ -extern void ip_ct_refresh_acct(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - unsigned long extra_jiffies); +static inline void ip_ct_refresh(struct ip_conntrack *ct, + const struct sk_buff *skb, + unsigned long extra_jiffies) +{ + __ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0); +} /* These are for NAT. Icky. */ /* Update TCP window tracking data when NAT mangles the packet */ diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c index dc20881004bc..fa3f914117ec 100644 --- a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ b/net/ipv4/netfilter/ip_conntrack_amanda.c @@ -65,7 +65,7 @@ static int help(struct sk_buff **pskb, /* increase the UDP timeout of the master connection as replies from * Amanda clients to the server can be quite delayed */ - ip_ct_refresh_acct(ct, ctinfo, NULL, master_timeout * HZ); + ip_ct_refresh(ct, *pskb, master_timeout * HZ); /* No data? 
*/ dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index c1f82e0c81cf..ea65dd3e517a 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -1112,45 +1112,46 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) synchronize_net(); } -static inline void ct_add_counters(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb) -{ -#ifdef CONFIG_IP_NF_CT_ACCT - if (skb) { - ct->counters[CTINFO2DIR(ctinfo)].packets++; - ct->counters[CTINFO2DIR(ctinfo)].bytes += - ntohs(skb->nh.iph->tot_len); - } -#endif -} - -/* Refresh conntrack for this many jiffies and do accounting (if skb != NULL) */ -void ip_ct_refresh_acct(struct ip_conntrack *ct, +/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ +void __ip_ct_refresh_acct(struct ip_conntrack *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, - unsigned long extra_jiffies) + unsigned long extra_jiffies, + int do_acct) { + int do_event = 0; + IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct); + IP_NF_ASSERT(skb); + + write_lock_bh(&ip_conntrack_lock); /* If not in hash table, timer will not be active yet */ if (!is_confirmed(ct)) { ct->timeout.expires = extra_jiffies; - ct_add_counters(ct, ctinfo, skb); + do_event = 1; } else { - write_lock_bh(&ip_conntrack_lock); /* Need del_timer for race avoidance (may already be dying). */ if (del_timer(&ct->timeout)) { ct->timeout.expires = jiffies + extra_jiffies; add_timer(&ct->timeout); - /* FIXME: We loose some REFRESH events if this function - * is called without an skb. I'll fix this later -HW */ - if (skb) - ip_conntrack_event_cache(IPCT_REFRESH, skb); + do_event = 1; } - ct_add_counters(ct, ctinfo, skb); - write_unlock_bh(&ip_conntrack_lock); } + +#ifdef CONFIG_IP_NF_CT_ACCT + if (do_acct) { + ct->counters[CTINFO2DIR(ctinfo)].packets++; + ct->counters[CTINFO2DIR(ctinfo)].bytes += + ntohs(skb->nh.iph->tot_len); + } +#endif + + write_unlock_bh(&ip_conntrack_lock); + + /* must be unlocked when calling event cache */ + if (do_event) + ip_conntrack_event_cache(IPCT_REFRESH, skb); } #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c index 8236ee0fb090..926a6684643d 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c @@ -172,7 +172,6 @@ static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) DEBUGP("setting timeout of conntrack %p to 0\n", sibling); sibling->proto.gre.timeout = 0; sibling->proto.gre.stream_timeout = 0; - /* refresh_acct will not modify counters if skb == NULL */ if (del_timer(&sibling->timeout)) sibling->timeout.function((unsigned long)sibling); ip_conntrack_put(sibling); diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c index 71ef19d126d0..577bac22dcc6 100644 --- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c +++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c @@ -91,7 +91,7 @@ static int help(struct sk_buff **pskb, ip_conntrack_expect_related(exp); ip_conntrack_expect_put(exp); - ip_ct_refresh_acct(ct, ctinfo, NULL, timeout * HZ); + ip_ct_refresh(ct, *pskb, timeout * HZ); out: return NF_ACCEPT; } diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index d3c7808010ec..dd476b191f4b 
100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -989,7 +989,7 @@ EXPORT_SYMBOL(need_ip_conntrack); EXPORT_SYMBOL(ip_conntrack_helper_register); EXPORT_SYMBOL(ip_conntrack_helper_unregister); EXPORT_SYMBOL(ip_ct_iterate_cleanup); -EXPORT_SYMBOL(ip_ct_refresh_acct); +EXPORT_SYMBOL(__ip_ct_refresh_acct); EXPORT_SYMBOL(ip_conntrack_expect_alloc); EXPORT_SYMBOL(ip_conntrack_expect_put); -- cgit v1.2.3 From 8c3520d4eb3b1bbf2e45fbae8dcfb8db06d5e775 Mon Sep 17 00:00:00 2001 From: Daniel Ritz Date: Sun, 21 Aug 2005 22:29:26 -0700 Subject: [PATCH] yenta: auto-tune EnE bridges for CardBus cards Echo Audio cardbus products are known to be incompatible with EnE bridges. in order to maybe solve the problem a EnE specific test bit has to be set, another cleared...but other setups have a good chance to break when just forcing the bits. so do the whole thingy automatically. The patch adds a hook in cb_alloc() that allows special tuning for the different chipsets. for ene just match the Echo products and set/clear the test bits, defaults to do the same thing as w/o the patch to not break working setups. Signed-off-by: Daniel Ritz Cc: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Dominik Brodowski --- drivers/pcmcia/cardbus.c | 5 +++ drivers/pcmcia/ti113x.h | 86 ++++++++++++++++++++++++++++++++++++++----- drivers/pcmcia/yenta_socket.c | 15 ++++++-- include/linux/pci_ids.h | 5 +++ include/pcmcia/ss.h | 9 ++++- 5 files changed, 105 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index 1d755e20880c..3f6d51d11374 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -228,6 +228,11 @@ int cb_alloc(struct pcmcia_socket * s) pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); cardbus_assign_irqs(bus, s->pci_irq); + + /* socket specific tune function */ + if (s->tune_bridge) + s->tune_bridge(s, bus); + pci_enable_bridges(bus); pci_bus_add_devices(bus); diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h index fbe233e19ceb..d319f2e7d053 100644 --- a/drivers/pcmcia/ti113x.h +++ b/drivers/pcmcia/ti113x.h @@ -153,6 +153,12 @@ /* EnE test register */ #define ENE_TEST_C9 0xc9 /* 8bit */ #define ENE_TEST_C9_TLTENABLE 0x02 +#define ENE_TEST_C9_PFENABLE_F0 0x04 +#define ENE_TEST_C9_PFENABLE_F1 0x08 +#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F0) +#define ENE_TEST_C9_WPDISALBLE_F0 0x40 +#define ENE_TEST_C9_WPDISALBLE_F1 0x80 +#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1) /* * Texas Instruments CardBus controller overrides. @@ -790,16 +796,6 @@ static int ti12xx_override(struct yenta_socket *socket) if (val_orig != val) config_writel(socket, TI113X_SYSTEM_CONTROL, val); - /* - * for EnE bridges only: clear testbit TLTEnable. this makes the - * RME Hammerfall DSP sound card working. - */ - if (socket->dev->vendor == PCI_VENDOR_ID_ENE) { - u8 test_c9 = config_readb(socket, ENE_TEST_C9); - test_c9 &= ~ENE_TEST_C9_TLTENABLE; - config_writeb(socket, ENE_TEST_C9, test_c9); - } - /* * Yenta expects controllers to use CSCINT to route * CSC interrupts to PCI rather than INTVAL. @@ -841,5 +837,75 @@ static int ti1250_override(struct yenta_socket *socket) return ti12xx_override(socket); } + +/** + * EnE specific part. EnE bridges are register compatible with TI bridges but + * have their own test registers and more important their own little problems. 
+ * Some fixup code to make everybody happy (TM). + */ + +/** + * set/clear various test bits: + * Defaults to clear the bit. + * - mask (u8) defines what bits to change + * - bits (u8) is the values to change them to + * -> it's + * current = (current & ~mask) | bits + */ +/* pci ids of devices that wants to have the bit set */ +#define DEVID(_vend,_dev,_subvend,_subdev,mask,bits) { \ + .vendor = _vend, \ + .device = _dev, \ + .subvendor = _subvend, \ + .subdevice = _subdev, \ + .driver_data = ((mask) << 8 | (bits)), \ + } +static struct pci_device_id ene_tune_tbl[] = { + /* Echo Audio products based on motorola DSP56301 and DSP56361 */ + DEVID(PCI_VENDOR_ID_MOTOROLA, 0x1801, 0xECC0, PCI_ANY_ID, + ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), + DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID, + ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), + + {} +}; + +static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus) +{ + struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); + struct pci_dev *dev; + struct pci_device_id *id = NULL; + u8 test_c9, old_c9, mask, bits; + + list_for_each_entry(dev, &bus->devices, bus_list) { + id = (struct pci_device_id *) pci_match_id(ene_tune_tbl, dev); + if (id) + break; + } + + test_c9 = old_c9 = config_readb(socket, ENE_TEST_C9); + if (id) { + mask = (id->driver_data >> 8) & 0xFF; + bits = id->driver_data & 0xFF; + + test_c9 = (test_c9 & ~mask) | bits; + } + else + /* default to clear TLTEnable bit, old behaviour */ + test_c9 &= ~ENE_TEST_C9_TLTENABLE; + + printk(KERN_INFO "yenta EnE: chaning testregister 0xC9, %02x -> %02x\n", old_c9, test_c9); + config_writeb(socket, ENE_TEST_C9, test_c9); +} + + +static int ene_override(struct yenta_socket *socket) +{ + /* install tune_bridge() function */ + socket->socket.tune_bridge = ene_tune_bridge; + + return ti1250_override(socket); +} + #endif /* _LINUX_TI113X_H */ diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index ba4d78e5b121..fd2a6f892c41 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -819,6 +819,7 @@ enum { CARDBUS_TYPE_TOPIC95, CARDBUS_TYPE_TOPIC97, CARDBUS_TYPE_O2MICRO, + CARDBUS_TYPE_ENE, }; /* @@ -865,6 +866,12 @@ static struct cardbus_type cardbus_type[] = { .override = o2micro_override, .restore_state = o2micro_restore_state, }, + [CARDBUS_TYPE_ENE] = { + .override = ene_override, + .save_state = ti_save_state, + .restore_state = ti_restore_state, + .sock_init = ti_init, + }, }; @@ -1265,10 +1272,10 @@ static struct pci_device_id yenta_table [] = { CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250), CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, TI1250), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, ENE), CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH), CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH), diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b86a4b77007e..92efb2c767f9 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2187,7 +2187,12 @@ #define 
PCI_DEVICE_ID_ENE_1211 0x1211 #define PCI_DEVICE_ID_ENE_1225 0x1225 #define PCI_DEVICE_ID_ENE_1410 0x1410 +#define PCI_DEVICE_ID_ENE_710 0x1411 +#define PCI_DEVICE_ID_ENE_712 0x1412 #define PCI_DEVICE_ID_ENE_1420 0x1420 +#define PCI_DEVICE_ID_ENE_720 0x1421 +#define PCI_DEVICE_ID_ENE_722 0x1422 + #define PCI_VENDOR_ID_CHELSIO 0x1425 #define PCI_VENDOR_ID_MIPS 0x153f diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 0f7aacc33fe9..c8592c7e8eaa 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -21,6 +21,9 @@ #include #include #include +#ifdef CONFIG_CARDBUS +#include +#endif /* Definitions for card status flags for GetStatus */ #define SS_WRPROT 0x0001 @@ -233,7 +236,11 @@ struct pcmcia_socket { /* so is power hook */ int (*power_hook)(struct pcmcia_socket *sock, int operation); - +#ifdef CONFIG_CARDBUS + /* allows tuning the CB bridge before loading driver for the CB card */ + void (*tune_bridge)(struct pcmcia_socket *sock, struct pci_bus *bus); +#endif + /* state thread */ struct semaphore skt_sem; /* protects socket h/w state */ -- cgit v1.2.3 From 6c1a10dba92cbacb58563f5eacf93807125b488a Mon Sep 17 00:00:00 2001 From: Daniel Ritz Date: Tue, 20 Sep 2005 14:12:17 -0700 Subject: [PATCH] yenta: add support for more TI bridges Support some more TI cardbus bridges. most of them are multifunction devices which adds 1394 controllers, smartcard readers etc. this could also help with the various problems with the XX21 controllers seen on the linux-pcmcia list. Signed-off-by: Daniel Ritz Signed-off-by: Dominik Brodowski --- drivers/pcmcia/ti113x.h | 29 +++++++++++++++++++++++++++++ drivers/pcmcia/yenta_socket.c | 8 ++++++++ include/linux/pci_ids.h | 7 +++++++ 3 files changed, 44 insertions(+) (limited to 'include/linux') diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h index d319f2e7d053..da0b404561c9 100644 --- a/drivers/pcmcia/ti113x.h +++ b/drivers/pcmcia/ti113x.h @@ -59,6 +59,7 @@ #define TI122X_SCR_SER_STEP 0xc0000000 #define TI122X_SCR_INTRTIE 0x20000000 +#define TIXX21_SCR_TIEALL 0x10000000 #define TI122X_SCR_CBRSVD 0x00400000 #define TI122X_SCR_MRBURSTDN 0x00008000 #define TI122X_SCR_MRBURSTUP 0x00004000 @@ -624,6 +625,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket) int devfn; unsigned int state; int ret = 1; + u32 sysctl; /* catch the two-slot controllers */ switch (socket->dev->device) { @@ -646,6 +648,24 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket) */ break; + case PCI_DEVICE_ID_TI_X515: + case PCI_DEVICE_ID_TI_X420: + case PCI_DEVICE_ID_TI_X620: + case PCI_DEVICE_ID_TI_XX21_XX11: + case PCI_DEVICE_ID_TI_7410: + case PCI_DEVICE_ID_TI_7610: + /* + * those are either single or dual slot CB with additional functions + * like 1394, smartcard reader, etc. check the TIEALL flag for them + * the TIEALL flag binds the IRQ of all functions toghether. + * we catch the single slot variants later. + */ + sysctl = config_readl(socket, TI113X_SYSTEM_CONTROL); + if (sysctl & TIXX21_SCR_TIEALL) + return 0; + + break; + /* single-slot controllers have the 2nd slot empty always :) */ default: return 1; @@ -658,6 +678,15 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket) if (!func) return 1; + /* + * check that the device id of both slots match. this is needed for the + * XX21 and the XX11 controller that share the same device id for single + * and dual slot controllers. return '2nd slot empty'. we already checked + * if the interrupt is tied to another function. 
+ */ + if (socket->dev->device != func->device) + goto out; + slot2 = pci_get_drvdata(func); if (!slot2) goto out; diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 80806c9b43ad..c3e22fca105a 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1249,6 +1249,14 @@ static struct pci_device_id yenta_table [] = { CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250), CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X515, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X420, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X620, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7410, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE), CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE), CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE), diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 92efb2c767f9..68f11ac1a314 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -769,6 +769,8 @@ #define PCI_DEVICE_ID_TI_TVP4010 0x3d04 #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 @@ -785,12 +787,17 @@ #define PCI_DEVICE_ID_TI_4451 0xac42 #define PCI_DEVICE_ID_TI_4510 0xac44 #define PCI_DEVICE_ID_TI_4520 0xac46 +#define PCI_DEVICE_ID_TI_7510 0xac47 +#define PCI_DEVICE_ID_TI_7610 0xac48 +#define PCI_DEVICE_ID_TI_7410 0xac49 #define PCI_DEVICE_ID_TI_1410 0xac50 #define PCI_DEVICE_ID_TI_1420 0xac51 #define PCI_DEVICE_ID_TI_1451A 0xac52 #define PCI_DEVICE_ID_TI_1620 0xac54 #define PCI_DEVICE_ID_TI_1520 0xac55 #define PCI_DEVICE_ID_TI_1510 0xac56 +#define PCI_DEVICE_ID_TI_X620 0xac8d +#define PCI_DEVICE_ID_TI_X420 0xac8e #define PCI_VENDOR_ID_SONY 0x104d #define PCI_DEVICE_ID_SONY_CXD3222 0x8039 -- cgit v1.2.3 From 4fb7edce52e5b6cf41e3375822d74a27f0b6f2dd Mon Sep 17 00:00:00 2001 From: Kars de Jong Date: Sun, 25 Sep 2005 14:39:46 +0200 Subject: [PATCH] pcmcia: fix cross-platform issues with pcmcia module aliases - Added a missing TO_NATIVE call to scripts/mod/file2alias.c:do_pcmcia_entry() - Add an alignment attribute to struct pcmcia_device_no to solve an alignment issue seen when cross-compiling on x86 for m68k. 
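To make the cross-endian issue concrete (a standalone sketch, not code from the patch; the flags and swap helper below are assumptions standing in for the TO_NATIVE machinery in scripts/mod/file2alias.c): the device-table structs are read straight out of the target object file, so when the build host and the target disagree on byte order every multi-byte field must be swapped before it is formatted into the MODULE_ALIAS string.

#include <stdint.h>
#include <stdio.h>

/* Assumed flags standing in for the host/target endianness detection that
 * file2alias.c performs when it opens the module object file. */
static int host_is_big_endian;		/* e.g. 0 on an x86 build host */
static int target_is_big_endian = 1;	/* e.g. 1 for an m68k target   */

/* Minimal stand-in for TO_NATIVE applied to a 32-bit field. */
static uint32_t to_native32(uint32_t x)
{
	if (host_is_big_endian == target_is_big_endian)
		return x;
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
	uint32_t prod_id_hash = 0x12345678;	/* value as stored in the target .o */

	printf("host-order value: %#x\n", to_native32(prod_id_hash));
	return 0;
}
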
Signed-off-by: Kars de Jong Signed-off-by: Dominik Brodowski --- include/linux/mod_devicetable.h | 5 +++-- scripts/mod/file2alias.c | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 47da39ba3f03..4ed2107bc020 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -209,10 +209,11 @@ struct pcmcia_device_id { /* for real multi-function devices */ __u8 function; - /* for pseude multi-function devices */ + /* for pseudo multi-function devices */ __u8 device_no; - __u32 prod_id_hash[4]; + __u32 prod_id_hash[4] + __attribute__((aligned(sizeof(__u32)))); /* not matched against in kernelspace*/ #ifdef __KERNEL__ diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index d8ee38aede26..f2ee673329a7 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -295,11 +295,13 @@ static int do_pcmcia_entry(const char *filename, { unsigned int i; + id->match_flags = TO_NATIVE(id->match_flags); id->manf_id = TO_NATIVE(id->manf_id); id->card_id = TO_NATIVE(id->card_id); id->func_id = TO_NATIVE(id->func_id); id->function = TO_NATIVE(id->function); id->device_no = TO_NATIVE(id->device_no); + for (i=0; i<4; i++) { id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]); } -- cgit v1.2.3 From acd042bb2de50d4e6fb969281a00cc8b8b71e46d Mon Sep 17 00:00:00 2001 From: Evgeniy Polyakov Date: Mon, 26 Sep 2005 15:06:50 -0700 Subject: [CONNECTOR]: async connector mode. If input message rate from userspace is too high, do not drop them, but try to deliver using work queue allocation. Failing there is some kind of congestion control. It also removes warn_on on this condition, which scares people. Signed-off-by: Evgeniy Polyakov Signed-off-by: David S. 
Miller --- drivers/connector/cn_queue.c | 32 +++++++++++-------- drivers/connector/connector.c | 74 ++++++++++++++++++++++--------------------- include/linux/connector.h | 21 ++++++++---- 3 files changed, 72 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index 966632182e2d..9f2f00d82917 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c @@ -31,16 +31,19 @@ #include #include -static void cn_queue_wrapper(void *data) +void cn_queue_wrapper(void *data) { - struct cn_callback_entry *cbq = data; + struct cn_callback_data *d = data; - cbq->cb->callback(cbq->cb->priv); - cbq->destruct_data(cbq->ddata); - cbq->ddata = NULL; + d->callback(d->callback_priv); + + d->destruct_data(d->ddata); + d->ddata = NULL; + + kfree(d->free); } -static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb) +static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *)) { struct cn_callback_entry *cbq; @@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac return NULL; } - cbq->cb = cb; - INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq); + snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); + memcpy(&cbq->id.id, id, sizeof(struct cb_id)); + cbq->data.callback = callback; + + INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); return cbq; } @@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2) return ((i1->idx == i2->idx) && (i1->val == i2->val)); } -int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) +int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)) { struct cn_callback_entry *cbq, *__cbq; int found = 0; - cbq = cn_queue_alloc_callback_entry(cb); + cbq = cn_queue_alloc_callback_entry(name, id, callback); if (!cbq) return -ENOMEM; @@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) spin_lock_bh(&dev->queue_lock); list_for_each_entry(__cbq, &dev->queue_list, callback_entry) { - if (cn_cb_equal(&__cbq->cb->id, &cb->id)) { + if (cn_cb_equal(&__cbq->id.id, id)) { found = 1; break; } @@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) cbq->nls = dev->nls; cbq->seq = 0; - cbq->group = cbq->cb->id.idx; + cbq->group = cbq->id.id.idx; return 0; } @@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id) spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) { - if (cn_cb_equal(&cbq->cb->id, id)) { + if (cn_cb_equal(&cbq->id.id, id)) { list_del(&cbq->callback_entry); found = 1; break; diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index aaf6d468a8b9..bb0b3a8de14b 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask) spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { - if (cn_cb_equal(&__cbq->cb->id, &msg->id)) { + if (cn_cb_equal(&__cbq->id.id, &msg->id)) { found = 1; group = __cbq->group; } @@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v { struct cn_callback_entry *__cbq; struct cn_dev *dev = &cdev; - int found = 0; + int err = -ENODEV; spin_lock_bh(&dev->cbdev->queue_lock); 
list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { - if (cn_cb_equal(&__cbq->cb->id, &msg->id)) { - /* - * Let's scream if there is some magic and the - * data will arrive asynchronously here. - * [i.e. netlink messages will be queued]. - * After the first warning I will fix it - * quickly, but now I think it is - * impossible. --zbr (2004_04_27). - */ + if (cn_cb_equal(&__cbq->id.id, &msg->id)) { if (likely(!test_bit(0, &__cbq->work.pending) && - __cbq->ddata == NULL)) { - __cbq->cb->priv = msg; + __cbq->data.ddata == NULL)) { + __cbq->data.callback_priv = msg; - __cbq->ddata = data; - __cbq->destruct_data = destruct_data; + __cbq->data.ddata = data; + __cbq->data.destruct_data = destruct_data; if (queue_work(dev->cbdev->cn_queue, &__cbq->work)) - found = 1; + err = 0; } else { - printk("%s: cbq->data=%p, " - "work->pending=%08lx.\n", - __func__, __cbq->ddata, - __cbq->work.pending); - WARN_ON(1); + struct work_struct *w; + struct cn_callback_data *d; + + w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); + if (w) { + d = (struct cn_callback_data *)(w+1); + + d->callback_priv = msg; + d->callback = __cbq->data.callback; + d->ddata = data; + d->destruct_data = destruct_data; + d->free = w; + + INIT_LIST_HEAD(&w->entry); + w->pending = 0; + w->func = &cn_queue_wrapper; + w->data = d; + init_timer(&w->timer); + + if (queue_work(dev->cbdev->cn_queue, w)) + err = 0; + else { + kfree(w); + err = -EINVAL; + } + } else + err = -ENOMEM; } break; } } spin_unlock_bh(&dev->cbdev->queue_lock); - return found ? 0 : -ENODEV; + return err; } /* @@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *)) { int err; struct cn_dev *dev = &cdev; - struct cn_callback *cb; - - cb = kzalloc(sizeof(*cb), GFP_KERNEL); - if (!cb) - return -ENOMEM; - - scnprintf(cb->name, sizeof(cb->name), "%s", name); - memcpy(&cb->id, id, sizeof(cb->id)); - cb->callback = callback; - - err = cn_queue_add_callback(dev->cbdev, cb); - if (err) { - kfree(cb); + err = cn_queue_add_callback(dev->cbdev, name, id, callback); + if (err) return err; - } cn_notify(id, 0); diff --git a/include/linux/connector.h b/include/linux/connector.h index 96de26301f84..86d4b0a81713 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -104,12 +104,19 @@ struct cn_queue_dev { struct sock *nls; }; -struct cn_callback { +struct cn_callback_id { unsigned char name[CN_CBQ_NAMELEN]; - struct cb_id id; +}; + +struct cn_callback_data { + void (*destruct_data) (void *); + void *ddata; + + void *callback_priv; void (*callback) (void *); - void *priv; + + void *free; }; struct cn_callback_entry { @@ -118,8 +125,8 @@ struct cn_callback_entry { struct work_struct work; struct cn_queue_dev *pdev; - void (*destruct_data) (void *); - void *ddata; + struct cn_callback_id id; + struct cn_callback_data data; int seq, group; struct sock *nls; @@ -144,7 +151,7 @@ int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); void cn_del_callback(struct cb_id *); int cn_netlink_send(struct cn_msg *, u32, int); -int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb); +int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); @@ -152,6 +159,8 @@ void cn_queue_free_dev(struct cn_queue_dev *dev); int cn_cb_equal(struct cb_id *, struct cb_id *); +void cn_queue_wrapper(void *data); + 
extern int cn_already_initialized; #endif /* __KERNEL__ */ -- cgit v1.2.3 From 188bab3ae0ed164bc18f98be932512d777dd038b Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Mon, 26 Sep 2005 15:25:11 -0700 Subject: [NETFILTER]: Fix invalid module autoloading by splitting iptable_nat When you've enabled conntrack and NAT as a module (standard case in all distributions), and you've also enabled the new conntrack netlink interface, loading ip_conntrack_netlink.ko will auto-load iptable_nat.ko. This causes a huge performance penalty, since for every packet you iterate the nat code, even if you don't want it. This patch splits iptable_nat.ko into the NAT core (ip_nat.ko) and the iptables frontend (iptable_nat.ko). Threfore, ip_conntrack_netlink.ko will only pull ip_nat.ko, but not the frontend. ip_nat.ko will "only" allocate some resources, but not affect runtime performance. This separation is also a nice step in anticipation of new packet filters (nf-hipac, ipset, pkttables) being able to use the NAT core. Signed-off-by: Harald Welte Signed-off-by: David S. Miller --- include/linux/netfilter_ipv4/ip_nat_core.h | 12 +++++----- net/ipv4/netfilter/Makefile | 5 +++-- net/ipv4/netfilter/ip_nat_core.c | 35 ++++++++++++++++++++---------- net/ipv4/netfilter/ip_nat_helper.c | 4 ++++ net/ipv4/netfilter/ip_nat_standalone.c | 25 ++++----------------- 5 files changed, 40 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_ipv4/ip_nat_core.h b/include/linux/netfilter_ipv4/ip_nat_core.h index 3b50eb91f007..30db23f06b03 100644 --- a/include/linux/netfilter_ipv4/ip_nat_core.h +++ b/include/linux/netfilter_ipv4/ip_nat_core.h @@ -5,16 +5,14 @@ /* This header used to share core functionality between the standalone NAT module, and the compatibility layer's use of NAT for masquerading. 
*/ -extern int ip_nat_init(void); -extern void ip_nat_cleanup(void); -extern unsigned int nat_packet(struct ip_conntrack *ct, +extern unsigned int ip_nat_packet(struct ip_conntrack *ct, enum ip_conntrack_info conntrackinfo, unsigned int hooknum, struct sk_buff **pskb); -extern int icmp_reply_translation(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_nat_manip_type manip, - enum ip_conntrack_dir dir); +extern int ip_nat_icmp_reply_translation(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_nat_manip_type manip, + enum ip_conntrack_dir dir); #endif /* _IP_NAT_CORE_H */ diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 89002533f2a2..dab4b58dd31e 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -4,7 +4,8 @@ # objects for the standalone - connection tracking / NAT ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o -iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o +ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o +iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o @@ -40,7 +41,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o # the three instances of ip_tables obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o -obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o +obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o ip_nat.o obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o # matches diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index c3ea891d38e7..c5e3abd24672 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c @@ -74,12 +74,14 @@ ip_nat_proto_find_get(u_int8_t protonum) return p; } +EXPORT_SYMBOL_GPL(ip_nat_proto_find_get); void ip_nat_proto_put(struct ip_nat_protocol *p) { module_put(p->me); } +EXPORT_SYMBOL_GPL(ip_nat_proto_put); /* We keep an extra hash for each conntrack, for fast searching. */ static inline unsigned int @@ -111,6 +113,7 @@ ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck) return csum_fold(csum_partial((char *)diffs, sizeof(diffs), oldcheck^0xFFFF)); } +EXPORT_SYMBOL(ip_nat_cheat_check); /* Is this tuple already taken? (not by us) */ int @@ -127,6 +130,7 @@ ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple, invert_tuplepr(&reply, tuple); return ip_conntrack_tuple_taken(&reply, ignored_conntrack); } +EXPORT_SYMBOL(ip_nat_used_tuple); /* If we source map this tuple so reply looks like reply_tuple, will * that meet the constraints of range. */ @@ -347,6 +351,7 @@ ip_nat_setup_info(struct ip_conntrack *conntrack, return NF_ACCEPT; } +EXPORT_SYMBOL(ip_nat_setup_info); /* Returns true if succeeded. */ static int @@ -387,10 +392,10 @@ manip_pkt(u_int16_t proto, } /* Do packet manipulations according to ip_nat_setup_info. 
*/ -unsigned int nat_packet(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff **pskb) +unsigned int ip_nat_packet(struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + unsigned int hooknum, + struct sk_buff **pskb) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned long statusbit; @@ -417,12 +422,13 @@ unsigned int nat_packet(struct ip_conntrack *ct, } return NF_ACCEPT; } +EXPORT_SYMBOL_GPL(ip_nat_packet); /* Dir is direction ICMP is coming from (opposite to packet it contains) */ -int icmp_reply_translation(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_nat_manip_type manip, - enum ip_conntrack_dir dir) +int ip_nat_icmp_reply_translation(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_nat_manip_type manip, + enum ip_conntrack_dir dir) { struct { struct icmphdr icmp; @@ -509,6 +515,7 @@ int icmp_reply_translation(struct sk_buff **pskb, return 1; } +EXPORT_SYMBOL_GPL(ip_nat_icmp_reply_translation); /* Protocol registration. */ int ip_nat_protocol_register(struct ip_nat_protocol *proto) @@ -525,6 +532,7 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto) write_unlock_bh(&ip_nat_lock); return ret; } +EXPORT_SYMBOL(ip_nat_protocol_register); /* Noone stores the protocol anywhere; simply delete it. */ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) @@ -536,6 +544,7 @@ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) /* Someone could be still looking at the proto in a bh. */ synchronize_net(); } +EXPORT_SYMBOL(ip_nat_protocol_unregister); #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) @@ -582,7 +591,7 @@ EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range); EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr); #endif -int __init ip_nat_init(void) +static int __init ip_nat_init(void) { size_t i; @@ -624,10 +633,14 @@ static int clean_nat(struct ip_conntrack *i, void *data) return 0; } -/* Not __exit: called from ip_nat_standalone.c:init_or_cleanup() --RR */ -void ip_nat_cleanup(void) +static void __exit ip_nat_cleanup(void) { ip_ct_iterate_cleanup(&clean_nat, NULL); ip_conntrack_destroyed = NULL; vfree(bysource); } + +MODULE_LICENSE("GPL"); + +module_init(ip_nat_init); +module_exit(ip_nat_cleanup); diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index d2dd5d313556..5d506e0564d5 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c @@ -199,6 +199,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb, } return 1; } +EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); /* Generic function for mangling variable-length address changes inside * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX @@ -256,6 +257,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, return 1; } +EXPORT_SYMBOL(ip_nat_mangle_udp_packet); /* Adjust one found SACK option including checksum correction */ static void @@ -399,6 +401,7 @@ ip_nat_seq_adjust(struct sk_buff **pskb, return 1; } +EXPORT_SYMBOL(ip_nat_seq_adjust); /* Setup NAT on this expected conntrack so it follows master. 
*/ /* If we fail to get a free NAT slot, we'll get dropped on confirm */ @@ -425,3 +428,4 @@ void ip_nat_follow_master(struct ip_conntrack *ct, /* hook doesn't matter, but it has to do destination manip */ ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); } +EXPORT_SYMBOL(ip_nat_follow_master); diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 0ff368b131f6..30cd4e18c129 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -108,8 +108,8 @@ ip_nat_fn(unsigned int hooknum, case IP_CT_RELATED: case IP_CT_RELATED+IP_CT_IS_REPLY: if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { - if (!icmp_reply_translation(pskb, ct, maniptype, - CTINFO2DIR(ctinfo))) + if (!ip_nat_icmp_reply_translation(pskb, ct, maniptype, + CTINFO2DIR(ctinfo))) return NF_DROP; else return NF_ACCEPT; @@ -152,7 +152,7 @@ ip_nat_fn(unsigned int hooknum, } IP_NF_ASSERT(info); - return nat_packet(ct, ctinfo, hooknum, pskb); + return ip_nat_packet(ct, ctinfo, hooknum, pskb); } static unsigned int @@ -325,15 +325,10 @@ static int init_or_cleanup(int init) printk("ip_nat_init: can't setup rules.\n"); goto cleanup_nothing; } - ret = ip_nat_init(); - if (ret < 0) { - printk("ip_nat_init: can't setup rules.\n"); - goto cleanup_rule_init; - } ret = nf_register_hook(&ip_nat_in_ops); if (ret < 0) { printk("ip_nat_init: can't register in hook.\n"); - goto cleanup_nat; + goto cleanup_rule_init; } ret = nf_register_hook(&ip_nat_out_ops); if (ret < 0) { @@ -374,8 +369,6 @@ static int init_or_cleanup(int init) nf_unregister_hook(&ip_nat_out_ops); cleanup_inops: nf_unregister_hook(&ip_nat_in_ops); - cleanup_nat: - ip_nat_cleanup(); cleanup_rule_init: ip_nat_rule_cleanup(); cleanup_nothing: @@ -395,14 +388,4 @@ static void __exit fini(void) module_init(init); module_exit(fini); -EXPORT_SYMBOL(ip_nat_setup_info); -EXPORT_SYMBOL(ip_nat_protocol_register); -EXPORT_SYMBOL(ip_nat_protocol_unregister); -EXPORT_SYMBOL_GPL(ip_nat_proto_find_get); -EXPORT_SYMBOL_GPL(ip_nat_proto_put); -EXPORT_SYMBOL(ip_nat_cheat_check); -EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); -EXPORT_SYMBOL(ip_nat_mangle_udp_packet); -EXPORT_SYMBOL(ip_nat_used_tuple); -EXPORT_SYMBOL(ip_nat_follow_master); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From c4a3e0a529ab3e65223e81681c7c6b1bc188fa58 Mon Sep 17 00:00:00 2001 From: "Bagalkote, Sreenivas" Date: Tue, 20 Sep 2005 17:46:58 -0400 Subject: [SCSI] MegaRAID SAS RAID: new driver Signed-off-by: Sreenivas Bagalkote Signed-off-by: James Bottomley --- drivers/scsi/Makefile | 1 + drivers/scsi/megaraid/Kconfig.megaraid | 9 + drivers/scsi/megaraid/Makefile | 1 + drivers/scsi/megaraid/megaraid_sas.c | 2805 ++++++++++++++++++++++++++++++++ drivers/scsi/megaraid/megaraid_sas.h | 1142 +++++++++++++ include/linux/pci_ids.h | 2 + 6 files changed, 3960 insertions(+) create mode 100644 drivers/scsi/megaraid/megaraid_sas.c create mode 100644 drivers/scsi/megaraid/megaraid_sas.h (limited to 'include/linux') diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 1e4edbdf2730..48529d180ca8 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o obj-$(CONFIG_SCSI_DC390T) += tmscsim.o obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ +obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp.o obj-$(CONFIG_SCSI_GDTH) += gdth.o diff --git a/drivers/scsi/megaraid/Kconfig.megaraid 
b/drivers/scsi/megaraid/Kconfig.megaraid index 917d591d90b2..7363e12663ac 100644 --- a/drivers/scsi/megaraid/Kconfig.megaraid +++ b/drivers/scsi/megaraid/Kconfig.megaraid @@ -76,3 +76,12 @@ config MEGARAID_LEGACY To compile this driver as a module, choose M here: the module will be called megaraid endif + +config MEGARAID_SAS + tristate "LSI Logic MegaRAID SAS RAID Module" + depends on PCI && SCSI + help + Module for LSI Logic's SAS based RAID controllers. + To compile this driver as a module, choose 'm' here. + Module will be called megaraid_sas + diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile index 6dd99f275722..f469915b97c3 100644 --- a/drivers/scsi/megaraid/Makefile +++ b/drivers/scsi/megaraid/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o +obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c new file mode 100644 index 000000000000..1b3148e842af --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -0,0 +1,2805 @@ +/* + * + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2005 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * FILE : megaraid_sas.c + * Version : v00.00.02.00-rc4 + * + * Authors: + * Sreenivas Bagalkote + * Sumant Patro + * + * List of supported controllers + * + * OEM Product Name VID DID SSVID SSID + * --- ------------ --- --- ---- ---- + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "megaraid_sas.h" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(MEGASAS_VERSION); +MODULE_AUTHOR("sreenivas.bagalkote@lsil.com"); +MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver"); + +/* + * PCI ID table for all supported controllers + */ +static struct pci_device_id megasas_pci_table[] = { + + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_LSI_SAS1064R, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_DELL_PERC5, + PCI_ANY_ID, + PCI_ANY_ID, + }, + {0} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(pci, megasas_pci_table); + +static int megasas_mgmt_majorno; +static struct megasas_mgmt_info megasas_mgmt_info; +static struct fasync_struct *megasas_async_queue; +static DECLARE_MUTEX(megasas_async_queue_mutex); + +/** + * megasas_get_cmd - Get a command from the free pool + * @instance: Adapter soft state + * + * Returns a free command from the pool + */ +static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance + *instance) +{ + unsigned long flags; + struct megasas_cmd *cmd = NULL; + + spin_lock_irqsave(&instance->cmd_pool_lock, flags); + + if (!list_empty(&instance->cmd_pool)) { + cmd = list_entry((&instance->cmd_pool)->next, + struct megasas_cmd, list); + list_del_init(&cmd->list); + } else { + printk(KERN_ERR "megasas: Command pool empty!\n"); + } + + spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); + return cmd; +} + +/** + * megasas_return_cmd - Return a cmd to free command pool + * @instance: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +static inline void +megasas_return_cmd(struct megasas_instance 
*instance, struct megasas_cmd *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->cmd_pool_lock, flags); + + cmd->scmd = NULL; + list_add_tail(&cmd->list, &instance->cmd_pool); + + spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); +} + +/** + * megasas_enable_intr - Enables interrupts + * @regs: MFI register set + */ +static inline void +megasas_enable_intr(struct megasas_register_set __iomem * regs) +{ + writel(1, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_disable_intr - Disables interrupts + * @regs: MFI register set + */ +static inline void +megasas_disable_intr(struct megasas_register_set __iomem * regs) +{ + u32 mask = readl(®s->outbound_intr_mask) & (~0x00000001); + writel(mask, ®s->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_issue_polled - Issues a polling command + * @instance: Adapter soft state + * @cmd: Command packet to be issued + * + * For polling, MFI requires the cmd_status to be set to 0xFF before posting. + */ +static int +megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + int i; + u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; + + struct megasas_header *frame_hdr = &cmd->frame->hdr; + + frame_hdr->cmd_status = 0xFF; + frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + + /* + * Issue the frame using inbound queue port + */ + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + /* + * Wait for cmd_status to change + */ + for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) { + rmb(); + msleep(1); + } + + if (frame_hdr->cmd_status == 0xff) + return -ETIME; + + return 0; +} + +/** + * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds + * @instance: Adapter soft state + * @cmd: Command to be issued + * + * This function waits on an event for the command to be returned from ISR. + * Used to issue ioctl commands. + */ +static int +megasas_issue_blocked_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + cmd->cmd_status = ENODATA; + + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA)); + + return 0; +} + +/** + * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd + * @instance: Adapter soft state + * @cmd_to_abort: Previously issued cmd to be aborted + * + * MFI firmware can abort previously issued AEN comamnd (automatic event + * notification). The megasas_issue_blocked_abort_cmd() issues such abort + * cmd and blocks till it is completed. 
+ */ +static int +megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd_to_abort) +{ + struct megasas_cmd *cmd; + struct megasas_abort_frame *abort_fr; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return -1; + + abort_fr = &cmd->frame->abort; + + /* + * Prepare and issue the abort frame + */ + abort_fr->cmd = MFI_CMD_ABORT; + abort_fr->cmd_status = 0xFF; + abort_fr->flags = 0; + abort_fr->abort_context = cmd_to_abort->index; + abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; + abort_fr->abort_mfi_phys_addr_hi = 0; + + cmd->sync_cmd = 1; + cmd->cmd_status = 0xFF; + + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + /* + * Wait for this cmd to complete + */ + wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF)); + + megasas_return_cmd(instance, cmd); + return 0; +} + +/** + * megasas_make_sgl32 - Prepares 32-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. + */ +static inline int +megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, + union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + /* + * Return 0 if there is no data transfer + */ + if (!scp->request_buffer || !scp->request_bufflen) + return 0; + + if (!scp->use_sg) { + mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev, + scp-> + request_buffer, + scp-> + request_bufflen, + scp-> + sc_data_direction); + mfi_sgl->sge32[0].length = scp->request_bufflen; + + return 1; + } + + os_sgl = (struct scatterlist *)scp->request_buffer; + sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg, + scp->sc_data_direction); + + for (i = 0; i < sge_count; i++, os_sgl++) { + mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); + mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); + } + + return sge_count; +} + +/** + * megasas_make_sgl64 - Prepares 64-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. + */ +static inline int +megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, + union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + /* + * Return 0 if there is no data transfer + */ + if (!scp->request_buffer || !scp->request_bufflen) + return 0; + + if (!scp->use_sg) { + mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev, + scp-> + request_buffer, + scp-> + request_bufflen, + scp-> + sc_data_direction); + + mfi_sgl->sge64[0].length = scp->request_bufflen; + + return 1; + } + + os_sgl = (struct scatterlist *)scp->request_buffer; + sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg, + scp->sc_data_direction); + + for (i = 0; i < sge_count; i++, os_sgl++) { + mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); + mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); + } + + return sge_count; +} + +/** + * megasas_build_dcdb - Prepares a direct cdb (DCDB) command + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared in + * + * This function prepares CDB commands. These are typcially pass-through + * commands to the devices. 
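Before the DCDB builder below, a quick illustration of what the two SGL helpers above actually produce: an array of {physical address, length} pairs, one per DMA-mapped segment. A minimal standalone sketch, plain userspace C; ex_sge32 merely mirrors the shape of the driver's 32-bit SGE, and the segment addresses are invented:

#include <stdio.h>

/* Toy stand-in for the driver's 32-bit SGE: one DMA address plus a length */
struct ex_sge32 {
        unsigned int phys_addr;
        unsigned int length;
};

int main(void)
{
        /* Pretend the PCI mapping produced three segments at these bus addresses */
        unsigned int seg_addr[] = { 0x1000000, 0x1002000, 0x1008000 };
        unsigned int seg_len[]  = { 4096, 4096, 2048 };
        struct ex_sge32 sgl[3];
        int i, sge_count = 3;

        for (i = 0; i < sge_count; i++) {
                sgl[i].phys_addr = seg_addr[i];
                sgl[i].length = seg_len[i];
        }

        for (i = 0; i < sge_count; i++)
                printf("SGE %d: addr=0x%x len=%u\n", i, sgl[i].phys_addr, sgl[i].length);
        return 0;
}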
+ */ +static inline int +megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd *cmd) +{ + u32 sge_sz; + int sge_bytes; + u32 is_logical; + u32 device_id; + u16 flags = 0; + struct megasas_pthru_frame *pthru; + + is_logical = MEGASAS_IS_LOGICAL(scp); + device_id = MEGASAS_DEV_INDEX(instance, scp); + pthru = (struct megasas_pthru_frame *)cmd->frame; + + if (scp->sc_data_direction == PCI_DMA_TODEVICE) + flags = MFI_FRAME_DIR_WRITE; + else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) + flags = MFI_FRAME_DIR_READ; + else if (scp->sc_data_direction == PCI_DMA_NONE) + flags = MFI_FRAME_DIR_NONE; + + /* + * Prepare the DCDB frame + */ + pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; + pthru->cmd_status = 0x0; + pthru->scsi_status = 0x0; + pthru->target_id = device_id; + pthru->lun = scp->device->lun; + pthru->cdb_len = scp->cmd_len; + pthru->timeout = 0; + pthru->flags = flags; + pthru->data_xfer_len = scp->request_bufflen; + + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + /* + * Construct SGL + */ + sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + if (IS_DMA64) { + pthru->flags |= MFI_FRAME_SGL64; + pthru->sge_count = megasas_make_sgl64(instance, scp, + &pthru->sgl); + } else + pthru->sge_count = megasas_make_sgl32(instance, scp, + &pthru->sgl); + + /* + * Sense info specific + */ + pthru->sense_len = SCSI_SENSE_BUFFERSIZE; + pthru->sense_buf_phys_addr_hi = 0; + pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + + sge_bytes = sge_sz * pthru->sge_count; + + /* + * Compute the total number of frames this command consumes. FW uses + * this number to pull sufficient number of frames from host memory. + */ + cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; + + if (cmd->frame_count > 7) + cmd->frame_count = 8; + + return cmd->frame_count; +} + +/** + * megasas_build_ldio - Prepares IOs to logical devices + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to to be prepared + * + * Frames (and accompanying SGLs) for regular SCSI IOs use this function. + */ +static inline int +megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd *cmd) +{ + u32 sge_sz; + int sge_bytes; + u32 device_id; + u8 sc = scp->cmnd[0]; + u16 flags = 0; + struct megasas_io_frame *ldio; + + device_id = MEGASAS_DEV_INDEX(instance, scp); + ldio = (struct megasas_io_frame *)cmd->frame; + + if (scp->sc_data_direction == PCI_DMA_TODEVICE) + flags = MFI_FRAME_DIR_WRITE; + else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) + flags = MFI_FRAME_DIR_READ; + + /* + * Preare the Logical IO frame: 2nd bit is zero for all read cmds + */ + ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; + ldio->cmd_status = 0x0; + ldio->scsi_status = 0x0; + ldio->target_id = device_id; + ldio->timeout = 0; + ldio->reserved_0 = 0; + ldio->pad_0 = 0; + ldio->flags = flags; + ldio->start_lba_hi = 0; + ldio->access_byte = (scp->cmd_len != 6) ? 
scp->cmnd[1] : 0; + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if (scp->cmd_len == 6) { + ldio->lba_count = (u32) scp->cmnd[4]; + ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | + ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; + + ldio->start_lba_lo &= 0x1FFFFF; + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + else if (scp->cmd_len == 10) { + ldio->lba_count = (u32) scp->cmnd[8] | + ((u32) scp->cmnd[7] << 8); + ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + else if (scp->cmd_len == 12) { + ldio->lba_count = ((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + + ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + /* + * 16-byte READ(0x88) or WRITE(0x8A) cdb + */ + else if (scp->cmd_len == 16) { + ldio->lba_count = ((u32) scp->cmnd[10] << 24) | + ((u32) scp->cmnd[11] << 16) | + ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; + + ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + + ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + + } + + /* + * Construct SGL + */ + sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + if (IS_DMA64) { + ldio->flags |= MFI_FRAME_SGL64; + ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); + } else + ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); + + /* + * Sense info specific + */ + ldio->sense_len = SCSI_SENSE_BUFFERSIZE; + ldio->sense_buf_phys_addr_hi = 0; + ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + + sge_bytes = sge_sz * ldio->sge_count; + + cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; + + if (cmd->frame_count > 7) + cmd->frame_count = 8; + + return cmd->frame_count; +} + +/** + * megasas_build_cmd - Prepares a command packet + * @instance: Adapter soft state + * @scp: SCSI command + * @frame_count: [OUT] Number of frames used to prepare this command + */ +static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance + *instance, + struct scsi_cmnd *scp, + int *frame_count) +{ + u32 logical_cmd; + struct megasas_cmd *cmd; + + /* + * Find out if this is logical or physical drive command. 
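To make the CDB parsing in megasas_build_ldio() above concrete, here is a small standalone C sketch decoding a hypothetical 10-byte READ(10) the same way: the LBA is assembled big-endian from bytes 2..5 and the transfer length from bytes 7..8. The CDB bytes are invented for illustration:

#include <stdio.h>

int main(void)
{
        /* Hypothetical READ(10): opcode 0x28, LBA 0x00001234, transfer length 0x0010 */
        unsigned char cdb[10] = { 0x28, 0x00, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x10, 0x00 };

        /* Same byte assembly as the 10-byte branch above (u32 becomes unsigned int here) */
        unsigned int lba_count = (unsigned int)cdb[8] | ((unsigned int)cdb[7] << 8);
        unsigned int start_lba = ((unsigned int)cdb[2] << 24) | ((unsigned int)cdb[3] << 16) |
                                 ((unsigned int)cdb[4] << 8) | (unsigned int)cdb[5];

        /* Prints start_lba=4660 (0x1234) lba_count=16 */
        printf("start_lba=%u (0x%x) lba_count=%u\n", start_lba, start_lba, lba_count);
        return 0;
}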
+ */ + logical_cmd = MEGASAS_IS_LOGICAL(scp); + + /* + * Logical drive command + */ + if (logical_cmd) { + + if (scp->device->id >= MEGASAS_MAX_LD) { + scp->result = DID_BAD_TARGET << 16; + return NULL; + } + + switch (scp->cmnd[0]) { + + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + case READ_6: + case WRITE_6: + case READ_16: + case WRITE_16: + /* + * Fail for LUN > 0 + */ + if (scp->device->lun) { + scp->result = DID_BAD_TARGET << 16; + return NULL; + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + scp->result = DID_IMM_RETRY << 16; + return NULL; + } + + *frame_count = megasas_build_ldio(instance, scp, cmd); + + if (!(*frame_count)) { + megasas_return_cmd(instance, cmd); + return NULL; + } + + return cmd; + + default: + /* + * Fail for LUN > 0 + */ + if (scp->device->lun) { + scp->result = DID_BAD_TARGET << 16; + return NULL; + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + scp->result = DID_IMM_RETRY << 16; + return NULL; + } + + *frame_count = megasas_build_dcdb(instance, scp, cmd); + + if (!(*frame_count)) { + megasas_return_cmd(instance, cmd); + return NULL; + } + + return cmd; + } + } else { + cmd = megasas_get_cmd(instance); + + if (!cmd) { + scp->result = DID_IMM_RETRY << 16; + return NULL; + } + + *frame_count = megasas_build_dcdb(instance, scp, cmd); + + if (!(*frame_count)) { + megasas_return_cmd(instance, cmd); + return NULL; + } + + return cmd; + } + + return NULL; +} + +/** + * megasas_queue_command - Queue entry point + * @scmd: SCSI command to be queued + * @done: Callback entry point + */ +static int +megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) +{ + u32 frame_count; + unsigned long flags; + struct megasas_cmd *cmd; + struct megasas_instance *instance; + + instance = (struct megasas_instance *) + scmd->device->host->hostdata; + scmd->scsi_done = done; + scmd->result = 0; + + cmd = megasas_build_cmd(instance, scmd, &frame_count); + + if (!cmd) { + done(scmd); + return 0; + } + + cmd->scmd = scmd; + scmd->SCp.ptr = (char *)cmd; + scmd->SCp.sent_command = jiffies; + + /* + * Issue the command to the FW + */ + spin_lock_irqsave(&instance->instance_lock, flags); + instance->fw_outstanding++; + spin_unlock_irqrestore(&instance->instance_lock, flags); + + writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)), + &instance->reg_set->inbound_queue_port); + + return 0; +} + +/** + * megasas_wait_for_outstanding - Wait for all outstanding cmds + * @instance: Adapter soft state + * + * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to + * complete all its outstanding commands. Returns error if one or more IOs + * are pending after this time period. It also marks the controller dead. + */ +static int megasas_wait_for_outstanding(struct megasas_instance *instance) +{ + int i; + u32 wait_time = MEGASAS_RESET_WAIT_TIME; + + for (i = 0; i < wait_time; i++) { + + if (!instance->fw_outstanding) + break; + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + printk(KERN_NOTICE "megasas: [%2d]waiting for %d " + "commands to complete\n", i, + instance->fw_outstanding); + } + + msleep(1000); + } + + if (instance->fw_outstanding) { + instance->hw_crit_error = 1; + return FAILED; + } + + return SUCCESS; +} + +/** + * megasas_generic_reset - Generic reset routine + * @scmd: Mid-layer SCSI command + * + * This routine implements a generic reset handler for device, bus and host + * reset requests. 
Device, bus and host specific reset handlers can use this + * function after they do their specific tasks. + */ +static int megasas_generic_reset(struct scsi_cmnd *scmd) +{ + int ret_val; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x \n", + scmd->serial_number, scmd->cmnd[0], scmd->device->channel, + scmd->device->id, scmd->device->lun); + + if (instance->hw_crit_error) { + printk(KERN_ERR "megasas: cannot recover from previous reset " + "failures\n"); + return FAILED; + } + + spin_unlock(scmd->device->host->host_lock); + + ret_val = megasas_wait_for_outstanding(instance); + + if (ret_val == SUCCESS) + printk(KERN_NOTICE "megasas: reset successful \n"); + else + printk(KERN_ERR "megasas: failed to do reset\n"); + + spin_lock(scmd->device->host->host_lock); + + return ret_val; +} + +static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) +{ + unsigned long seconds; + + if (scmd->SCp.ptr) { + seconds = (jiffies - scmd->SCp.sent_command) / HZ; + + if (seconds < 90) { + return EH_RESET_TIMER; + } else { + return EH_NOT_HANDLED; + } + } + + return EH_HANDLED; +} + +/** + * megasas_reset_device - Device reset handler entry point + */ +static int megasas_reset_device(struct scsi_cmnd *scmd) +{ + int ret; + + /* + * First wait for all commands to complete + */ + ret = megasas_generic_reset(scmd); + + return ret; +} + +/** + * megasas_reset_bus_host - Bus & host reset handler entry point + */ +static int megasas_reset_bus_host(struct scsi_cmnd *scmd) +{ + int ret; + + /* + * Frist wait for all commands to complete + */ + ret = megasas_generic_reset(scmd); + + return ret; +} + +/** + * megasas_service_aen - Processes an event notification + * @instance: Adapter soft state + * @cmd: AEN command completed by the ISR + * + * For AEN, driver sends a command down to FW that is held by the FW till an + * event occurs. When an event of interest occurs, FW completes the command + * that it was previously holding. + * + * This routines sends SIGIO signal to processes that have registered with the + * driver for AEN. + */ +static void +megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + /* + * Don't signal app if it is just an aborted previously registered aen + */ + if (!cmd->abort_aen) + kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); + else + cmd->abort_aen = 0; + + instance->aen_cmd = NULL; + megasas_return_cmd(instance, cmd); +} + +/* + * Scsi host template for megaraid_sas driver + */ +static struct scsi_host_template megasas_template = { + + .module = THIS_MODULE, + .name = "LSI Logic SAS based MegaRAID driver", + .proc_name = "megaraid_sas", + .queuecommand = megasas_queue_command, + .eh_device_reset_handler = megasas_reset_device, + .eh_bus_reset_handler = megasas_reset_bus_host, + .eh_host_reset_handler = megasas_reset_bus_host, + .eh_timed_out = megasas_reset_timer, + .use_clustering = ENABLE_CLUSTERING, +}; + +/** + * megasas_complete_int_cmd - Completes an internal command + * @instance: Adapter soft state + * @cmd: Command to be completed + * + * The megasas_issue_blocked_cmd() function waits for a command to complete + * after it issues a command. This function wakes up that waiting routine by + * calling wake_up() on the wait queue. 
+ */ +static void +megasas_complete_int_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + cmd->cmd_status = cmd->frame->io.cmd_status; + + if (cmd->cmd_status == ENODATA) { + cmd->cmd_status = 0; + } + wake_up(&instance->int_cmd_wait_q); +} + +/** + * megasas_complete_abort - Completes aborting a command + * @instance: Adapter soft state + * @cmd: Cmd that was issued to abort another cmd + * + * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q + * after it issues an abort on a previously issued command. This function + * wakes up all functions waiting on the same wait queue. + */ +static void +megasas_complete_abort(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + if (cmd->sync_cmd) { + cmd->sync_cmd = 0; + cmd->cmd_status = 0; + wake_up(&instance->abort_cmd_wait_q); + } + + return; +} + +/** + * megasas_unmap_sgbuf - Unmap SG buffers + * @instance: Adapter soft state + * @cmd: Completed command + */ +static inline void +megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + dma_addr_t buf_h; + u8 opcode; + + if (cmd->scmd->use_sg) { + pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer, + cmd->scmd->use_sg, cmd->scmd->sc_data_direction); + return; + } + + if (!cmd->scmd->request_bufflen) + return; + + opcode = cmd->frame->hdr.cmd; + + if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) { + if (IS_DMA64) + buf_h = cmd->frame->io.sgl.sge64[0].phys_addr; + else + buf_h = cmd->frame->io.sgl.sge32[0].phys_addr; + } else { + if (IS_DMA64) + buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr; + else + buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr; + } + + pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen, + cmd->scmd->sc_data_direction); + return; +} + +/** + * megasas_complete_cmd - Completes a command + * @instance: Adapter soft state + * @cmd: Command to be completed + * @alt_status: If non-zero, use this value as status to + * SCSI mid-layer instead of the value returned + * by the FW. This should be used if caller wants + * an alternate status (as in the case of aborted + * commands) + */ +static inline void +megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, + u8 alt_status) +{ + int exception = 0; + struct megasas_header *hdr = &cmd->frame->hdr; + unsigned long flags; + + if (cmd->scmd) { + cmd->scmd->SCp.ptr = (char *)0; + } + + switch (hdr->cmd) { + + case MFI_CMD_PD_SCSI_IO: + case MFI_CMD_LD_SCSI_IO: + + /* + * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been + * issued either through an IO path or an IOCTL path. If it + * was via IOCTL, we will send it to internal completion. + */ + if (cmd->sync_cmd) { + cmd->sync_cmd = 0; + megasas_complete_int_cmd(instance, cmd); + break; + } + + /* + * Don't export physical disk devices to mid-layer. 
+ */ + if (!MEGASAS_IS_LOGICAL(cmd->scmd) && + (hdr->cmd_status == MFI_STAT_OK) && + (cmd->scmd->cmnd[0] == INQUIRY)) { + + if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) == + TYPE_DISK) { + cmd->scmd->result = DID_BAD_TARGET << 16; + exception = 1; + } + } + + case MFI_CMD_LD_READ: + case MFI_CMD_LD_WRITE: + + if (alt_status) { + cmd->scmd->result = alt_status << 16; + exception = 1; + } + + if (exception) { + + spin_lock_irqsave(&instance->instance_lock, flags); + instance->fw_outstanding--; + spin_unlock_irqrestore(&instance->instance_lock, flags); + + megasas_unmap_sgbuf(instance, cmd); + cmd->scmd->scsi_done(cmd->scmd); + megasas_return_cmd(instance, cmd); + + break; + } + + switch (hdr->cmd_status) { + + case MFI_STAT_OK: + cmd->scmd->result = DID_OK << 16; + break; + + case MFI_STAT_SCSI_IO_FAILED: + case MFI_STAT_LD_INIT_IN_PROGRESS: + cmd->scmd->result = + (DID_ERROR << 16) | hdr->scsi_status; + break; + + case MFI_STAT_SCSI_DONE_WITH_ERROR: + + cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; + + if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { + memset(cmd->scmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + memcpy(cmd->scmd->sense_buffer, cmd->sense, + hdr->sense_len); + + cmd->scmd->result |= DRIVER_SENSE << 24; + } + + break; + + case MFI_STAT_LD_OFFLINE: + case MFI_STAT_DEVICE_NOT_FOUND: + cmd->scmd->result = DID_BAD_TARGET << 16; + break; + + default: + printk(KERN_DEBUG "megasas: MFI FW status %#x\n", + hdr->cmd_status); + cmd->scmd->result = DID_ERROR << 16; + break; + } + + spin_lock_irqsave(&instance->instance_lock, flags); + instance->fw_outstanding--; + spin_unlock_irqrestore(&instance->instance_lock, flags); + + megasas_unmap_sgbuf(instance, cmd); + cmd->scmd->scsi_done(cmd->scmd); + megasas_return_cmd(instance, cmd); + + break; + + case MFI_CMD_SMP: + case MFI_CMD_STP: + case MFI_CMD_DCMD: + + /* + * See if got an event notification + */ + if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) + megasas_service_aen(instance, cmd); + else + megasas_complete_int_cmd(instance, cmd); + + break; + + case MFI_CMD_ABORT: + /* + * Cmd issued to abort another cmd returned + */ + megasas_complete_abort(instance, cmd); + break; + + default: + printk("megasas: Unknown command completed! 
[0x%X]\n", + hdr->cmd); + break; + } +} + +/** + * megasas_deplete_reply_queue - Processes all completed commands + * @instance: Adapter soft state + * @alt_status: Alternate status to be returned to + * SCSI mid-layer instead of the status + * returned by the FW + */ +static inline int +megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) +{ + u32 status; + u32 producer; + u32 consumer; + u32 context; + struct megasas_cmd *cmd; + + /* + * Check if it is our interrupt + */ + status = readl(&instance->reg_set->outbound_intr_status); + + if (!(status & MFI_OB_INTR_STATUS_MASK)) { + return IRQ_NONE; + } + + /* + * Clear the interrupt by writing back the same value + */ + writel(status, &instance->reg_set->outbound_intr_status); + + producer = *instance->producer; + consumer = *instance->consumer; + + while (consumer != producer) { + context = instance->reply_queue[consumer]; + + cmd = instance->cmd_list[context]; + + megasas_complete_cmd(instance, cmd, alt_status); + + consumer++; + if (consumer == (instance->max_fw_cmds + 1)) { + consumer = 0; + } + } + + *instance->consumer = producer; + + return IRQ_HANDLED; +} + +/** + * megasas_isr - isr entry point + */ +static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs) +{ + return megasas_deplete_reply_queue((struct megasas_instance *)devp, + DID_OK); +} + +/** + * megasas_transition_to_ready - Move the FW to READY state + * @reg_set: MFI register set + * + * During the initialization, FW passes can potentially be in any one of + * several possible states. If the FW in operational, waiting-for-handshake + * states, driver must take steps to bring it to ready state. Otherwise, it + * has to wait for the ready state. + */ +static int +megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set) +{ + int i; + u8 max_wait; + u32 fw_state; + u32 cur_state; + + fw_state = readl(®_set->outbound_msg_0) & MFI_STATE_MASK; + + while (fw_state != MFI_STATE_READY) { + + printk(KERN_INFO "megasas: Waiting for FW to come to ready" + " state\n"); + switch (fw_state) { + + case MFI_STATE_FAULT: + + printk(KERN_DEBUG "megasas: FW in FAULT state!!\n"); + return -ENODEV; + + case MFI_STATE_WAIT_HANDSHAKE: + /* + * Set the CLR bit in inbound doorbell + */ + writel(MFI_INIT_CLEAR_HANDSHAKE, + ®_set->inbound_doorbell); + + max_wait = 2; + cur_state = MFI_STATE_WAIT_HANDSHAKE; + break; + + case MFI_STATE_OPERATIONAL: + /* + * Bring it to READY state; assuming max wait 2 secs + */ + megasas_disable_intr(reg_set); + writel(MFI_INIT_READY, ®_set->inbound_doorbell); + + max_wait = 10; + cur_state = MFI_STATE_OPERATIONAL; + break; + + case MFI_STATE_UNDEFINED: + /* + * This state should not last for more than 2 seconds + */ + max_wait = 2; + cur_state = MFI_STATE_UNDEFINED; + break; + + case MFI_STATE_BB_INIT: + max_wait = 2; + cur_state = MFI_STATE_BB_INIT; + break; + + case MFI_STATE_FW_INIT: + max_wait = 20; + cur_state = MFI_STATE_FW_INIT; + break; + + case MFI_STATE_FW_INIT_2: + max_wait = 20; + cur_state = MFI_STATE_FW_INIT_2; + break; + + case MFI_STATE_DEVICE_SCAN: + max_wait = 20; + cur_state = MFI_STATE_DEVICE_SCAN; + break; + + case MFI_STATE_FLUSH_CACHE: + max_wait = 20; + cur_state = MFI_STATE_FLUSH_CACHE; + break; + + default: + printk(KERN_DEBUG "megasas: Unknown state 0x%x\n", + fw_state); + return -ENODEV; + } + + /* + * The cur_state should not last for more than max_wait secs + */ + for (i = 0; i < (max_wait * 1000); i++) { + fw_state = MFI_STATE_MASK & + readl(®_set->outbound_msg_0); + + if 
(fw_state == cur_state) { + msleep(1); + } else + break; + } + + /* + * Return error if fw_state hasn't changed after max_wait + */ + if (fw_state == cur_state) { + printk(KERN_DEBUG "FW state [%d] hasn't changed " + "in %d secs\n", fw_state, max_wait); + return -ENODEV; + } + }; + + return 0; +} + +/** + * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool + * @instance: Adapter soft state + */ +static void megasas_teardown_frame_pool(struct megasas_instance *instance) +{ + int i; + u32 max_cmd = instance->max_fw_cmds; + struct megasas_cmd *cmd; + + if (!instance->frame_dma_pool) + return; + + /* + * Return all frames to pool + */ + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + if (cmd->frame) + pci_pool_free(instance->frame_dma_pool, cmd->frame, + cmd->frame_phys_addr); + + if (cmd->sense) + pci_pool_free(instance->sense_dma_pool, cmd->frame, + cmd->sense_phys_addr); + } + + /* + * Now destroy the pool itself + */ + pci_pool_destroy(instance->frame_dma_pool); + pci_pool_destroy(instance->sense_dma_pool); + + instance->frame_dma_pool = NULL; + instance->sense_dma_pool = NULL; +} + +/** + * megasas_create_frame_pool - Creates DMA pool for cmd frames + * @instance: Adapter soft state + * + * Each command packet has an embedded DMA memory buffer that is used for + * filling MFI frame and the SG list that immediately follows the frame. This + * function creates those DMA memory buffers for each command packet by using + * PCI pool facility. + */ +static int megasas_create_frame_pool(struct megasas_instance *instance) +{ + int i; + u32 max_cmd; + u32 sge_sz; + u32 sgl_sz; + u32 total_sz; + u32 frame_count; + struct megasas_cmd *cmd; + + max_cmd = instance->max_fw_cmds; + + /* + * Size of our frame is 64 bytes for MFI frame, followed by max SG + * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer + */ + sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + /* + * Calculated the number of 64byte frames required for SGL + */ + sgl_sz = sge_sz * instance->max_num_sge; + frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; + + /* + * We need one extra frame for the MFI command + */ + frame_count++; + + total_sz = MEGAMFI_FRAME_SIZE * frame_count; + /* + * Use DMA pool facility provided by PCI layer + */ + instance->frame_dma_pool = pci_pool_create("megasas frame pool", + instance->pdev, total_sz, 64, + 0); + + if (!instance->frame_dma_pool) { + printk(KERN_DEBUG "megasas: failed to setup frame pool\n"); + return -ENOMEM; + } + + instance->sense_dma_pool = pci_pool_create("megasas sense pool", + instance->pdev, 128, 4, 0); + + if (!instance->sense_dma_pool) { + printk(KERN_DEBUG "megasas: failed to setup sense pool\n"); + + pci_pool_destroy(instance->frame_dma_pool); + instance->frame_dma_pool = NULL; + + return -ENOMEM; + } + + /* + * Allocate and attach a frame to each of the commands in cmd_list. 
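The per-command frame buffer sizing in megasas_create_frame_pool() above is easy to check by hand. A standalone sketch of the same arithmetic, with an assumed SGE limit; the value 80 is made up, the real limit is read from the firmware status register elsewhere in the driver:

#include <stdio.h>

#define EX_FRAME_SIZE 64        /* stands in for MEGAMFI_FRAME_SIZE */

int main(void)
{
        unsigned int max_num_sge = 80;  /* assumed firmware-reported SGE limit */
        unsigned int sge_sz = 8;        /* 32-bit SGE: 4-byte address + 4-byte length */

        unsigned int sgl_sz = sge_sz * max_num_sge;                             /* 640 bytes of SGL */
        unsigned int frame_count = (sgl_sz + EX_FRAME_SIZE - 1) / EX_FRAME_SIZE; /* 10 frames */

        frame_count++;          /* one extra frame for the MFI command itself */

        /* Prints: 11 frames, 704 bytes per command */
        printf("%u frames, %u bytes per command\n", frame_count, frame_count * EX_FRAME_SIZE);
        return 0;
}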
+ * By making cmd->index as the context instead of the &cmd, we can + * always use 32bit context regardless of the architecture + */ + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + cmd->frame = pci_pool_alloc(instance->frame_dma_pool, + GFP_KERNEL, &cmd->frame_phys_addr); + + cmd->sense = pci_pool_alloc(instance->sense_dma_pool, + GFP_KERNEL, &cmd->sense_phys_addr); + + /* + * megasas_teardown_frame_pool() takes care of freeing + * whatever has been allocated + */ + if (!cmd->frame || !cmd->sense) { + printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n"); + megasas_teardown_frame_pool(instance); + return -ENOMEM; + } + + cmd->frame->io.context = cmd->index; + } + + return 0; +} + +/** + * megasas_free_cmds - Free all the cmds in the free cmd pool + * @instance: Adapter soft state + */ +static void megasas_free_cmds(struct megasas_instance *instance) +{ + int i; + /* First free the MFI frame pool */ + megasas_teardown_frame_pool(instance); + + /* Free all the commands in the cmd_list */ + for (i = 0; i < instance->max_fw_cmds; i++) + kfree(instance->cmd_list[i]); + + /* Free the cmd_list buffer itself */ + kfree(instance->cmd_list); + instance->cmd_list = NULL; + + INIT_LIST_HEAD(&instance->cmd_pool); +} + +/** + * megasas_alloc_cmds - Allocates the command packets + * @instance: Adapter soft state + * + * Each command that is issued to the FW, whether IO commands from the OS or + * internal commands like IOCTLs, are wrapped in local data structure called + * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to + * the FW. + * + * Each frame has a 32-bit field called context (tag). This context is used + * to get back the megasas_cmd from the frame when a frame gets completed in + * the ISR. Typically the address of the megasas_cmd itself would be used as + * the context. But we wanted to keep the differences between 32 and 64 bit + * systems to the mininum. We always use 32 bit integers for the context. In + * this driver, the 32 bit values are the indices into an array cmd_list. + * This array is used only to look up the megasas_cmd given the context. The + * free commands themselves are maintained in a linked list called cmd_pool. + */ +static int megasas_alloc_cmds(struct megasas_instance *instance) +{ + int i; + int j; + u32 max_cmd; + struct megasas_cmd *cmd; + + max_cmd = instance->max_fw_cmds; + + /* + * instance->cmd_list is an array of struct megasas_cmd pointers. + * Allocate the dynamic array first and then allocate individual + * commands. 
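A small standalone illustration of the context-as-array-index scheme described above; ex_cmd is a toy stand-in for struct megasas_cmd, not the driver's structure:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct megasas_cmd: only the index matters here */
struct ex_cmd {
        unsigned int index;     /* this value, not the pointer, is the frame context */
};

int main(void)
{
        enum { EX_MAX_CMDS = 4 };
        struct ex_cmd *cmd_list[EX_MAX_CMDS];
        unsigned int i, context;

        for (i = 0; i < EX_MAX_CMDS; i++) {
                cmd_list[i] = malloc(sizeof(*cmd_list[i]));
                cmd_list[i]->index = i;
        }

        /* Completion path: a 32-bit context comes back from the firmware... */
        context = 2;
        /* ...and a plain array lookup recovers the command, identically on
         * 32-bit and 64-bit kernels, which is the point of using an index */
        printf("completed command index %u\n", cmd_list[context]->index);

        for (i = 0; i < EX_MAX_CMDS; i++)
                free(cmd_list[i]);
        return 0;
}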
+ */ + instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd, + GFP_KERNEL); + + if (!instance->cmd_list) { + printk(KERN_DEBUG "megasas: out of memory\n"); + return -ENOMEM; + } + + memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd); + + for (i = 0; i < max_cmd; i++) { + instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), + GFP_KERNEL); + + if (!instance->cmd_list[i]) { + + for (j = 0; j < i; j++) + kfree(instance->cmd_list[j]); + + kfree(instance->cmd_list); + instance->cmd_list = NULL; + + return -ENOMEM; + } + } + + /* + * Add all the commands to command pool (instance->cmd_pool) + */ + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + memset(cmd, 0, sizeof(struct megasas_cmd)); + cmd->index = i; + cmd->instance = instance; + + list_add_tail(&cmd->list, &instance->cmd_pool); + } + + /* + * Create a frame pool and assign one frame to each cmd + */ + if (megasas_create_frame_pool(instance)) { + printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); + megasas_free_cmds(instance); + } + + return 0; +} + +/** + * megasas_get_controller_info - Returns FW's controller structure + * @instance: Adapter soft state + * @ctrl_info: Controller information structure + * + * Issues an internal command (DCMD) to get the FW's controller structure. + * This information is mainly used to find out the maximum IO transfer per + * command supported by the FW. + */ +static int +megasas_get_ctrl_info(struct megasas_instance *instance, + struct megasas_ctrl_info *ctrl_info) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct megasas_ctrl_info *ci; + dma_addr_t ci_h = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_DEBUG "megasas: Failed to get a free cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + ci = pci_alloc_consistent(instance->pdev, + sizeof(struct megasas_ctrl_info), &ci_h); + + if (!ci) { + printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n"); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); + dcmd->opcode = MR_DCMD_CTRL_GET_INFO; + dcmd->sgl.sge32[0].phys_addr = ci_h; + dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); + + if (!megasas_issue_polled(instance, cmd)) { + ret = 0; + memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); + } else { + ret = -1; + } + + pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), + ci, ci_h); + + megasas_return_cmd(instance, cmd); + return ret; +} + +/** + * megasas_init_mfi - Initializes the FW + * @instance: Adapter soft state + * + * This is the main function for initializing MFI firmware. 
+ */ +static int megasas_init_mfi(struct megasas_instance *instance) +{ + u32 context_sz; + u32 reply_q_sz; + u32 max_sectors_1; + u32 max_sectors_2; + struct megasas_register_set __iomem *reg_set; + + struct megasas_cmd *cmd; + struct megasas_ctrl_info *ctrl_info; + + struct megasas_init_frame *init_frame; + struct megasas_init_queue_info *initq_info; + dma_addr_t init_frame_h; + dma_addr_t initq_info_h; + + /* + * Map the message registers + */ + instance->base_addr = pci_resource_start(instance->pdev, 0); + + if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) { + printk(KERN_DEBUG "megasas: IO memory region busy!\n"); + return -EBUSY; + } + + instance->reg_set = ioremap_nocache(instance->base_addr, 8192); + + if (!instance->reg_set) { + printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); + goto fail_ioremap; + } + + reg_set = instance->reg_set; + + /* + * We expect the FW state to be READY + */ + if (megasas_transition_to_ready(instance->reg_set)) + goto fail_ready_state; + + /* + * Get various operational parameters from status register + */ + instance->max_fw_cmds = readl(®_set->outbound_msg_0) & 0x00FFFF; + instance->max_num_sge = (readl(®_set->outbound_msg_0) & 0xFF0000) >> + 0x10; + /* + * Create a pool of commands + */ + if (megasas_alloc_cmds(instance)) + goto fail_alloc_cmds; + + /* + * Allocate memory for reply queue. Length of reply queue should + * be _one_ more than the maximum commands handled by the firmware. + * + * Note: When FW completes commands, it places corresponding contex + * values in this circular reply queue. This circular queue is a fairly + * typical producer-consumer queue. FW is the producer (of completed + * commands) and the driver is the consumer. + */ + context_sz = sizeof(u32); + reply_q_sz = context_sz * (instance->max_fw_cmds + 1); + + instance->reply_queue = pci_alloc_consistent(instance->pdev, + reply_q_sz, + &instance->reply_queue_h); + + if (!instance->reply_queue) { + printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n"); + goto fail_reply_queue; + } + + /* + * Prepare a init frame. Note the init frame points to queue info + * structure. Each frame has SGL allocated after first 64 bytes. For + * this frame - since we don't need any SGL - we use SGL's space as + * queue info structure + * + * We will not get a NULL command below. We just created the pool. 
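Since the circular reply queue described a few lines up, with the firmware as producer and the driver as consumer, is the heart of the completion path, here is a minimal standalone model of the drain loop used by megasas_deplete_reply_queue(); the queue depth and context values are invented:

#include <stdio.h>

int main(void)
{
        enum { EX_MAX_FW_CMDS = 4 };
        /* One more slot than outstanding commands, as in the allocation above */
        unsigned int reply_queue[EX_MAX_FW_CMDS + 1] = { 7, 3, 0, 0, 0 };

        /* Indices shared between firmware (producer) and driver (consumer) */
        unsigned int producer = 2;      /* firmware has posted two completions */
        unsigned int consumer = 0;      /* driver has drained none of them yet */

        while (consumer != producer) {
                unsigned int context = reply_queue[consumer];

                printf("completing command with context %u\n", context);

                consumer++;
                if (consumer == EX_MAX_FW_CMDS + 1)
                        consumer = 0;   /* wrap around the circular queue */
        }

        /* The driver then publishes its progress, as in *instance->consumer = producer */
        printf("consumer caught up at index %u\n", consumer);
        return 0;
}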
+ */ + cmd = megasas_get_cmd(instance); + + init_frame = (struct megasas_init_frame *)cmd->frame; + initq_info = (struct megasas_init_queue_info *) + ((unsigned long)init_frame + 64); + + init_frame_h = cmd->frame_phys_addr; + initq_info_h = init_frame_h + 64; + + memset(init_frame, 0, MEGAMFI_FRAME_SIZE); + memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); + + initq_info->reply_queue_entries = instance->max_fw_cmds + 1; + initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; + + initq_info->producer_index_phys_addr_lo = instance->producer_h; + initq_info->consumer_index_phys_addr_lo = instance->consumer_h; + + init_frame->cmd = MFI_CMD_INIT; + init_frame->cmd_status = 0xFF; + init_frame->queue_info_new_phys_addr_lo = initq_info_h; + + init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); + + /* + * Issue the init frame in polled mode + */ + if (megasas_issue_polled(instance, cmd)) { + printk(KERN_DEBUG "megasas: Failed to init firmware\n"); + goto fail_fw_init; + } + + megasas_return_cmd(instance, cmd); + + ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); + + /* + * Compute the max allowed sectors per IO: The controller info has two + * limits on max sectors. Driver should use the minimum of these two. + * + * 1 << stripe_sz_ops.min = max sectors per strip + * + * Note that older firmwares ( < FW ver 30) didn't report information + * to calculate max_sectors_1. So the number ended up as zero always. + */ + if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { + + max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * + ctrl_info->max_strips_per_io; + max_sectors_2 = ctrl_info->max_request_size; + + instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2) + ? max_sectors_1 : max_sectors_2; + } else + instance->max_sectors_per_req = instance->max_num_sge * + PAGE_SIZE / 512; + + kfree(ctrl_info); + + return 0; + + fail_fw_init: + megasas_return_cmd(instance, cmd); + + pci_free_consistent(instance->pdev, reply_q_sz, + instance->reply_queue, instance->reply_queue_h); + fail_reply_queue: + megasas_free_cmds(instance); + + fail_alloc_cmds: + fail_ready_state: + iounmap(instance->reg_set); + + fail_ioremap: + pci_release_regions(instance->pdev); + + return -EINVAL; +} + +/** + * megasas_release_mfi - Reverses the FW initialization + * @intance: Adapter soft state + */ +static void megasas_release_mfi(struct megasas_instance *instance) +{ + u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1); + + pci_free_consistent(instance->pdev, reply_q_sz, + instance->reply_queue, instance->reply_queue_h); + + megasas_free_cmds(instance); + + iounmap(instance->reg_set); + + pci_release_regions(instance->pdev); +} + +/** + * megasas_get_seq_num - Gets latest event sequence numbers + * @instance: Adapter soft state + * @eli: FW event log sequence numbers information + * + * FW maintains a log of all events in a non-volatile area. Upper layers would + * usually find out the latest sequence number of the events, the seq number at + * the boot etc. They would "read" all the events below the latest seq number + * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq + * number), they would subsribe to AEN (asynchronous event notification) and + * wait for the events to happen. 
+ */ +static int +megasas_get_seq_num(struct megasas_instance *instance, + struct megasas_evt_log_info *eli) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct megasas_evt_log_info *el_info; + dma_addr_t el_info_h = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + el_info = pci_alloc_consistent(instance->pdev, + sizeof(struct megasas_evt_log_info), + &el_info_h); + + if (!el_info) { + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(el_info, 0, sizeof(*el_info)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); + dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; + dcmd->sgl.sge32[0].phys_addr = el_info_h; + dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); + + megasas_issue_blocked_cmd(instance, cmd); + + /* + * Copy the data back into callers buffer + */ + memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); + + pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), + el_info, el_info_h); + + megasas_return_cmd(instance, cmd); + + return 0; +} + +/** + * megasas_register_aen - Registers for asynchronous event notification + * @instance: Adapter soft state + * @seq_num: The starting sequence number + * @class_locale: Class of the event + * + * This function subscribes for AEN for events beyond the @seq_num. It requests + * to be notified if and only if the event is of type @class_locale + */ +static int +megasas_register_aen(struct megasas_instance *instance, u32 seq_num, + u32 class_locale_word) +{ + int ret_val; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + union megasas_evt_class_locale curr_aen; + union megasas_evt_class_locale prev_aen; + + /* + * If there an AEN pending already (aen_cmd), check if the + * class_locale of that pending AEN is inclusive of the new + * AEN request we currently have. If it is, then we don't have + * to do anything. In other words, whichever events the current + * AEN request is subscribing to, have already been subscribed + * to. + * + * If the old_cmd is _not_ inclusive, then we have to abort + * that command, form a class_locale that is superset of both + * old and current and re-issue to the FW + */ + + curr_aen.word = class_locale_word; + + if (instance->aen_cmd) { + + prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; + + /* + * A class whose enum value is smaller is inclusive of all + * higher values. If a PROGRESS (= -1) was previously + * registered, then a new registration requests for higher + * classes need not be sent to FW. They are automatically + * included. + * + * Locale numbers don't have such hierarchy. They are bitmap + * values + */ + if ((prev_aen.members.class <= curr_aen.members.class) && + !((prev_aen.members.locale & curr_aen.members.locale) ^ + curr_aen.members.locale)) { + /* + * Previously issued event registration includes + * current request. Nothing to do. 
+ */ + return 0; + } else { + curr_aen.members.locale |= prev_aen.members.locale; + + if (prev_aen.members.class < curr_aen.members.class) + curr_aen.members.class = prev_aen.members.class; + + instance->aen_cmd->abort_aen = 1; + ret_val = megasas_issue_blocked_abort_cmd(instance, + instance-> + aen_cmd); + + if (ret_val) { + printk(KERN_DEBUG "megasas: Failed to abort " + "previous AEN command\n"); + return ret_val; + } + } + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return -ENOMEM; + + dcmd = &cmd->frame->dcmd; + + memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); + + /* + * Prepare DCMD for aen registration + */ + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); + dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; + dcmd->mbox.w[0] = seq_num; + dcmd->mbox.w[1] = curr_aen.word; + dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; + dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); + + /* + * Store reference to the cmd used to register for AEN. When an + * application wants us to register for AEN, we have to abort this + * cmd and re-register with a new EVENT LOCALE supplied by that app + */ + instance->aen_cmd = cmd; + + /* + * Issue the aen registration frame + */ + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + return 0; +} + +/** + * megasas_start_aen - Subscribes to AEN during driver load time + * @instance: Adapter soft state + */ +static int megasas_start_aen(struct megasas_instance *instance) +{ + struct megasas_evt_log_info eli; + union megasas_evt_class_locale class_locale; + + /* + * Get the latest sequence number from FW + */ + memset(&eli, 0, sizeof(eli)); + + if (megasas_get_seq_num(instance, &eli)) + return -1; + + /* + * Register AEN with FW for latest sequence number plus 1 + */ + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + + return megasas_register_aen(instance, eli.newest_seq_num + 1, + class_locale.word); +} + +/** + * megasas_io_attach - Attaches this driver to SCSI mid-layer + * @instance: Adapter soft state + */ +static int megasas_io_attach(struct megasas_instance *instance) +{ + struct Scsi_Host *host = instance->host; + + /* + * Export parameters required by SCSI mid-layer + */ + host->irq = instance->pdev->irq; + host->unique_id = instance->unique_id; + host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; + host->this_id = instance->init_id; + host->sg_tablesize = instance->max_num_sge; + host->max_sectors = instance->max_sectors_per_req; + host->cmd_per_lun = 128; + host->max_channel = MEGASAS_MAX_CHANNELS - 1; + host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; + host->max_lun = MEGASAS_MAX_LUN; + + /* + * Notify the mid-layer about the new controller + */ + if (scsi_add_host(host, &instance->pdev->dev)) { + printk(KERN_DEBUG "megasas: scsi_add_host failed\n"); + return -ENODEV; + } + + /* + * Trigger SCSI to scan our drives + */ + scsi_scan_host(host); + return 0; +} + +/** + * megasas_probe_one - PCI hotplug entry point + * @pdev: PCI device structure + * @id: PCI ids of supported hotplugged adapter + */ +static int __devinit +megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int rval; + struct Scsi_Host *host; + struct megasas_instance *instance; + + /* + * Announce PCI 
information + */ + printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + + printk("bus %d:slot %d:func %d\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + /* + * PCI prepping: enable device set bus mastering and dma mask + */ + rval = pci_enable_device(pdev); + + if (rval) { + return rval; + } + + pci_set_master(pdev); + + /* + * All our contollers are capable of performing 64-bit DMA + */ + if (IS_DMA64) { + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) { + + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) + goto fail_set_dma_mask; + } + } else { + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) + goto fail_set_dma_mask; + } + + host = scsi_host_alloc(&megasas_template, + sizeof(struct megasas_instance)); + + if (!host) { + printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n"); + goto fail_alloc_instance; + } + + instance = (struct megasas_instance *)host->hostdata; + memset(instance, 0, sizeof(*instance)); + + instance->producer = pci_alloc_consistent(pdev, sizeof(u32), + &instance->producer_h); + instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), + &instance->consumer_h); + + if (!instance->producer || !instance->consumer) { + printk(KERN_DEBUG "megasas: Failed to allocate memory for " + "producer, consumer\n"); + goto fail_alloc_dma_buf; + } + + *instance->producer = 0; + *instance->consumer = 0; + + instance->evt_detail = pci_alloc_consistent(pdev, + sizeof(struct + megasas_evt_detail), + &instance->evt_detail_h); + + if (!instance->evt_detail) { + printk(KERN_DEBUG "megasas: Failed to allocate memory for " + "event detail structure\n"); + goto fail_alloc_dma_buf; + } + + /* + * Initialize locks and queues + */ + INIT_LIST_HEAD(&instance->cmd_pool); + + init_waitqueue_head(&instance->int_cmd_wait_q); + init_waitqueue_head(&instance->abort_cmd_wait_q); + + spin_lock_init(&instance->cmd_pool_lock); + spin_lock_init(&instance->instance_lock); + + sema_init(&instance->aen_mutex, 1); + sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); + + /* + * Initialize PCI related and misc parameters + */ + instance->pdev = pdev; + instance->host = host; + instance->unique_id = pdev->bus->number << 8 | pdev->devfn; + instance->init_id = MEGASAS_DEFAULT_INIT_ID; + + /* + * Initialize MFI Firmware + */ + if (megasas_init_mfi(instance)) + goto fail_init_mfi; + + /* + * Register IRQ + */ + if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) { + printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); + goto fail_irq; + } + + megasas_enable_intr(instance->reg_set); + + /* + * Store instance in PCI softstate + */ + pci_set_drvdata(pdev, instance); + + /* + * Add this controller to megasas_mgmt_info structure so that it + * can be exported to management applications + */ + megasas_mgmt_info.count++; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; + megasas_mgmt_info.max_index++; + + /* + * Initiate AEN (Asynchronous Event Notification) + */ + if (megasas_start_aen(instance)) { + printk(KERN_DEBUG "megasas: start aen failed\n"); + goto fail_start_aen; + } + + /* + * Register with SCSI mid-layer + */ + if (megasas_io_attach(instance)) + goto fail_io_attach; + + return 0; + + fail_start_aen: + fail_io_attach: + megasas_mgmt_info.count--; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; + megasas_mgmt_info.max_index--; + + pci_set_drvdata(pdev, NULL); + megasas_disable_intr(instance->reg_set); + 
free_irq(instance->pdev->irq, instance); + + megasas_release_mfi(instance); + + fail_irq: + fail_init_mfi: + fail_alloc_dma_buf: + if (instance->evt_detail) + pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), + instance->evt_detail, + instance->evt_detail_h); + + if (instance->producer) + pci_free_consistent(pdev, sizeof(u32), instance->producer, + instance->producer_h); + if (instance->consumer) + pci_free_consistent(pdev, sizeof(u32), instance->consumer, + instance->consumer_h); + scsi_host_put(host); + + fail_alloc_instance: + fail_set_dma_mask: + pci_disable_device(pdev); + + return -ENODEV; +} + +/** + * megasas_flush_cache - Requests FW to flush all its caches + * @instance: Adapter soft state + */ +static void megasas_flush_cache(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return; + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 0; + dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->timeout = 0; + dcmd->data_xfer_len = 0; + dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; + dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; + + megasas_issue_blocked_cmd(instance, cmd); + + megasas_return_cmd(instance, cmd); + + return; +} + +/** + * megasas_shutdown_controller - Instructs FW to shutdown the controller + * @instance: Adapter soft state + */ +static void megasas_shutdown_controller(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return; + + if (instance->aen_cmd) + megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 0; + dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->timeout = 0; + dcmd->data_xfer_len = 0; + dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN; + + megasas_issue_blocked_cmd(instance, cmd); + + megasas_return_cmd(instance, cmd); + + return; +} + +/** + * megasas_detach_one - PCI hot"un"plug entry point + * @pdev: PCI device structure + */ +static void megasas_detach_one(struct pci_dev *pdev) +{ + int i; + struct Scsi_Host *host; + struct megasas_instance *instance; + + instance = pci_get_drvdata(pdev); + host = instance->host; + + scsi_remove_host(instance->host); + megasas_flush_cache(instance); + megasas_shutdown_controller(instance); + + /* + * Take the instance off the instance array. Note that we will not + * decrement the max_index. 
We let this array be sparse array + */ + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + if (megasas_mgmt_info.instance[i] == instance) { + megasas_mgmt_info.count--; + megasas_mgmt_info.instance[i] = NULL; + + break; + } + } + + pci_set_drvdata(instance->pdev, NULL); + + megasas_disable_intr(instance->reg_set); + + free_irq(instance->pdev->irq, instance); + + megasas_release_mfi(instance); + + pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), + instance->evt_detail, instance->evt_detail_h); + + pci_free_consistent(pdev, sizeof(u32), instance->producer, + instance->producer_h); + + pci_free_consistent(pdev, sizeof(u32), instance->consumer, + instance->consumer_h); + + scsi_host_put(host); + + pci_set_drvdata(pdev, NULL); + + pci_disable_device(pdev); + + return; +} + +/** + * megasas_shutdown - Shutdown entry point + * @device: Generic device structure + */ +static void megasas_shutdown(struct pci_dev *pdev) +{ + struct megasas_instance *instance = pci_get_drvdata(pdev); + megasas_flush_cache(instance); +} + +/** + * megasas_mgmt_open - char node "open" entry point + */ +static int megasas_mgmt_open(struct inode *inode, struct file *filep) +{ + /* + * Allow only those users with admin rights + */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return 0; +} + +/** + * megasas_mgmt_release - char node "release" entry point + */ +static int megasas_mgmt_release(struct inode *inode, struct file *filep) +{ + filep->private_data = NULL; + fasync_helper(-1, filep, 0, &megasas_async_queue); + + return 0; +} + +/** + * megasas_mgmt_fasync - Async notifier registration from applications + * + * This function adds the calling process to a driver global queue. When an + * event occurs, SIGIO will be sent to all processes in this queue. + */ +static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) +{ + int rc; + + down(&megasas_async_queue_mutex); + + rc = fasync_helper(fd, filep, mode, &megasas_async_queue); + + up(&megasas_async_queue_mutex); + + if (rc >= 0) { + /* For sanity check when we get ioctl */ + filep->private_data = filep; + return 0; + } + + printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); + + return rc; +} + +/** + * megasas_mgmt_fw_ioctl - Issues management ioctls to FW + * @instance: Adapter soft state + * @argp: User's ioctl packet + */ +static int +megasas_mgmt_fw_ioctl(struct megasas_instance *instance, + struct megasas_iocpacket __user * user_ioc, + struct megasas_iocpacket *ioc) +{ + struct megasas_sge32 *kern_sge32; + struct megasas_cmd *cmd; + void *kbuff_arr[MAX_IOCTL_SGE]; + dma_addr_t buf_handle = 0; + int error = 0, i; + void *sense = NULL; + dma_addr_t sense_handle; + u32 *sense_ptr; + + memset(kbuff_arr, 0, sizeof(kbuff_arr)); + + if (ioc->sge_count > MAX_IOCTL_SGE) { + printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n", + ioc->sge_count, MAX_IOCTL_SGE); + return -EINVAL; + } + + cmd = megasas_get_cmd(instance); + if (!cmd) { + printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n"); + return -ENOMEM; + } + + /* + * User's IOCTL packet has 2 frames (maximum). Copy those two + * frames into our cmd's frames. cmd->frame's context will get + * overwritten when we copy from user's frames. So set that value + * alone separately + */ + memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); + cmd->frame->hdr.context = cmd->index; + + /* + * The management interface between applications and the fw uses + * MFI frames. 
E.g, RAID configuration changes, LD property changes + * etc are accomplishes through different kinds of MFI frames. The + * driver needs to care only about substituting user buffers with + * kernel buffers in SGLs. The location of SGL is embedded in the + * struct iocpacket itself. + */ + kern_sge32 = (struct megasas_sge32 *) + ((unsigned long)cmd->frame + ioc->sgl_off); + + /* + * For each user buffer, create a mirror buffer and copy in + */ + for (i = 0; i < ioc->sge_count; i++) { + kbuff_arr[i] = pci_alloc_consistent(instance->pdev, + ioc->sgl[i].iov_len, + &buf_handle); + if (!kbuff_arr[i]) { + printk(KERN_DEBUG "megasas: Failed to alloc " + "kernel SGL buffer for IOCTL \n"); + error = -ENOMEM; + goto out; + } + + /* + * We don't change the dma_coherent_mask, so + * pci_alloc_consistent only returns 32bit addresses + */ + kern_sge32[i].phys_addr = (u32) buf_handle; + kern_sge32[i].length = ioc->sgl[i].iov_len; + + /* + * We created a kernel buffer corresponding to the + * user buffer. Now copy in from the user buffer + */ + if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, + (u32) (ioc->sgl[i].iov_len))) { + error = -EFAULT; + goto out; + } + } + + if (ioc->sense_len) { + sense = pci_alloc_consistent(instance->pdev, ioc->sense_len, + &sense_handle); + if (!sense) { + error = -ENOMEM; + goto out; + } + + sense_ptr = + (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); + *sense_ptr = sense_handle; + } + + /* + * Set the sync_cmd flag so that the ISR knows not to complete this + * cmd to the SCSI mid-layer + */ + cmd->sync_cmd = 1; + megasas_issue_blocked_cmd(instance, cmd); + cmd->sync_cmd = 0; + + /* + * copy out the kernel buffers to user buffers + */ + for (i = 0; i < ioc->sge_count; i++) { + if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], + ioc->sgl[i].iov_len)) { + error = -EFAULT; + goto out; + } + } + + /* + * copy out the sense + */ + if (ioc->sense_len) { + /* + * sense_ptr points to the location that has the user + * sense buffer address + */ + sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + + ioc->sense_off); + + if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), + sense, ioc->sense_len)) { + error = -EFAULT; + goto out; + } + } + + /* + * copy the status codes returned by the fw + */ + if (copy_to_user(&user_ioc->frame.hdr.cmd_status, + &cmd->frame->hdr.cmd_status, sizeof(u8))) { + printk(KERN_DEBUG "megasas: Error copying out cmd_status\n"); + error = -EFAULT; + } + + out: + if (sense) { + pci_free_consistent(instance->pdev, ioc->sense_len, + sense, sense_handle); + } + + for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) { + pci_free_consistent(instance->pdev, + kern_sge32[i].length, + kbuff_arr[i], kern_sge32[i].phys_addr); + } + + megasas_return_cmd(instance, cmd); + return error; +} + +static struct megasas_instance *megasas_lookup_instance(u16 host_no) +{ + int i; + + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + + if ((megasas_mgmt_info.instance[i]) && + (megasas_mgmt_info.instance[i]->host->host_no == host_no)) + return megasas_mgmt_info.instance[i]; + } + + return NULL; +} + +static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) +{ + struct megasas_iocpacket __user *user_ioc = + (struct megasas_iocpacket __user *)arg; + struct megasas_iocpacket *ioc; + struct megasas_instance *instance; + int error; + + ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); + if (!ioc) + return -ENOMEM; + + if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) { + error = -EFAULT; + goto out_kfree_ioc; + } + + instance = 
megasas_lookup_instance(ioc->host_no); + if (!instance) { + error = -ENODEV; + goto out_kfree_ioc; + } + + /* + * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds + */ + if (down_interruptible(&instance->ioctl_sem)) { + error = -ERESTARTSYS; + goto out_kfree_ioc; + } + error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + up(&instance->ioctl_sem); + + out_kfree_ioc: + kfree(ioc); + return error; +} + +static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) +{ + struct megasas_instance *instance; + struct megasas_aen aen; + int error; + + if (file->private_data != file) { + printk(KERN_DEBUG "megasas: fasync_helper was not " + "called first\n"); + return -EINVAL; + } + + if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) + return -EFAULT; + + instance = megasas_lookup_instance(aen.host_no); + + if (!instance) + return -ENODEV; + + down(&instance->aen_mutex); + error = megasas_register_aen(instance, aen.seq_num, + aen.class_locale_word); + up(&instance->aen_mutex); + return error; +} + +/** + * megasas_mgmt_ioctl - char node ioctl entry point + */ +static long +megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case MEGASAS_IOC_FIRMWARE: + return megasas_mgmt_ioctl_fw(file, arg); + + case MEGASAS_IOC_GET_AEN: + return megasas_mgmt_ioctl_aen(file, arg); + } + + return -ENOTTY; +} + +#ifdef CONFIG_COMPAT +static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) +{ + struct compat_megasas_iocpacket __user *cioc = + (struct compat_megasas_iocpacket __user *)arg; + struct megasas_iocpacket __user *ioc = + compat_alloc_user_space(sizeof(struct megasas_iocpacket)); + int i; + int error = 0; + + clear_user(ioc, sizeof(*ioc)); + + if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || + copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || + copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || + copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || + copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || + copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) + return -EFAULT; + + for (i = 0; i < MAX_IOCTL_SGE; i++) { + compat_uptr_t ptr; + + if (get_user(ptr, &cioc->sgl[i].iov_base) || + put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || + copy_in_user(&ioc->sgl[i].iov_len, + &cioc->sgl[i].iov_len, sizeof(compat_size_t))) + return -EFAULT; + } + + error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); + + if (copy_in_user(&cioc->frame.hdr.cmd_status, + &ioc->frame.hdr.cmd_status, sizeof(u8))) { + printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); + return -EFAULT; + } + return error; +} + +static long +megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case MEGASAS_IOC_FIRMWARE:{ + return megasas_mgmt_compat_ioctl_fw(file, arg); + } + case MEGASAS_IOC_GET_AEN: + return megasas_mgmt_ioctl_aen(file, arg); + } + + return -ENOTTY; +} +#endif + +/* + * File operations structure for management interface + */ +static struct file_operations megasas_mgmt_fops = { + .owner = THIS_MODULE, + .open = megasas_mgmt_open, + .release = megasas_mgmt_release, + .fasync = megasas_mgmt_fasync, + .unlocked_ioctl = megasas_mgmt_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = megasas_mgmt_compat_ioctl, +#endif +}; + +/* + * PCI hotplug support registration structure + */ +static struct pci_driver megasas_pci_driver = { + + .name = "megaraid_sas", + .id_table = megasas_pci_table, + .probe = 
megasas_probe_one, + .remove = __devexit_p(megasas_detach_one), + .shutdown = megasas_shutdown, +}; + +/* + * Sysfs driver attributes + */ +static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", + MEGASAS_VERSION); +} + +static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); + +static ssize_t +megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", + MEGASAS_RELDATE); +} + +static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, + NULL); + +/** + * megasas_init - Driver load entry point + */ +static int __init megasas_init(void) +{ + int rval; + + /* + * Announce driver version and other information + */ + printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, + MEGASAS_EXT_VERSION); + + memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); + + /* + * Register character device node + */ + rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); + + if (rval < 0) { + printk(KERN_DEBUG "megasas: failed to open device node\n"); + return rval; + } + + megasas_mgmt_majorno = rval; + + /* + * Register ourselves as PCI hotplug module + */ + rval = pci_module_init(&megasas_pci_driver); + + if (rval) { + printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); + unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); + } + + driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); + driver_create_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + + return rval; +} + +/** + * megasas_exit - Driver unload entry point + */ +static void __exit megasas_exit(void) +{ + driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + + pci_unregister_driver(&megasas_pci_driver); + unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); +} + +module_init(megasas_init); +module_exit(megasas_exit); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h new file mode 100644 index 000000000000..eaec9d531424 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -0,0 +1,1142 @@ +/* + * + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2005 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * FILE : megaraid_sas.h + */ + +#ifndef LSI_MEGARAID_SAS_H +#define LSI_MEGARAID_SAS_H + +/** + * MegaRAID SAS Driver meta data + */ +#define MEGASAS_VERSION "00.00.02.00-rc4" +#define MEGASAS_RELDATE "Sep 16, 2005" +#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005" + +/* + * ===================================== + * MegaRAID SAS MFI firmware definitions + * ===================================== + */ + +/* + * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for + * protocol between the software and firmware. 
Commands are issued using + * "message frames" + */ + +/** + * FW posts its state in upper 4 bits of outbound_msg_0 register + */ +#define MFI_STATE_MASK 0xF0000000 +#define MFI_STATE_UNDEFINED 0x00000000 +#define MFI_STATE_BB_INIT 0x10000000 +#define MFI_STATE_FW_INIT 0x40000000 +#define MFI_STATE_WAIT_HANDSHAKE 0x60000000 +#define MFI_STATE_FW_INIT_2 0x70000000 +#define MFI_STATE_DEVICE_SCAN 0x80000000 +#define MFI_STATE_FLUSH_CACHE 0xA0000000 +#define MFI_STATE_READY 0xB0000000 +#define MFI_STATE_OPERATIONAL 0xC0000000 +#define MFI_STATE_FAULT 0xF0000000 + +#define MEGAMFI_FRAME_SIZE 64 + +/** + * During FW init, clear pending cmds & reset state using inbound_msg_0 + * + * ABORT : Abort all pending cmds + * READY : Move from OPERATIONAL to READY state; discard queue info + * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??) + * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver + */ +#define MFI_INIT_ABORT 0x00000000 +#define MFI_INIT_READY 0x00000002 +#define MFI_INIT_MFIMODE 0x00000004 +#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008 +#define MFI_RESET_FLAGS MFI_INIT_READY|MFI_INIT_MFIMODE + +/** + * MFI frame flags + */ +#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 +#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001 +#define MFI_FRAME_SGL32 0x0000 +#define MFI_FRAME_SGL64 0x0002 +#define MFI_FRAME_SENSE32 0x0000 +#define MFI_FRAME_SENSE64 0x0004 +#define MFI_FRAME_DIR_NONE 0x0000 +#define MFI_FRAME_DIR_WRITE 0x0008 +#define MFI_FRAME_DIR_READ 0x0010 +#define MFI_FRAME_DIR_BOTH 0x0018 + +/** + * Definition for cmd_status + */ +#define MFI_CMD_STATUS_POLL_MODE 0xFF + +/** + * MFI command opcodes + */ +#define MFI_CMD_INIT 0x00 +#define MFI_CMD_LD_READ 0x01 +#define MFI_CMD_LD_WRITE 0x02 +#define MFI_CMD_LD_SCSI_IO 0x03 +#define MFI_CMD_PD_SCSI_IO 0x04 +#define MFI_CMD_DCMD 0x05 +#define MFI_CMD_ABORT 0x06 +#define MFI_CMD_SMP 0x07 +#define MFI_CMD_STP 0x08 + +#define MR_DCMD_CTRL_GET_INFO 0x01010000 + +#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 +#define MR_FLUSH_CTRL_CACHE 0x01 +#define MR_FLUSH_DISK_CACHE 0x02 + +#define MR_DCMD_CTRL_SHUTDOWN 0x01050000 +#define MR_ENABLE_DRIVE_SPINDOWN 0x01 + +#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100 +#define MR_DCMD_CTRL_EVENT_GET 0x01040300 +#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500 +#define MR_DCMD_LD_GET_PROPERTIES 0x03030000 + +#define MR_DCMD_CLUSTER 0x08000000 +#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 +#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 + +/** + * MFI command completion codes + */ +enum MFI_STAT { + MFI_STAT_OK = 0x00, + MFI_STAT_INVALID_CMD = 0x01, + MFI_STAT_INVALID_DCMD = 0x02, + MFI_STAT_INVALID_PARAMETER = 0x03, + MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04, + MFI_STAT_ABORT_NOT_POSSIBLE = 0x05, + MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06, + MFI_STAT_APP_IN_USE = 0x07, + MFI_STAT_APP_NOT_INITIALIZED = 0x08, + MFI_STAT_ARRAY_INDEX_INVALID = 0x09, + MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a, + MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b, + MFI_STAT_DEVICE_NOT_FOUND = 0x0c, + MFI_STAT_DRIVE_TOO_SMALL = 0x0d, + MFI_STAT_FLASH_ALLOC_FAIL = 0x0e, + MFI_STAT_FLASH_BUSY = 0x0f, + MFI_STAT_FLASH_ERROR = 0x10, + MFI_STAT_FLASH_IMAGE_BAD = 0x11, + MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12, + MFI_STAT_FLASH_NOT_OPEN = 0x13, + MFI_STAT_FLASH_NOT_STARTED = 0x14, + MFI_STAT_FLUSH_FAILED = 0x15, + MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16, + MFI_STAT_LD_CC_IN_PROGRESS = 0x17, + MFI_STAT_LD_INIT_IN_PROGRESS = 0x18, + MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19, + MFI_STAT_LD_MAX_CONFIGURED = 0x1a, + MFI_STAT_LD_NOT_OPTIMAL = 0x1b, + 
MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c, + MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d, + MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e, + MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f, + MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, + MFI_STAT_MFC_HW_ERROR = 0x21, + MFI_STAT_NO_HW_PRESENT = 0x22, + MFI_STAT_NOT_FOUND = 0x23, + MFI_STAT_NOT_IN_ENCL = 0x24, + MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25, + MFI_STAT_PD_TYPE_WRONG = 0x26, + MFI_STAT_PR_DISABLED = 0x27, + MFI_STAT_ROW_INDEX_INVALID = 0x28, + MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29, + MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a, + MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b, + MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c, + MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d, + MFI_STAT_SCSI_IO_FAILED = 0x2e, + MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f, + MFI_STAT_SHUTDOWN_FAILED = 0x30, + MFI_STAT_TIME_NOT_SET = 0x31, + MFI_STAT_WRONG_STATE = 0x32, + MFI_STAT_LD_OFFLINE = 0x33, + MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34, + MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35, + MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, + MFI_STAT_I2C_ERRORS_DETECTED = 0x37, + MFI_STAT_PCI_ERRORS_DETECTED = 0x38, + + MFI_STAT_INVALID_STATUS = 0xFF +}; + +/* + * Number of mailbox bytes in DCMD message frame + */ +#define MFI_MBOX_SIZE 12 + +enum MR_EVT_CLASS { + + MR_EVT_CLASS_DEBUG = -2, + MR_EVT_CLASS_PROGRESS = -1, + MR_EVT_CLASS_INFO = 0, + MR_EVT_CLASS_WARNING = 1, + MR_EVT_CLASS_CRITICAL = 2, + MR_EVT_CLASS_FATAL = 3, + MR_EVT_CLASS_DEAD = 4, + +}; + +enum MR_EVT_LOCALE { + + MR_EVT_LOCALE_LD = 0x0001, + MR_EVT_LOCALE_PD = 0x0002, + MR_EVT_LOCALE_ENCL = 0x0004, + MR_EVT_LOCALE_BBU = 0x0008, + MR_EVT_LOCALE_SAS = 0x0010, + MR_EVT_LOCALE_CTRL = 0x0020, + MR_EVT_LOCALE_CONFIG = 0x0040, + MR_EVT_LOCALE_CLUSTER = 0x0080, + MR_EVT_LOCALE_ALL = 0xffff, + +}; + +enum MR_EVT_ARGS { + + MR_EVT_ARGS_NONE, + MR_EVT_ARGS_CDB_SENSE, + MR_EVT_ARGS_LD, + MR_EVT_ARGS_LD_COUNT, + MR_EVT_ARGS_LD_LBA, + MR_EVT_ARGS_LD_OWNER, + MR_EVT_ARGS_LD_LBA_PD_LBA, + MR_EVT_ARGS_LD_PROG, + MR_EVT_ARGS_LD_STATE, + MR_EVT_ARGS_LD_STRIP, + MR_EVT_ARGS_PD, + MR_EVT_ARGS_PD_ERR, + MR_EVT_ARGS_PD_LBA, + MR_EVT_ARGS_PD_LBA_LD, + MR_EVT_ARGS_PD_PROG, + MR_EVT_ARGS_PD_STATE, + MR_EVT_ARGS_PCI, + MR_EVT_ARGS_RATE, + MR_EVT_ARGS_STR, + MR_EVT_ARGS_TIME, + MR_EVT_ARGS_ECC, + +}; + +/* + * SAS controller properties + */ +struct megasas_ctrl_prop { + + u16 seq_num; + u16 pred_fail_poll_interval; + u16 intr_throttle_count; + u16 intr_throttle_timeouts; + u8 rebuild_rate; + u8 patrol_read_rate; + u8 bgi_rate; + u8 cc_rate; + u8 recon_rate; + u8 cache_flush_interval; + u8 spinup_drv_count; + u8 spinup_delay; + u8 cluster_enable; + u8 coercion_mode; + u8 alarm_enable; + u8 disable_auto_rebuild; + u8 disable_battery_warn; + u8 ecc_bucket_size; + u16 ecc_bucket_leak_rate; + u8 restore_hotspare_on_insertion; + u8 expose_encl_devices; + u8 reserved[38]; + +} __attribute__ ((packed)); + +/* + * SAS controller information + */ +struct megasas_ctrl_info { + + /* + * PCI device information + */ + struct { + + u16 vendor_id; + u16 device_id; + u16 sub_vendor_id; + u16 sub_device_id; + u8 reserved[24]; + + } __attribute__ ((packed)) pci; + + /* + * Host interface information + */ + struct { + + u8 PCIX:1; + u8 PCIE:1; + u8 iSCSI:1; + u8 SAS_3G:1; + u8 reserved_0:4; + u8 reserved_1[6]; + u8 port_count; + u64 port_addr[8]; + + } __attribute__ ((packed)) host_interface; + + /* + * Device (backend) interface information + */ + struct { + + u8 SPI:1; + u8 SAS_3G:1; + u8 SATA_1_5G:1; + u8 SATA_3G:1; + u8 reserved_0:4; + u8 reserved_1[6]; + u8 port_count; + u64 port_addr[8]; + + } 
__attribute__ ((packed)) device_interface; + + /* + * List of components residing in flash. All str are null terminated + */ + u32 image_check_word; + u32 image_component_count; + + struct { + + char name[8]; + char version[32]; + char build_date[16]; + char built_time[16]; + + } __attribute__ ((packed)) image_component[8]; + + /* + * List of flash components that have been flashed on the card, but + * are not in use, pending reset of the adapter. This list will be + * empty if a flash operation has not occurred. All stings are null + * terminated + */ + u32 pending_image_component_count; + + struct { + + char name[8]; + char version[32]; + char build_date[16]; + char build_time[16]; + + } __attribute__ ((packed)) pending_image_component[8]; + + u8 max_arms; + u8 max_spans; + u8 max_arrays; + u8 max_lds; + + char product_name[80]; + char serial_no[32]; + + /* + * Other physical/controller/operation information. Indicates the + * presence of the hardware + */ + struct { + + u32 bbu:1; + u32 alarm:1; + u32 nvram:1; + u32 uart:1; + u32 reserved:28; + + } __attribute__ ((packed)) hw_present; + + u32 current_fw_time; + + /* + * Maximum data transfer sizes + */ + u16 max_concurrent_cmds; + u16 max_sge_count; + u32 max_request_size; + + /* + * Logical and physical device counts + */ + u16 ld_present_count; + u16 ld_degraded_count; + u16 ld_offline_count; + + u16 pd_present_count; + u16 pd_disk_present_count; + u16 pd_disk_pred_failure_count; + u16 pd_disk_failed_count; + + /* + * Memory size information + */ + u16 nvram_size; + u16 memory_size; + u16 flash_size; + + /* + * Error counters + */ + u16 mem_correctable_error_count; + u16 mem_uncorrectable_error_count; + + /* + * Cluster information + */ + u8 cluster_permitted; + u8 cluster_active; + + /* + * Additional max data transfer sizes + */ + u16 max_strips_per_io; + + /* + * Controller capabilities structures + */ + struct { + + u32 raid_level_0:1; + u32 raid_level_1:1; + u32 raid_level_5:1; + u32 raid_level_1E:1; + u32 raid_level_6:1; + u32 reserved:27; + + } __attribute__ ((packed)) raid_levels; + + struct { + + u32 rbld_rate:1; + u32 cc_rate:1; + u32 bgi_rate:1; + u32 recon_rate:1; + u32 patrol_rate:1; + u32 alarm_control:1; + u32 cluster_supported:1; + u32 bbu:1; + u32 spanning_allowed:1; + u32 dedicated_hotspares:1; + u32 revertible_hotspares:1; + u32 foreign_config_import:1; + u32 self_diagnostic:1; + u32 mixed_redundancy_arr:1; + u32 global_hot_spares:1; + u32 reserved:17; + + } __attribute__ ((packed)) adapter_operations; + + struct { + + u32 read_policy:1; + u32 write_policy:1; + u32 io_policy:1; + u32 access_policy:1; + u32 disk_cache_policy:1; + u32 reserved:27; + + } __attribute__ ((packed)) ld_operations; + + struct { + + u8 min; + u8 max; + u8 reserved[2]; + + } __attribute__ ((packed)) stripe_sz_ops; + + struct { + + u32 force_online:1; + u32 force_offline:1; + u32 force_rebuild:1; + u32 reserved:29; + + } __attribute__ ((packed)) pd_operations; + + struct { + + u32 ctrl_supports_sas:1; + u32 ctrl_supports_sata:1; + u32 allow_mix_in_encl:1; + u32 allow_mix_in_ld:1; + u32 allow_sata_in_cluster:1; + u32 reserved:27; + + } __attribute__ ((packed)) pd_mix_support; + + /* + * Define ECC single-bit-error bucket information + */ + u8 ecc_bucket_count; + u8 reserved_2[11]; + + /* + * Include the controller properties (changeable items) + */ + struct megasas_ctrl_prop properties; + + /* + * Define FW pkg version (set in envt v'bles on OEM basis) + */ + char package_version[0x60]; + + u8 pad[0x800 - 0x6a0]; + +} __attribute__ ((packed)); 
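+/*
+ * Editor's sketch, not part of the original patch: the trailing
+ * pad[0x800 - 0x6a0] member above implies that firmware expects
+ * struct megasas_ctrl_info to be exactly 0x800 (2048) bytes when
+ * packed. Assuming BUILD_BUG_ON() from <linux/kernel.h> is available
+ * in this tree, a hypothetical helper such as the one below could
+ * verify that layout at compile time (for example, called once from
+ * driver init); the helper name is illustrative only.
+ */
+static inline void megasas_check_ctrl_info_size(void)
+{
+	BUILD_BUG_ON(sizeof(struct megasas_ctrl_info) != 0x800);
+}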
+ +/* + * =============================== + * MegaRAID SAS driver definitions + * =============================== + */ +#define MEGASAS_MAX_PD_CHANNELS 2 +#define MEGASAS_MAX_LD_CHANNELS 2 +#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ + MEGASAS_MAX_LD_CHANNELS) +#define MEGASAS_MAX_DEV_PER_CHANNEL 128 +#define MEGASAS_DEFAULT_INIT_ID -1 +#define MEGASAS_MAX_LUN 8 +#define MEGASAS_MAX_LD 64 + +/* + * When SCSI mid-layer calls driver's reset routine, driver waits for + * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note + * that the driver cannot _actually_ abort or reset pending commands. While + * it is waiting for the commands to complete, it prints a diagnostic message + * every MEGASAS_RESET_NOTICE_INTERVAL seconds + */ +#define MEGASAS_RESET_WAIT_TIME 180 +#define MEGASAS_RESET_NOTICE_INTERVAL 5 + +#define MEGASAS_IOCTL_CMD 0 + +/* + * FW reports the maximum of number of commands that it can accept (maximum + * commands that can be outstanding) at any time. The driver must report a + * lower number to the mid layer because it can issue a few internal commands + * itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs + * is shown below + */ +#define MEGASAS_INT_CMDS 32 + +/* + * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit + * SGLs based on the size of dma_addr_t + */ +#define IS_DMA64 (sizeof(dma_addr_t) == 8) + +#define MFI_OB_INTR_STATUS_MASK 0x00000002 +#define MFI_POLL_TIMEOUT_SECS 10 + +struct megasas_register_set { + + u32 reserved_0[4]; /*0000h */ + + u32 inbound_msg_0; /*0010h */ + u32 inbound_msg_1; /*0014h */ + u32 outbound_msg_0; /*0018h */ + u32 outbound_msg_1; /*001Ch */ + + u32 inbound_doorbell; /*0020h */ + u32 inbound_intr_status; /*0024h */ + u32 inbound_intr_mask; /*0028h */ + + u32 outbound_doorbell; /*002Ch */ + u32 outbound_intr_status; /*0030h */ + u32 outbound_intr_mask; /*0034h */ + + u32 reserved_1[2]; /*0038h */ + + u32 inbound_queue_port; /*0040h */ + u32 outbound_queue_port; /*0044h */ + + u32 reserved_2; /*004Ch */ + + u32 index_registers[1004]; /*0050h */ + +} __attribute__ ((packed)); + +struct megasas_sge32 { + + u32 phys_addr; + u32 length; + +} __attribute__ ((packed)); + +struct megasas_sge64 { + + u64 phys_addr; + u32 length; + +} __attribute__ ((packed)); + +union megasas_sgl { + + struct megasas_sge32 sge32[1]; + struct megasas_sge64 sge64[1]; + +} __attribute__ ((packed)); + +struct megasas_header { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 lun; /*05h */ + u8 cdb_len; /*06h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + u32 data_xferlen; /*14h */ + +} __attribute__ ((packed)); + +union megasas_sgl_frame { + + struct megasas_sge32 sge32[8]; + struct megasas_sge64 sge64[5]; + +} __attribute__ ((packed)); + +struct megasas_init_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + + u8 reserved_1; /*03h */ + u32 reserved_2; /*04h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 reserved_3; /*12h */ + u32 data_xfer_len; /*14h */ + + u32 queue_info_new_phys_addr_lo; /*18h */ + u32 queue_info_new_phys_addr_hi; /*1Ch */ + u32 queue_info_old_phys_addr_lo; /*20h */ + u32 queue_info_old_phys_addr_hi; /*24h */ + + u32 reserved_4[6]; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_init_queue_info { + + u32 init_flags; /*00h */ + u32 
reply_queue_entries; /*04h */ + + u32 reply_queue_start_phys_addr_lo; /*08h */ + u32 reply_queue_start_phys_addr_hi; /*0Ch */ + u32 producer_index_phys_addr_lo; /*10h */ + u32 producer_index_phys_addr_hi; /*14h */ + u32 consumer_index_phys_addr_lo; /*18h */ + u32 consumer_index_phys_addr_hi; /*1Ch */ + +} __attribute__ ((packed)); + +struct megasas_io_frame { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 access_byte; /*05h */ + u8 reserved_0; /*06h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + u32 lba_count; /*14h */ + + u32 sense_buf_phys_addr_lo; /*18h */ + u32 sense_buf_phys_addr_hi; /*1Ch */ + + u32 start_lba_lo; /*20h */ + u32 start_lba_hi; /*24h */ + + union megasas_sgl sgl; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_pthru_frame { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 lun; /*05h */ + u8 cdb_len; /*06h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + u32 data_xfer_len; /*14h */ + + u32 sense_buf_phys_addr_lo; /*18h */ + u32 sense_buf_phys_addr_hi; /*1Ch */ + + u8 cdb[16]; /*20h */ + union megasas_sgl sgl; /*30h */ + +} __attribute__ ((packed)); + +struct megasas_dcmd_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + u8 reserved_1[4]; /*03h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + + u32 data_xfer_len; /*14h */ + u32 opcode; /*18h */ + + union { /*1Ch */ + u8 b[12]; + u16 s[6]; + u32 w[3]; + } mbox; + + union megasas_sgl sgl; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_abort_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + + u8 reserved_1; /*03h */ + u32 reserved_2; /*04h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 reserved_3; /*12h */ + u32 reserved_4; /*14h */ + + u32 abort_context; /*18h */ + u32 pad_1; /*1Ch */ + + u32 abort_mfi_phys_addr_lo; /*20h */ + u32 abort_mfi_phys_addr_hi; /*24h */ + + u32 reserved_5[6]; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_smp_frame { + + u8 cmd; /*00h */ + u8 reserved_1; /*01h */ + u8 cmd_status; /*02h */ + u8 connection_status; /*03h */ + + u8 reserved_2[3]; /*04h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + + u32 data_xfer_len; /*14h */ + u64 sas_addr; /*18h */ + + union { + struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */ + struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */ + } sgl; + +} __attribute__ ((packed)); + +struct megasas_stp_frame { + + u8 cmd; /*00h */ + u8 reserved_1; /*01h */ + u8 cmd_status; /*02h */ + u8 reserved_2; /*03h */ + + u8 target_id; /*04h */ + u8 reserved_3[2]; /*05h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + + u32 data_xfer_len; /*14h */ + + u16 fis[10]; /*18h */ + u32 stp_flags; + + union { + struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */ + struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */ + } sgl; + +} __attribute__ ((packed)); + +union megasas_frame { + + struct megasas_header hdr; + struct megasas_init_frame init; + struct megasas_io_frame io; + struct megasas_pthru_frame 
pthru; + struct megasas_dcmd_frame dcmd; + struct megasas_abort_frame abort; + struct megasas_smp_frame smp; + struct megasas_stp_frame stp; + + u8 raw_bytes[64]; +}; + +struct megasas_cmd; + +union megasas_evt_class_locale { + + struct { + u16 locale; + u8 reserved; + s8 class; + } __attribute__ ((packed)) members; + + u32 word; + +} __attribute__ ((packed)); + +struct megasas_evt_log_info { + u32 newest_seq_num; + u32 oldest_seq_num; + u32 clear_seq_num; + u32 shutdown_seq_num; + u32 boot_seq_num; + +} __attribute__ ((packed)); + +struct megasas_progress { + + u16 progress; + u16 elapsed_seconds; + +} __attribute__ ((packed)); + +struct megasas_evtarg_ld { + + u16 target_id; + u8 ld_index; + u8 reserved; + +} __attribute__ ((packed)); + +struct megasas_evtarg_pd { + u16 device_id; + u8 encl_index; + u8 slot_number; + +} __attribute__ ((packed)); + +struct megasas_evt_detail { + + u32 seq_num; + u32 time_stamp; + u32 code; + union megasas_evt_class_locale cl; + u8 arg_type; + u8 reserved1[15]; + + union { + struct { + struct megasas_evtarg_pd pd; + u8 cdb_length; + u8 sense_length; + u8 reserved[2]; + u8 cdb[16]; + u8 sense[64]; + } __attribute__ ((packed)) cdbSense; + + struct megasas_evtarg_ld ld; + + struct { + struct megasas_evtarg_ld ld; + u64 count; + } __attribute__ ((packed)) ld_count; + + struct { + u64 lba; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) ld_lba; + + struct { + struct megasas_evtarg_ld ld; + u32 prevOwner; + u32 newOwner; + } __attribute__ ((packed)) ld_owner; + + struct { + u64 ld_lba; + u64 pd_lba; + struct megasas_evtarg_ld ld; + struct megasas_evtarg_pd pd; + } __attribute__ ((packed)) ld_lba_pd_lba; + + struct { + struct megasas_evtarg_ld ld; + struct megasas_progress prog; + } __attribute__ ((packed)) ld_prog; + + struct { + struct megasas_evtarg_ld ld; + u32 prev_state; + u32 new_state; + } __attribute__ ((packed)) ld_state; + + struct { + u64 strip; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) ld_strip; + + struct megasas_evtarg_pd pd; + + struct { + struct megasas_evtarg_pd pd; + u32 err; + } __attribute__ ((packed)) pd_err; + + struct { + u64 lba; + struct megasas_evtarg_pd pd; + } __attribute__ ((packed)) pd_lba; + + struct { + u64 lba; + struct megasas_evtarg_pd pd; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) pd_lba_ld; + + struct { + struct megasas_evtarg_pd pd; + struct megasas_progress prog; + } __attribute__ ((packed)) pd_prog; + + struct { + struct megasas_evtarg_pd pd; + u32 prevState; + u32 newState; + } __attribute__ ((packed)) pd_state; + + struct { + u16 vendorId; + u16 deviceId; + u16 subVendorId; + u16 subDeviceId; + } __attribute__ ((packed)) pci; + + u32 rate; + char str[96]; + + struct { + u32 rtc; + u32 elapsedSeconds; + } __attribute__ ((packed)) time; + + struct { + u32 ecar; + u32 elog; + char str[64]; + } __attribute__ ((packed)) ecc; + + u8 b[96]; + u16 s[48]; + u32 w[24]; + u64 d[12]; + } args; + + char description[128]; + +} __attribute__ ((packed)); + +struct megasas_instance { + + u32 *producer; + dma_addr_t producer_h; + u32 *consumer; + dma_addr_t consumer_h; + + u32 *reply_queue; + dma_addr_t reply_queue_h; + + unsigned long base_addr; + struct megasas_register_set __iomem *reg_set; + + s8 init_id; + u8 reserved[3]; + + u16 max_num_sge; + u16 max_fw_cmds; + u32 max_sectors_per_req; + + struct megasas_cmd **cmd_list; + struct list_head cmd_pool; + spinlock_t cmd_pool_lock; + struct dma_pool *frame_dma_pool; + struct dma_pool *sense_dma_pool; + + struct megasas_evt_detail 
*evt_detail; + dma_addr_t evt_detail_h; + struct megasas_cmd *aen_cmd; + struct semaphore aen_mutex; + struct semaphore ioctl_sem; + + struct Scsi_Host *host; + + wait_queue_head_t int_cmd_wait_q; + wait_queue_head_t abort_cmd_wait_q; + + struct pci_dev *pdev; + u32 unique_id; + + u32 fw_outstanding; + u32 hw_crit_error; + spinlock_t instance_lock; +}; + +#define MEGASAS_IS_LOGICAL(scp) \ + (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 + +#define MEGASAS_DEV_INDEX(inst, scp) \ + ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id + +struct megasas_cmd { + + union megasas_frame *frame; + dma_addr_t frame_phys_addr; + u8 *sense; + dma_addr_t sense_phys_addr; + + u32 index; + u8 sync_cmd; + u8 cmd_status; + u16 abort_aen; + + struct list_head list; + struct scsi_cmnd *scmd; + struct megasas_instance *instance; + u32 frame_count; +}; + +#define MAX_MGMT_ADAPTERS 1024 +#define MAX_IOCTL_SGE 16 + +struct megasas_iocpacket { + + u16 host_no; + u16 __pad1; + u32 sgl_off; + u32 sge_count; + u32 sense_off; + u32 sense_len; + union { + u8 raw[128]; + struct megasas_header hdr; + } frame; + + struct iovec sgl[MAX_IOCTL_SGE]; + +} __attribute__ ((packed)); + +struct megasas_aen { + u16 host_no; + u16 __pad1; + u32 seq_num; + u32 class_locale_word; +} __attribute__ ((packed)); + +#ifdef CONFIG_COMPAT +struct compat_megasas_iocpacket { + u16 host_no; + u16 __pad1; + u32 sgl_off; + u32 sge_count; + u32 sense_off; + u32 sense_len; + union { + u8 raw[128]; + struct megasas_header hdr; + } frame; + struct compat_iovec sgl[MAX_IOCTL_SGE]; +} __attribute__ ((packed)); + +#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket) +#else +#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket) +#endif + +#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen) + +struct megasas_mgmt_info { + + u16 count; + struct megasas_instance *instance[MAX_MGMT_ADAPTERS]; + int max_index; +}; + +#endif /*LSI_MEGARAID_SAS_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c49d28eca561..20fb79810c3c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -185,6 +185,7 @@ #define PCI_DEVICE_ID_LSI_61C102 0x0901 #define PCI_DEVICE_ID_LSI_63C815 0x1000 #define PCI_DEVICE_ID_LSI_SAS1064 0x0050 +#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411 #define PCI_DEVICE_ID_LSI_SAS1066 0x005E #define PCI_DEVICE_ID_LSI_SAS1068 0x0054 #define PCI_DEVICE_ID_LSI_SAS1064A 0x005C @@ -559,6 +560,7 @@ #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_DEVICE_ID_DELL_RACIII 0x0008 #define PCI_DEVICE_ID_DELL_RAC4 0x0012 +#define PCI_DEVICE_ID_DELL_PERC5 0x0015 #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 -- cgit v1.2.3 From 9356b8fc07dc126cd91d2b12f314d760ab48996e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 27 Sep 2005 15:23:16 -0700 Subject: [NET]: Reorder some hot fields of struct net_device Place them on separate cache lines in SMP to lower memory bouncing between multiple CPU accessing the device. 
- One part is mostly used on receive path (including eth_type_trans()) (poll_list, poll, quota, weight, last_rx, dev_addr, broadcast) - One part is mostly used on queue transmit path (qdisc) (queue_lock, qdisc, qdisc_sleeping, qdisc_list, tx_queue_len) - One part is mostly used on xmit path (device) (xmit_lock, xmit_lock_owner, priv, hard_start_xmit, trans_start) 'features' is placed outside of these hot points, in a location that may be shared by all cpus (because mostly read) name_hlist is moved close to name[IFNAMSIZ] to speedup __dev_get_by_name() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/netdevice.h | 89 +++++++++++++++++++++++++++++------------------ 1 file changed, 55 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7c717907896d..368e4c825ff1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -265,6 +265,8 @@ struct net_device * the interface. */ char name[IFNAMSIZ]; + /* device name hash chain */ + struct hlist_node name_hlist; /* * I/O specific fields @@ -292,6 +294,21 @@ struct net_device /* ------- Fields preinitialized in Space.c finish here ------- */ + /* Net device features */ + unsigned long features; +#define NETIF_F_SG 1 /* Scatter/gather IO. */ +#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ +#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ +#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ +#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ +#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ +#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ +#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ +#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ +#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ +#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ +#define NETIF_F_LLTX 4096 /* LockLess TX */ + struct net_device *next_sched; /* Interface index. Unique device identifier */ @@ -316,9 +333,6 @@ struct net_device * will (read: may be cleaned up at will). */ - /* These may be needed for future network-power-down code. */ - unsigned long trans_start; /* Time (in jiffies) of last Tx */ - unsigned long last_rx; /* Time of last Rx */ unsigned short flags; /* interface flags (a la BSD) */ unsigned short gflags; @@ -328,15 +342,12 @@ struct net_device unsigned mtu; /* interface MTU value */ unsigned short type; /* interface hardware type */ unsigned short hard_header_len; /* hardware hdr length */ - void *priv; /* pointer to private data */ struct net_device *master; /* Pointer to master device of a group, * which this device is member of. */ /* Interface address info. 
*/ - unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ - unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */ unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ @@ -346,8 +357,6 @@ struct net_device int promiscuity; int allmulti; - int watchdog_timeo; - struct timer_list watchdog_timer; /* Protocol specific pointers */ @@ -358,32 +367,62 @@ struct net_device void *ec_ptr; /* Econet specific data */ void *ax25_ptr; /* AX.25 specific data */ - struct list_head poll_list; /* Link to poll list */ +/* + * Cache line mostly used on receive path (including eth_type_trans()) + */ + struct list_head poll_list ____cacheline_aligned_in_smp; + /* Link to poll list */ + + int (*poll) (struct net_device *dev, int *quota); int quota; int weight; + unsigned long last_rx; /* Time of last Rx */ + /* Interface address info used in eth_type_trans() */ + unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast + because most packets are unicast) */ + + unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ +/* + * Cache line mostly used on queue transmit path (qdisc) + */ + /* device queue lock */ + spinlock_t queue_lock ____cacheline_aligned_in_smp; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; - struct Qdisc *qdisc_ingress; struct list_head qdisc_list; unsigned long tx_queue_len; /* Max frames per queue allowed */ /* ingress path synchronizer */ spinlock_t ingress_lock; + struct Qdisc *qdisc_ingress; + +/* + * One part is mostly used on xmit path (device) + */ /* hard_start_xmit synchronizer */ - spinlock_t xmit_lock; + spinlock_t xmit_lock ____cacheline_aligned_in_smp; /* cpu id of processor entered to hard_start_xmit or -1, if nobody entered there. */ int xmit_lock_owner; - /* device queue lock */ - spinlock_t queue_lock; + void *priv; /* pointer to private data */ + int (*hard_start_xmit) (struct sk_buff *skb, + struct net_device *dev); + /* These may be needed for future network-power-down code. */ + unsigned long trans_start; /* Time (in jiffies) of last Tx */ + + int watchdog_timeo; /* used by dev_watchdog() */ + struct timer_list watchdog_timer; + +/* + * refcnt is a very hot point, so align it on SMP + */ /* Number of references to this device */ - atomic_t refcnt; + atomic_t refcnt ____cacheline_aligned_in_smp; + /* delayed register/unregister */ struct list_head todo_list; - /* device name hash chain */ - struct hlist_node name_hlist; /* device index hash chain */ struct hlist_node index_hlist; @@ -396,21 +435,6 @@ struct net_device NETREG_RELEASED, /* called free_netdev */ } reg_state; - /* Net device features */ - unsigned long features; -#define NETIF_F_SG 1 /* Scatter/gather IO. */ -#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ -#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ -#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ -#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ -#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ -#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ -#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ -#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ -#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ -#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ -#define NETIF_F_LLTX 4096 /* LockLess TX */ - /* Called after device is detached from network. 
*/ void (*uninit)(struct net_device *dev); /* Called after last user reference disappears. */ @@ -419,10 +443,7 @@ struct net_device /* Pointers to interface service routines. */ int (*open)(struct net_device *dev); int (*stop)(struct net_device *dev); - int (*hard_start_xmit) (struct sk_buff *skb, - struct net_device *dev); #define HAVE_NETDEV_POLL - int (*poll) (struct net_device *dev, int *quota); int (*hard_header) (struct sk_buff *skb, struct net_device *dev, unsigned short type, -- cgit v1.2.3 From 1f26dac32057baaf67d10b45c6b5277db862911d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 27 Sep 2005 15:24:13 -0700 Subject: [NET]: Add Sun Cassini driver. Written by Adrian Sun (asun@darksunrising.com). Ported to 2.6.x by Tom 'spot' Callaway . Further cleaned up and integrated by David S. Miller Signed-off-by: David S. Miller --- drivers/net/Kconfig | 8 + drivers/net/Makefile | 1 + drivers/net/cassini.c | 5311 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/cassini.h | 4425 +++++++++++++++++++++++++++++++++++++++ include/linux/pci_ids.h | 2 + 5 files changed, 9747 insertions(+) create mode 100644 drivers/net/cassini.c create mode 100644 drivers/net/cassini.h (limited to 'include/linux') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 96f14ab1c1f5..2a908c4690a7 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -548,6 +548,14 @@ config SUNGEM Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also . +config CASSINI + tristate "Sun Cassini support" + depends on NET_ETHERNET && PCI + select CRC32 + help + Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also + + config NET_VENDOR_3COM bool "3COM cards" depends on NET_ETHERNET && (ISA || EISA || MCA || PCI) diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 8645c843cf4d..8aeec9f2495b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o obj-$(CONFIG_SUNBMAC) += sunbmac.o obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o +obj-$(CONFIG_CASSINI) += cassini.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c new file mode 100644 index 000000000000..69cb368247e7 --- /dev/null +++ b/drivers/net/cassini.c @@ -0,0 +1,5311 @@ +/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * + * This driver uses the sungem driver (c) David Miller + * (davem@redhat.com) as its basis. 
+ * + * The cassini chip has a number of features that distinguish it from + * the gem chip: + * 4 transmit descriptor rings that are used for either QoS (VLAN) or + * load balancing (non-VLAN mode) + * batching of multiple packets + * multiple CPU dispatching + * page-based RX descriptor engine with separate completion rings + * Gigabit support (GMII and PCS interface) + * MIF link up/down detection works + * + * RX is handled by page sized buffers that are attached as fragments to + * the skb. here's what's done: + * -- driver allocates pages at a time and keeps reference counts + * on them. + * -- the upper protocol layers assume that the header is in the skb + * itself. as a result, cassini will copy a small amount (64 bytes) + * to make them happy. + * -- driver appends the rest of the data pages as frags to skbuffs + * and increments the reference count + * -- on page reclamation, the driver swaps the page with a spare page. + * if that page is still in use, it frees its reference to that page, + * and allocates a new page for use. otherwise, it just recycles the + * the page. + * + * NOTE: cassini can parse the header. however, it's not worth it + * as long as the network stack requires a header copy. + * + * TX has 4 queues. currently these queues are used in a round-robin + * fashion for load balancing. They can also be used for QoS. for that + * to work, however, QoS information needs to be exposed down to the driver + * level so that subqueues get targetted to particular transmit rings. + * alternatively, the queues can be configured via use of the all-purpose + * ioctl. + * + * RX DATA: the rx completion ring has all the info, but the rx desc + * ring has all of the data. RX can conceivably come in under multiple + * interrupts, but the INT# assignment needs to be set up properly by + * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do + * that. also, the two descriptor rings are designed to distinguish between + * encrypted and non-encrypted packets, but we use them for buffering + * instead. + * + * by default, the selective clear mask is set up to process rx packets. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ) +#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) +#define CAS_NCPUS num_online_cpus() + +#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL) +#define USE_NAPI +#define cas_skb_release(x) netif_receive_skb(x) +#else +#define cas_skb_release(x) netif_rx(x) +#endif + +/* select which firmware to use */ +#define USE_HP_WORKAROUND +#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */ +#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */ + +#include "cassini.h" + +#define USE_TX_COMPWB /* use completion writeback registers */ +#define USE_CSMA_CD_PROTO /* standard CSMA/CD */ +#define USE_RX_BLANK /* hw interrupt mitigation */ +#undef USE_ENTROPY_DEV /* don't test for entropy device */ + +/* NOTE: these aren't useable unless PCI interrupts can be assigned. + * also, we need to make cp->lock finer-grained. 
+ */ +#undef USE_PCI_INTB +#undef USE_PCI_INTC +#undef USE_PCI_INTD +#undef USE_QOS + +#undef USE_VPD_DEBUG /* debug vpd information if defined */ + +/* rx processing options */ +#define USE_PAGE_ORDER /* specify to allocate large rx pages */ +#define RX_DONT_BATCH 0 /* if 1, don't batch flows */ +#define RX_COPY_ALWAYS 0 /* if 0, use frags */ +#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */ +#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ + +#define DRV_MODULE_NAME "cassini" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.4" +#define DRV_MODULE_RELDATE "1 July 2004" + +#define CAS_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define CAS_TX_TIMEOUT (HZ) +#define CAS_LINK_TIMEOUT (22*HZ/10) +#define CAS_LINK_FAST_TIMEOUT (1) + +/* timeout values for state changing. these specify the number + * of 10us delays to be used before giving up. + */ +#define STOP_TRIES_PHY 1000 +#define STOP_TRIES 5000 + +/* specify a minimum frame size to deal with some fifo issues + * max mtu == 2 * page size - ethernet header - 64 - swivel = + * 2 * page_size - 0x50 + */ +#define CAS_MIN_FRAME 97 +#define CAS_1000MB_MIN_FRAME 255 +#define CAS_MIN_MTU 60 +#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000) + +#if 1 +/* + * Eliminate these and use separate atomic counters for each, to + * avoid a race condition. + */ +#else +#define CAS_RESET_MTU 1 +#define CAS_RESET_ALL 2 +#define CAS_RESET_SPARE 3 +#endif + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); +MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_PARM(cassini_debug, "i"); +MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); +MODULE_PARM(link_mode, "i"); +MODULE_PARM_DESC(link_mode, "default link mode"); + +/* + * Work around for a PCS bug in which the link goes down due to the chip + * being confused and never showing a link status of "up." + */ +#define DEFAULT_LINKDOWN_TIMEOUT 5 +/* + * Value in seconds, for user input. + */ +static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT; +MODULE_PARM(linkdown_timeout, "i"); +MODULE_PARM_DESC(linkdown_timeout, +"min reset interval in sec. for PCS linkdown issue; disabled if not positive"); + +/* + * value in 'ticks' (units used by jiffies). Set when we init the + * module because 'HZ' in actually a function call on some flavors of + * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ. 
+ */ +static int link_transition_timeout; + + +static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ +static int link_mode; + +static u16 link_modes[] __devinitdata = { + BMCR_ANENABLE, /* 0 : autoneg */ + 0, /* 1 : 10bt half duplex */ + BMCR_SPEED100, /* 2 : 100bt half duplex */ + BMCR_FULLDPLX, /* 3 : 10bt full duplex */ + BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */ + CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ +}; + +static struct pci_device_id cas_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, cas_pci_tbl); + +static void cas_set_link_modes(struct cas *cp); + +static inline void cas_lock_tx(struct cas *cp) +{ + int i; + + for (i = 0; i < N_TX_RINGS; i++) + spin_lock(&cp->tx_lock[i]); +} + +static inline void cas_lock_all(struct cas *cp) +{ + spin_lock_irq(&cp->lock); + cas_lock_tx(cp); +} + +/* WTZ: QA was finding deadlock problems with the previous + * versions after long test runs with multiple cards per machine. + * See if replacing cas_lock_all with safer versions helps. The + * symptoms QA is reporting match those we'd expect if interrupts + * aren't being properly restored, and we fixed a previous deadlock + * with similar symptoms by using save/restore versions in other + * places. + */ +#define cas_lock_all_save(cp, flags) \ +do { \ + struct cas *xxxcp = (cp); \ + spin_lock_irqsave(&xxxcp->lock, flags); \ + cas_lock_tx(xxxcp); \ +} while (0) + +static inline void cas_unlock_tx(struct cas *cp) +{ + int i; + + for (i = N_TX_RINGS; i > 0; i--) + spin_unlock(&cp->tx_lock[i - 1]); +} + +static inline void cas_unlock_all(struct cas *cp) +{ + cas_unlock_tx(cp); + spin_unlock_irq(&cp->lock); +} + +#define cas_unlock_all_restore(cp, flags) \ +do { \ + struct cas *xxxcp = (cp); \ + cas_unlock_tx(xxxcp); \ + spin_unlock_irqrestore(&xxxcp->lock, flags); \ +} while (0) + +static void cas_disable_irq(struct cas *cp, const int ring) +{ + /* Make sure we won't get any more interrupts */ + if (ring == 0) { + writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); + return; + } + + /* disable completion interrupts and selectively mask */ + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + switch (ring) { +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +#ifdef USE_PCI_INTB + case 1: +#endif +#ifdef USE_PCI_INTC + case 2: +#endif +#ifdef USE_PCI_INTD + case 3: +#endif + writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, + cp->regs + REG_PLUS_INTRN_MASK(ring)); + break; +#endif + default: + writel(INTRN_MASK_CLEAR_ALL, cp->regs + + REG_PLUS_INTRN_MASK(ring)); + break; + } + } +} + +static inline void cas_mask_intr(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cas_disable_irq(cp, i); +} + +static void cas_enable_irq(struct cas *cp, const int ring) +{ + if (ring == 0) { /* all but TX_DONE */ + writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); + return; + } + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + switch (ring) { +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +#ifdef USE_PCI_INTB + case 1: +#endif +#ifdef USE_PCI_INTC + case 2: +#endif +#ifdef USE_PCI_INTD + case 3: +#endif + writel(INTRN_MASK_RX_EN, cp->regs + + REG_PLUS_INTRN_MASK(ring)); + break; +#endif + default: + break; + } + } +} + +static inline void cas_unmask_intr(struct cas *cp) +{ + int i; + + for (i = 0; i < 
N_RX_COMP_RINGS; i++) + cas_enable_irq(cp, i); +} + +static inline void cas_entropy_gather(struct cas *cp) +{ +#ifdef USE_ENTROPY_DEV + if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) + return; + + batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), + readl(cp->regs + REG_ENTROPY_IV), + sizeof(uint64_t)*8); +#endif +} + +static inline void cas_entropy_reset(struct cas *cp) +{ +#ifdef USE_ENTROPY_DEV + if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) + return; + + writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT, + cp->regs + REG_BIM_LOCAL_DEV_EN); + writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); + writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); + + /* if we read back 0x0, we don't have an entropy device */ + if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) + cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; +#endif +} + +/* access to the phy. the following assumes that we've initialized the MIF to + * be in frame rather than bit-bang mode + */ +static u16 cas_phy_read(struct cas *cp, int reg) +{ + u32 cmd; + int limit = STOP_TRIES_PHY; + + cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ; + cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); + cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); + cmd |= MIF_FRAME_TURN_AROUND_MSB; + writel(cmd, cp->regs + REG_MIF_FRAME); + + /* poll for completion */ + while (limit-- > 0) { + udelay(10); + cmd = readl(cp->regs + REG_MIF_FRAME); + if (cmd & MIF_FRAME_TURN_AROUND_LSB) + return (cmd & MIF_FRAME_DATA_MASK); + } + return 0xFFFF; /* -1 */ +} + +static int cas_phy_write(struct cas *cp, int reg, u16 val) +{ + int limit = STOP_TRIES_PHY; + u32 cmd; + + cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE; + cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); + cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); + cmd |= MIF_FRAME_TURN_AROUND_MSB; + cmd |= val & MIF_FRAME_DATA_MASK; + writel(cmd, cp->regs + REG_MIF_FRAME); + + /* poll for completion */ + while (limit-- > 0) { + udelay(10); + cmd = readl(cp->regs + REG_MIF_FRAME); + if (cmd & MIF_FRAME_TURN_AROUND_LSB) + return 0; + } + return -1; +} + +static void cas_phy_powerup(struct cas *cp) +{ + u16 ctl = cas_phy_read(cp, MII_BMCR); + + if ((ctl & BMCR_PDOWN) == 0) + return; + ctl &= ~BMCR_PDOWN; + cas_phy_write(cp, MII_BMCR, ctl); +} + +static void cas_phy_powerdown(struct cas *cp) +{ + u16 ctl = cas_phy_read(cp, MII_BMCR); + + if (ctl & BMCR_PDOWN) + return; + ctl |= BMCR_PDOWN; + cas_phy_write(cp, MII_BMCR, ctl); +} + +/* cp->lock held. note: the last put_page will free the buffer */ +static int cas_page_free(struct cas *cp, cas_page_t *page) +{ + pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, + PCI_DMA_FROMDEVICE); + __free_pages(page->buffer, cp->page_order); + kfree(page); + return 0; +} + +#ifdef RX_COUNT_BUFFERS +#define RX_USED_ADD(x, y) ((x)->used += (y)) +#define RX_USED_SET(x, y) ((x)->used = (y)) +#else +#define RX_USED_ADD(x, y) +#define RX_USED_SET(x, y) +#endif + +/* local page allocation routines for the receive buffers. jumbo pages + * require at least 8K contiguous and 8K aligned buffers. 
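A small aside on the order math behind those 8K buffers, before the allocation routine that follows. The helper below is hypothetical; get_order() is the stock kernel way to turn a byte size into an allocation order, and an order-1 allocation on a 4K PAGE_SIZE machine is exactly the 8K contiguous, 8K aligned block the comment asks for.

static inline int cas_rx_page_order(int page_size)
{
	/* e.g. page_size 0x2000 on a 4K PAGE_SIZE system -> order 1 */
	return (page_size <= PAGE_SIZE) ? 0 : get_order(page_size);
}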
+ */ +static cas_page_t *cas_page_alloc(struct cas *cp, const int flags) +{ + cas_page_t *page; + + page = kmalloc(sizeof(cas_page_t), flags); + if (!page) + return NULL; + + INIT_LIST_HEAD(&page->list); + RX_USED_SET(page, 0); + page->buffer = alloc_pages(flags, cp->page_order); + if (!page->buffer) + goto page_err; + page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, + cp->page_size, PCI_DMA_FROMDEVICE); + return page; + +page_err: + kfree(page); + return NULL; +} + +/* initialize spare pool of rx buffers, but allocate during the open */ +static void cas_spare_init(struct cas *cp) +{ + spin_lock(&cp->rx_inuse_lock); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + + spin_lock(&cp->rx_spare_lock); + INIT_LIST_HEAD(&cp->rx_spare_list); + cp->rx_spares_needed = RX_SPARE_COUNT; + spin_unlock(&cp->rx_spare_lock); +} + +/* used on close. free all the spare buffers. */ +static void cas_spare_free(struct cas *cp) +{ + struct list_head list, *elem, *tmp; + + /* free spare buffers */ + INIT_LIST_HEAD(&list); + spin_lock(&cp->rx_spare_lock); + list_splice(&cp->rx_spare_list, &list); + INIT_LIST_HEAD(&cp->rx_spare_list); + spin_unlock(&cp->rx_spare_lock); + list_for_each_safe(elem, tmp, &list) { + cas_page_free(cp, list_entry(elem, cas_page_t, list)); + } + + INIT_LIST_HEAD(&list); +#if 1 + /* + * Looks like Adrian had protected this with a different + * lock than used everywhere else to manipulate this list. + */ + spin_lock(&cp->rx_inuse_lock); + list_splice(&cp->rx_inuse_list, &list); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); +#else + spin_lock(&cp->rx_spare_lock); + list_splice(&cp->rx_inuse_list, &list); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_spare_lock); +#endif + list_for_each_safe(elem, tmp, &list) { + cas_page_free(cp, list_entry(elem, cas_page_t, list)); + } +} + +/* replenish spares if needed */ +static void cas_spare_recover(struct cas *cp, const int flags) +{ + struct list_head list, *elem, *tmp; + int needed, i; + + /* check inuse list. if we don't need any more free buffers, + * just free it + */ + + /* make a local copy of the list */ + INIT_LIST_HEAD(&list); + spin_lock(&cp->rx_inuse_lock); + list_splice(&cp->rx_inuse_list, &list); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + + list_for_each_safe(elem, tmp, &list) { + cas_page_t *page = list_entry(elem, cas_page_t, list); + + if (page_count(page->buffer) > 1) + continue; + + list_del(elem); + spin_lock(&cp->rx_spare_lock); + if (cp->rx_spares_needed > 0) { + list_add(elem, &cp->rx_spare_list); + cp->rx_spares_needed--; + spin_unlock(&cp->rx_spare_lock); + } else { + spin_unlock(&cp->rx_spare_lock); + cas_page_free(cp, page); + } + } + + /* put any inuse buffers back on the list */ + if (!list_empty(&list)) { + spin_lock(&cp->rx_inuse_lock); + list_splice(&list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + } + + spin_lock(&cp->rx_spare_lock); + needed = cp->rx_spares_needed; + spin_unlock(&cp->rx_spare_lock); + if (!needed) + return; + + /* we still need spares, so try to allocate some */ + INIT_LIST_HEAD(&list); + i = 0; + while (i < needed) { + cas_page_t *spare = cas_page_alloc(cp, flags); + if (!spare) + break; + list_add(&spare->list, &list); + i++; + } + + spin_lock(&cp->rx_spare_lock); + list_splice(&list, &cp->rx_spare_list); + cp->rx_spares_needed -= i; + spin_unlock(&cp->rx_spare_lock); +} + +/* pull a page from the list. 
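The spare-pool routines above (cas_spare_free(), cas_spare_recover()) all lean on the same pattern: detach the whole shared list while holding its spinlock, then do the slow per-page teardown with the lock dropped. A minimal sketch of that pattern follows, with a hypothetical helper name; list_splice_init() simply combines the list_splice()/INIT_LIST_HEAD() pair used above.

static void cas_free_page_list(struct cas *cp, spinlock_t *lock,
			       struct list_head *shared)
{
	struct list_head list, *elem, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock(lock);
	list_splice_init(shared, &list);	/* shared list is now empty */
	spin_unlock(lock);

	/* lock dropped: per-page unmap/free no longer serializes other users */
	list_for_each_safe(elem, tmp, &list)
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
}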
*/ +static cas_page_t *cas_page_dequeue(struct cas *cp) +{ + struct list_head *entry; + int recover; + + spin_lock(&cp->rx_spare_lock); + if (list_empty(&cp->rx_spare_list)) { + /* try to do a quick recovery */ + spin_unlock(&cp->rx_spare_lock); + cas_spare_recover(cp, GFP_ATOMIC); + spin_lock(&cp->rx_spare_lock); + if (list_empty(&cp->rx_spare_list)) { + if (netif_msg_rx_err(cp)) + printk(KERN_ERR "%s: no spare buffers " + "available.\n", cp->dev->name); + spin_unlock(&cp->rx_spare_lock); + return NULL; + } + } + + entry = cp->rx_spare_list.next; + list_del(entry); + recover = ++cp->rx_spares_needed; + spin_unlock(&cp->rx_spare_lock); + + /* trigger the timer to do the recovery */ + if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) { +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_spare); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); + schedule_work(&cp->reset_task); +#endif + } + return list_entry(entry, cas_page_t, list); +} + + +static void cas_mif_poll(struct cas *cp, const int enable) +{ + u32 cfg; + + cfg = readl(cp->regs + REG_MIF_CFG); + cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1); + + if (cp->phy_type & CAS_PHY_MII_MDIO1) + cfg |= MIF_CFG_PHY_SELECT; + + /* poll and interrupt on link status change. */ + if (enable) { + cfg |= MIF_CFG_POLL_EN; + cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR); + cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); + } + writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF, + cp->regs + REG_MIF_MASK); + writel(cfg, cp->regs + REG_MIF_CFG); +} + +/* Must be invoked under cp->lock */ +static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) +{ + u16 ctl; +#if 1 + int lcntl; + int changed = 0; + int oldstate = cp->lstate; + int link_was_not_down = !(oldstate == link_down); +#endif + /* Setup link parameters */ + if (!ep) + goto start_aneg; + lcntl = cp->link_cntl; + if (ep->autoneg == AUTONEG_ENABLE) + cp->link_cntl = BMCR_ANENABLE; + else { + cp->link_cntl = 0; + if (ep->speed == SPEED_100) + cp->link_cntl |= BMCR_SPEED100; + else if (ep->speed == SPEED_1000) + cp->link_cntl |= CAS_BMCR_SPEED1000; + if (ep->duplex == DUPLEX_FULL) + cp->link_cntl |= BMCR_FULLDPLX; + } +#if 1 + changed = (lcntl != cp->link_cntl); +#endif +start_aneg: + if (cp->lstate == link_up) { + printk(KERN_INFO "%s: PCS link down.\n", + cp->dev->name); + } else { + if (changed) { + printk(KERN_INFO "%s: link configuration changed\n", + cp->dev->name); + } + } + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + if (!cp->hw_running) + return; +#if 1 + /* + * WTZ: If the old state was link_up, we turn off the carrier + * to replicate everything we do elsewhere on a link-down + * event when we were already in a link-up state.. + */ + if (oldstate == link_up) + netif_carrier_off(cp->dev); + if (changed && link_was_not_down) { + /* + * WTZ: This branch will simply schedule a full reset after + * we explicitly changed link modes in an ioctl. See if this + * fixes the link-problems we were having for forced mode. 
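For reference, the speed/duplex translation performed just above, pulled out as a pure helper. The function name is hypothetical, but the bit mapping is exactly the one in cas_begin_auto_negotiation(): autoneg collapses to BMCR_ANENABLE, and a forced mode is built from the speed and duplex bits, with zero meaning 10 Mb/s half duplex.

static u16 cas_ethtool_to_link_cntl(const struct ethtool_cmd *ep)
{
	u16 cntl = 0;

	if (ep->autoneg == AUTONEG_ENABLE)
		return BMCR_ANENABLE;

	if (ep->speed == SPEED_100)
		cntl |= BMCR_SPEED100;
	else if (ep->speed == SPEED_1000)
		cntl |= CAS_BMCR_SPEED1000;	/* driver-local name for the gigabit speed bit */
	if (ep->duplex == DUPLEX_FULL)
		cntl |= BMCR_FULLDPLX;
	return cntl;			/* 0 == forced 10 Mb/s half duplex */
}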
+ */ + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); + cp->timer_ticks = 0; + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); + return; + } +#endif + if (cp->phy_type & CAS_PHY_SERDES) { + u32 val = readl(cp->regs + REG_PCS_MII_CTRL); + + if (cp->link_cntl & BMCR_ANENABLE) { + val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN); + cp->lstate = link_aneg; + } else { + if (cp->link_cntl & BMCR_FULLDPLX) + val |= PCS_MII_CTRL_DUPLEX; + val &= ~PCS_MII_AUTONEG_EN; + cp->lstate = link_force_ok; + } + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + writel(val, cp->regs + REG_PCS_MII_CTRL); + + } else { + cas_mif_poll(cp, 0); + ctl = cas_phy_read(cp, MII_BMCR); + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | + CAS_BMCR_SPEED1000 | BMCR_ANENABLE); + ctl |= cp->link_cntl; + if (ctl & BMCR_ANENABLE) { + ctl |= BMCR_ANRESTART; + cp->lstate = link_aneg; + } else { + cp->lstate = link_force_ok; + } + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + cas_phy_write(cp, MII_BMCR, ctl); + cas_mif_poll(cp, 1); + } + + cp->timer_ticks = 0; + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); +} + +/* Must be invoked under cp->lock. */ +static int cas_reset_mii_phy(struct cas *cp) +{ + int limit = STOP_TRIES_PHY; + u16 val; + + cas_phy_write(cp, MII_BMCR, BMCR_RESET); + udelay(100); + while (limit--) { + val = cas_phy_read(cp, MII_BMCR); + if ((val & BMCR_RESET) == 0) + break; + udelay(10); + } + return (limit <= 0); +} + +static void cas_saturn_firmware_load(struct cas *cp) +{ + cas_saturn_patch_t *patch = cas_saturn_patch; + + cas_phy_powerdown(cp); + + /* expanded memory access mode */ + cas_phy_write(cp, DP83065_MII_MEM, 0x0); + + /* pointer configuration for new firmware */ + cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); + cas_phy_write(cp, DP83065_MII_REGD, 0xbd); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); + cas_phy_write(cp, DP83065_MII_REGD, 0x82); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); + cas_phy_write(cp, DP83065_MII_REGD, 0x0); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); + cas_phy_write(cp, DP83065_MII_REGD, 0x39); + + /* download new firmware */ + cas_phy_write(cp, DP83065_MII_MEM, 0x1); + cas_phy_write(cp, DP83065_MII_REGE, patch->addr); + while (patch->addr) { + cas_phy_write(cp, DP83065_MII_REGD, patch->val); + patch++; + } + + /* enable firmware */ + cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); + cas_phy_write(cp, DP83065_MII_REGD, 0x1); +} + + +/* phy initialization */ +static void cas_phy_init(struct cas *cp) +{ + u16 val; + + /* if we're in MII/GMII mode, set up phy */ + if (CAS_PHY_MII(cp->phy_type)) { + writel(PCS_DATAPATH_MODE_MII, + cp->regs + REG_PCS_DATAPATH_MODE); + + cas_mif_poll(cp, 0); + cas_reset_mii_phy(cp); /* take out of isolate mode */ + + if (PHY_LUCENT_B0 == cp->phy_id) { + /* workaround link up/down issue with lucent */ + cas_phy_write(cp, LUCENT_MII_REG, 0x8000); + cas_phy_write(cp, MII_BMCR, 0x00f1); + cas_phy_write(cp, LUCENT_MII_REG, 0x0); + + } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { + /* workarounds for broadcom phy */ + cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); + 
cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); + + } else if (PHY_BROADCOM_5411 == cp->phy_id) { + val = cas_phy_read(cp, BROADCOM_MII_REG4); + val = cas_phy_read(cp, BROADCOM_MII_REG4); + if (val & 0x0080) { + /* link workaround */ + cas_phy_write(cp, BROADCOM_MII_REG4, + val & ~0x0080); + } + + } else if (cp->cas_flags & CAS_FLAG_SATURN) { + writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? + SATURN_PCFG_FSI : 0x0, + cp->regs + REG_SATURN_PCFG); + + /* load firmware to address 10Mbps auto-negotiation + * issue. NOTE: this will need to be changed if the + * default firmware gets fixed. + */ + if (PHY_NS_DP83065 == cp->phy_id) { + cas_saturn_firmware_load(cp); + } + cas_phy_powerup(cp); + } + + /* advertise capabilities */ + val = cas_phy_read(cp, MII_BMCR); + val &= ~BMCR_ANENABLE; + cas_phy_write(cp, MII_BMCR, val); + udelay(10); + + cas_phy_write(cp, MII_ADVERTISE, + cas_phy_read(cp, MII_ADVERTISE) | + (ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + CAS_ADVERTISE_PAUSE | + CAS_ADVERTISE_ASYM_PAUSE)); + + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + /* make sure that we don't advertise half + * duplex to avoid a chip issue + */ + val = cas_phy_read(cp, CAS_MII_1000_CTRL); + val &= ~CAS_ADVERTISE_1000HALF; + val |= CAS_ADVERTISE_1000FULL; + cas_phy_write(cp, CAS_MII_1000_CTRL, val); + } + + } else { + /* reset pcs for serdes */ + u32 val; + int limit; + + writel(PCS_DATAPATH_MODE_SERDES, + cp->regs + REG_PCS_DATAPATH_MODE); + + /* enable serdes pins on saturn */ + if (cp->cas_flags & CAS_FLAG_SATURN) + writel(0, cp->regs + REG_SATURN_PCFG); + + /* Reset PCS unit. */ + val = readl(cp->regs + REG_PCS_MII_CTRL); + val |= PCS_MII_RESET; + writel(val, cp->regs + REG_PCS_MII_CTRL); + + limit = STOP_TRIES; + while (limit-- > 0) { + udelay(10); + if ((readl(cp->regs + REG_PCS_MII_CTRL) & + PCS_MII_RESET) == 0) + break; + } + if (limit <= 0) + printk(KERN_WARNING "%s: PCS reset bit would not " + "clear [%08x].\n", cp->dev->name, + readl(cp->regs + REG_PCS_STATE_MACHINE)); + + /* Make sure PCS is disabled while changing advertisement + * configuration. + */ + writel(0x0, cp->regs + REG_PCS_CFG); + + /* Advertise all capabilities except half-duplex. */ + val = readl(cp->regs + REG_PCS_MII_ADVERT); + val &= ~PCS_MII_ADVERT_HD; + val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE | + PCS_MII_ADVERT_ASYM_PAUSE); + writel(val, cp->regs + REG_PCS_MII_ADVERT); + + /* enable PCS */ + writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); + + /* pcs workaround: enable sync detect */ + writel(PCS_SERDES_CTRL_SYNCD_EN, + cp->regs + REG_PCS_SERDES_CTRL); + } +} + + +static int cas_pcs_link_check(struct cas *cp) +{ + u32 stat, state_machine; + int retval = 0; + + /* The link status bit latches on zero, so you must + * read it twice in such a case to see a transition + * to the link being up. + */ + stat = readl(cp->regs + REG_PCS_MII_STATUS); + if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0) + stat = readl(cp->regs + REG_PCS_MII_STATUS); + + /* The remote-fault indication is only valid + * when autoneg has completed. + */ + if ((stat & (PCS_MII_STATUS_AUTONEG_COMP | + PCS_MII_STATUS_REMOTE_FAULT)) == + (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) { + if (netif_msg_link(cp)) + printk(KERN_INFO "%s: PCS RemoteFault\n", + cp->dev->name); + } + + /* work around link detection issue by querying the PCS state + * machine directly. 
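The double read at the top of cas_pcs_link_check() above is the standard latched-status idiom; the same trick applies to the MII BMSR, whose link bit also latches low until read. A sketch, built on cas_phy_read() above (the helper name is hypothetical):

static int cas_mii_link_up(struct cas *cp)
{
	u16 bmsr = cas_phy_read(cp, MII_BMSR);

	/* a single read still reports any bounce since the last read;
	 * the second read returns the current link state
	 */
	if (!(bmsr & BMSR_LSTATUS))
		bmsr = cas_phy_read(cp, MII_BMSR);
	return (bmsr & BMSR_LSTATUS) != 0;
}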
+ */ + state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); + if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) { + stat &= ~PCS_MII_STATUS_LINK_STATUS; + } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) { + stat |= PCS_MII_STATUS_LINK_STATUS; + } + + if (stat & PCS_MII_STATUS_LINK_STATUS) { + if (cp->lstate != link_up) { + if (cp->opened) { + cp->lstate = link_up; + cp->link_transition = LINK_TRANSITION_LINK_UP; + + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } + } + } else if (cp->lstate == link_up) { + cp->lstate = link_down; + if (link_transition_timeout != 0 && + cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && + !cp->link_transition_jiffies_valid) { + /* + * force a reset, as a workaround for the + * link-failure problem. May want to move this to a + * point a bit earlier in the sequence. If we had + * generated a reset a short time ago, we'll wait for + * the link timer to check the status until a + * timer expires (link_transistion_jiffies_valid is + * true when the timer is running.) Instead of using + * a system timer, we just do a check whenever the + * link timer is running - this clears the flag after + * a suitable delay. + */ + retval = 1; + cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; + cp->link_transition_jiffies = jiffies; + cp->link_transition_jiffies_valid = 1; + } else { + cp->link_transition = LINK_TRANSITION_ON_FAILURE; + } + netif_carrier_off(cp->dev); + if (cp->opened && netif_msg_link(cp)) { + printk(KERN_INFO "%s: PCS link down.\n", + cp->dev->name); + } + + /* Cassini only: if you force a mode, there can be + * sync problems on link down. to fix that, the following + * things need to be checked: + * 1) read serialink state register + * 2) read pcs status register to verify link down. + * 3) if link down and serial link == 0x03, then you need + * to global reset the chip. + */ + if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { + /* should check to see if we're in a forced mode */ + stat = readl(cp->regs + REG_PCS_SERDES_STATE); + if (stat == 0x03) + return 1; + } + } else if (cp->lstate == link_down) { + if (link_transition_timeout != 0 && + cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && + !cp->link_transition_jiffies_valid) { + /* force a reset, as a workaround for the + * link-failure problem. May want to move + * this to a point a bit earlier in the + * sequence. + */ + retval = 1; + cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; + cp->link_transition_jiffies = jiffies; + cp->link_transition_jiffies_valid = 1; + } else { + cp->link_transition = LINK_TRANSITION_STILL_FAILED; + } + } + + return retval; +} + +static int cas_pcs_interrupt(struct net_device *dev, + struct cas *cp, u32 status) +{ + u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); + + if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0) + return 0; + return cas_pcs_link_check(cp); +} + +static int cas_txmac_interrupt(struct net_device *dev, + struct cas *cp, u32 status) +{ + u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); + + if (!txmac_stat) + return 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", + cp->dev->name, txmac_stat); + + /* Defer timer expiration is quite normal, + * don't even log the event. 
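The link_transition_jiffies / link_transition_jiffies_valid pair above implements a hold-off: at most one workaround reset per linkdown_timeout window. The real code clears the _valid flag from the link timer rather than comparing timestamps inline; the equivalent check written with time_after() would look like this (hypothetical helper, shown only to make the intent explicit):

static int cas_linkdown_reset_allowed(struct cas *cp)
{
	if (!link_transition_timeout)
		return 0;	/* PCS workaround disabled via module parameter */
	if (!cp->link_transition_jiffies_valid)
		return 1;	/* no recent workaround reset */
	return time_after(jiffies, cp->link_transition_jiffies +
				   link_transition_timeout);
}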
+ */ + if ((txmac_stat & MAC_TX_DEFER_TIMER) && + !(txmac_stat & ~MAC_TX_DEFER_TIMER)) + return 0; + + spin_lock(&cp->stat_lock[0]); + if (txmac_stat & MAC_TX_UNDERRUN) { + printk(KERN_ERR "%s: TX MAC xmit underrun.\n", + dev->name); + cp->net_stats[0].tx_fifo_errors++; + } + + if (txmac_stat & MAC_TX_MAX_PACKET_ERR) { + printk(KERN_ERR "%s: TX MAC max packet size error.\n", + dev->name); + cp->net_stats[0].tx_errors++; + } + + /* The rest are all cases of one of the 16-bit TX + * counters expiring. + */ + if (txmac_stat & MAC_TX_COLL_NORMAL) + cp->net_stats[0].collisions += 0x10000; + + if (txmac_stat & MAC_TX_COLL_EXCESS) { + cp->net_stats[0].tx_aborted_errors += 0x10000; + cp->net_stats[0].collisions += 0x10000; + } + + if (txmac_stat & MAC_TX_COLL_LATE) { + cp->net_stats[0].tx_aborted_errors += 0x10000; + cp->net_stats[0].collisions += 0x10000; + } + spin_unlock(&cp->stat_lock[0]); + + /* We do not keep track of MAC_TX_COLL_FIRST and + * MAC_TX_PEAK_ATTEMPTS events. + */ + return 0; +} + +static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) +{ + cas_hp_inst_t *inst; + u32 val; + int i; + + i = 0; + while ((inst = firmware) && inst->note) { + writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); + + val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val); + val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); + + val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10); + val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop); + val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext); + val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff); + val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext); + val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff); + val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); + + val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); + ++firmware; + ++i; + } +} + +static void cas_init_rx_dma(struct cas *cp) +{ + u64 desc_dma = cp->block_dvma; + u32 val; + int i, size; + + /* rx free descriptors */ + val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL); + val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0)); + val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0)); + if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ + val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1)); + writel(val, cp->regs + REG_RX_CFG); + + val = (unsigned long) cp->init_rxds[0] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); + writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + /* rx desc 2 is for IPSEC packets. however, + * we don't it that for that purpose. 
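A note on the 0x10000 increments above: the MAC keeps these statistics in 16-bit hardware counters and raises one of these interrupts each time a counter expires (wraps), so software credits a full 2^16 events per interrupt and leaves the low 16 bits in the hardware register. For example, after three MAC_TX_COLL_NORMAL expiry interrupts with the hardware counter currently reading 0x012a, the true collision count is 3 * 0x10000 + 0x012a = 196906.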
+ */ + val = (unsigned long) cp->init_rxds[1] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + + REG_PLUS_RX_DB1_LOW); + writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + + REG_PLUS_RX_KICK1); + } + + /* rx completion registers */ + val = (unsigned long) cp->init_rxcs[0] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + /* rx comp 2-4 */ + for (i = 1; i < MAX_RX_COMP_RINGS; i++) { + val = (unsigned long) cp->init_rxcs[i] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + + REG_PLUS_RX_CBN_HI(i)); + writel((desc_dma + val) & 0xffffffff, cp->regs + + REG_PLUS_RX_CBN_LOW(i)); + } + } + + /* read selective clear regs to prevent spurious interrupts + * on reset because complete == kick. + * selective clear set up to prevent interrupts on resets + */ + readl(cp->regs + REG_INTR_STATUS_ALIAS); + writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + for (i = 1; i < N_RX_COMP_RINGS; i++) + readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); + + /* 2 is different from 3 and 4 */ + if (N_RX_COMP_RINGS > 1) + writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, + cp->regs + REG_PLUS_ALIASN_CLEAR(1)); + + for (i = 2; i < N_RX_COMP_RINGS; i++) + writel(INTR_RX_DONE_ALT, + cp->regs + REG_PLUS_ALIASN_CLEAR(i)); + } + + /* set up pause thresholds */ + val = CAS_BASE(RX_PAUSE_THRESH_OFF, + cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); + val |= CAS_BASE(RX_PAUSE_THRESH_ON, + cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); + writel(val, cp->regs + REG_RX_PAUSE_THRESH); + + /* zero out dma reassembly buffers */ + for (i = 0; i < 64; i++) { + writel(i, cp->regs + REG_RX_TABLE_ADDR); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); + } + + /* make sure address register is 0 for normal operation */ + writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); + writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); + + /* interrupt mitigation */ +#ifdef USE_RX_BLANK + val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL); + val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL); + writel(val, cp->regs + REG_RX_BLANK); +#else + writel(0x0, cp->regs + REG_RX_BLANK); +#endif + + /* interrupt generation as a function of low water marks for + * free desc and completion entries. these are used to trigger + * housekeeping for rx descs. we don't use the free interrupt + * as it's not very useful + */ + /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */ + val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL); + writel(val, cp->regs + REG_RX_AE_THRESH); + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1)); + writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); + } + + /* Random early detect registers. useful for congestion avoidance. + * this should be tunable. + */ + writel(0x0, cp->regs + REG_RX_RED); + + /* receive page sizes. default == 2K (0x800) */ + val = 0; + if (cp->page_size == 0x1000) + val = 0x1; + else if (cp->page_size == 0x2000) + val = 0x2; + else if (cp->page_size == 0x4000) + val = 0x3; + + /* round mtu + offset. constrain to page size. 
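To make the stride arithmetic that follows concrete: with a standard 1500-byte MTU and 8 KB receive pages, size = 1500 + 64 = 1564 falls in the <= 0x800 bucket, so i = 1, mtu_stride = 1 << 11 = 2048, and RX_PAGE_SIZE_MTU_COUNT = 0x2000 >> 11 = 4, i.e. four 2 KB frame slots per page. With a 9000-byte jumbo MTU on the same 8 KB pages, size is clamped to the page size, i = 3, and each page holds a single 8 KB slot.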
*/ + size = cp->dev->mtu + 64; + if (size > cp->page_size) + size = cp->page_size; + + if (size <= 0x400) + i = 0x0; + else if (size <= 0x800) + i = 0x1; + else if (size <= 0x1000) + i = 0x2; + else + i = 0x3; + + cp->mtu_stride = 1 << (i + 10); + val = CAS_BASE(RX_PAGE_SIZE, val); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1); + writel(val, cp->regs + REG_RX_PAGE_SIZE); + + /* enable the header parser if desired */ + if (CAS_HP_FIRMWARE == cas_prog_null) + return; + + val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); + val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK; + val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL); + writel(val, cp->regs + REG_HP_CFG); +} + +static inline void cas_rxc_init(struct cas_rx_comp *rxc) +{ + memset(rxc, 0, sizeof(*rxc)); + rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); +} + +/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1] + * flipping is protected by the fact that the chip will not + * hand back the same page index while it's being processed. + */ +static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) +{ + cas_page_t *page = cp->rx_pages[1][index]; + cas_page_t *new; + + if (page_count(page->buffer) == 1) + return page; + + new = cas_page_dequeue(cp); + if (new) { + spin_lock(&cp->rx_inuse_lock); + list_add(&page->list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + } + return new; +} + +/* this needs to be changed if we actually use the ENC RX DESC ring */ +static cas_page_t *cas_page_swap(struct cas *cp, const int ring, + const int index) +{ + cas_page_t **page0 = cp->rx_pages[0]; + cas_page_t **page1 = cp->rx_pages[1]; + + /* swap if buffer is in use */ + if (page_count(page0[index]->buffer) > 1) { + cas_page_t *new = cas_page_spare(cp, index); + if (new) { + page1[index] = page0[index]; + page0[index] = new; + } + } + RX_USED_SET(page0[index], 0); + return page0[index]; +} + +static void cas_clean_rxds(struct cas *cp) +{ + /* only clean ring 0 as ring 1 is used for spare buffers */ + struct cas_rx_desc *rxd = cp->init_rxds[0]; + int i, size; + + /* release all rx flows */ + for (i = 0; i < N_RX_FLOWS; i++) { + struct sk_buff *skb; + while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { + cas_skb_release(skb); + } + } + + /* initialize descriptors */ + size = RX_DESC_RINGN_SIZE(0); + for (i = 0; i < size; i++) { + cas_page_t *page = cas_page_swap(cp, 0, i); + rxd[i].buffer = cpu_to_le64(page->dma_addr); + rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | + CAS_BASE(RX_INDEX_RING, 0)); + } + + cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; + cp->rx_last[0] = 0; + cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); +} + +static void cas_clean_rxcs(struct cas *cp) +{ + int i, j; + + /* take ownership of rx comp descriptors */ + memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); + memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); + for (i = 0; i < N_RX_COMP_RINGS; i++) { + struct cas_rx_comp *rxc = cp->init_rxcs[i]; + for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { + cas_rxc_init(rxc + j); + } + } +} + +#if 0 +/* When we get a RX fifo overflow, the RX unit is probably hung + * so we do the following. + * + * If any part of the reset goes wrong, we return 1 and that causes the + * whole chip to be reset. + */ +static int cas_rxmac_reset(struct cas *cp) +{ + struct net_device *dev = cp->dev; + int limit; + u32 val; + + /* First, reset MAC RX. 
*/ + writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " + "chip.\n", dev->name); + return 1; + } + + /* Second, disable RX DMA. */ + writel(0, cp->regs + REG_RX_CFG); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " + "chip.\n", dev->name); + return 1; + } + + mdelay(5); + + /* Execute RX reset command. */ + writel(SW_RESET_RX, cp->regs + REG_SW_RESET); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + printk(KERN_ERR "%s: RX reset command will not execute, " + "resetting whole chip.\n", dev->name); + return 1; + } + + /* reset driver rx state */ + cas_clean_rxds(cp); + cas_clean_rxcs(cp); + + /* Now, reprogram the rest of RX unit. */ + cas_init_rx_dma(cp); + + /* re-enable */ + val = readl(cp->regs + REG_RX_CFG); + writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); + writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); + val = readl(cp->regs + REG_MAC_RX_CFG); + writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + return 0; +} +#endif + +static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); + + if (!stat) + return 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n", + cp->dev->name, stat); + + /* these are all rollovers */ + spin_lock(&cp->stat_lock[0]); + if (stat & MAC_RX_ALIGN_ERR) + cp->net_stats[0].rx_frame_errors += 0x10000; + + if (stat & MAC_RX_CRC_ERR) + cp->net_stats[0].rx_crc_errors += 0x10000; + + if (stat & MAC_RX_LEN_ERR) + cp->net_stats[0].rx_length_errors += 0x10000; + + if (stat & MAC_RX_OVERFLOW) { + cp->net_stats[0].rx_over_errors++; + cp->net_stats[0].rx_fifo_errors++; + } + + /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR + * events. + */ + spin_unlock(&cp->stat_lock[0]); + return 0; +} + +static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); + + if (!stat) + return 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n", + cp->dev->name, stat); + + /* This interrupt is just for pause frame and pause + * tracking. It is useful for diagnostics and debug + * but probably by default we will mask these events. + */ + if (stat & MAC_CTRL_PAUSE_STATE) + cp->pause_entered++; + + if (stat & MAC_CTRL_PAUSE_RECEIVED) + cp->pause_last_time_recvd = (stat >> 16); + + return 0; +} + + +/* Must be invoked under cp->lock. */ +static inline int cas_mdio_link_not_up(struct cas *cp) +{ + u16 val; + + switch (cp->lstate) { + case link_force_ret: + if (netif_msg_link(cp)) + printk(KERN_INFO "%s: Autoneg failed again, keeping" + " forced mode\n", cp->dev->name); + cas_phy_write(cp, MII_BMCR, cp->link_fcntl); + cp->timer_ticks = 5; + cp->lstate = link_force_ok; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + break; + + case link_aneg: + val = cas_phy_read(cp, MII_BMCR); + + /* Try forced modes. 
we try things in the following order: + * 1000 full -> 100 full/half -> 10 half + */ + val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); + val |= BMCR_FULLDPLX; + val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? + CAS_BMCR_SPEED1000 : BMCR_SPEED100; + cas_phy_write(cp, MII_BMCR, val); + cp->timer_ticks = 5; + cp->lstate = link_force_try; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + break; + + case link_force_try: + /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ + val = cas_phy_read(cp, MII_BMCR); + cp->timer_ticks = 5; + if (val & CAS_BMCR_SPEED1000) { /* gigabit */ + val &= ~CAS_BMCR_SPEED1000; + val |= (BMCR_SPEED100 | BMCR_FULLDPLX); + cas_phy_write(cp, MII_BMCR, val); + break; + } + + if (val & BMCR_SPEED100) { + if (val & BMCR_FULLDPLX) /* fd failed */ + val &= ~BMCR_FULLDPLX; + else { /* 100Mbps failed */ + val &= ~BMCR_SPEED100; + } + cas_phy_write(cp, MII_BMCR, val); + break; + } + default: + break; + } + return 0; +} + + +/* must be invoked with cp->lock held */ +static int cas_mii_link_check(struct cas *cp, const u16 bmsr) +{ + int restart; + + if (bmsr & BMSR_LSTATUS) { + /* Ok, here we got a link. If we had it due to a forced + * fallback, and we were configured for autoneg, we + * retry a short autoneg pass. If you know your hub is + * broken, use ethtool ;) + */ + if ((cp->lstate == link_force_try) && + (cp->link_cntl & BMCR_ANENABLE)) { + cp->lstate = link_force_ret; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + cas_mif_poll(cp, 0); + cp->link_fcntl = cas_phy_read(cp, MII_BMCR); + cp->timer_ticks = 5; + if (cp->opened && netif_msg_link(cp)) + printk(KERN_INFO "%s: Got link after fallback, retrying" + " autoneg once...\n", cp->dev->name); + cas_phy_write(cp, MII_BMCR, + cp->link_fcntl | BMCR_ANENABLE | + BMCR_ANRESTART); + cas_mif_poll(cp, 1); + + } else if (cp->lstate != link_up) { + cp->lstate = link_up; + cp->link_transition = LINK_TRANSITION_LINK_UP; + + if (cp->opened) { + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } + } + return 0; + } + + /* link not up. if the link was previously up, we restart the + * whole process + */ + restart = 0; + if (cp->lstate == link_up) { + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + + netif_carrier_off(cp->dev); + if (cp->opened && netif_msg_link(cp)) + printk(KERN_INFO "%s: Link down\n", + cp->dev->name); + restart = 1; + + } else if (++cp->timer_ticks > 10) + cas_mdio_link_not_up(cp); + + return restart; +} + +static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MIF_STATUS); + u16 bmsr; + + /* check for a link change */ + if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) + return 0; + + bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); + return cas_mii_link_check(cp, bmsr); +} + +static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); + + if (!stat) + return 0; + + printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat, + readl(cp->regs + REG_BIM_DIAG)); + + /* cassini+ has this reserved */ + if ((stat & PCI_ERR_BADACK) && + ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) + printk(" "); + + if (stat & PCI_ERR_DTRTO) + printk(" "); + if (stat & PCI_ERR_OTHER) + printk(" "); + if (stat & PCI_ERR_BIM_DMA_WRITE) + printk(" "); + if (stat & PCI_ERR_BIM_DMA_READ) + printk(" "); + printk("\n"); + + if (stat & PCI_ERR_OTHER) { + u16 cfg; + + /* Interrogate PCI config space for the + * true cause. 
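The config-space handling that follows boils down to a read, report, write-back cycle; the PCI_STATUS error bits are write-one-to-clear, so writing back exactly the bits that were set acknowledges them without touching anything else. A condensed sketch (hypothetical helper):

static void cas_clear_pci_errors(struct pci_dev *pdev)
{
	u16 cfg;

	pci_read_config_word(pdev, PCI_STATUS, &cfg);
	cfg &= (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
	if (cfg)	/* write-one-to-clear */
		pci_write_config_word(pdev, PCI_STATUS, cfg);
}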
+ */ + pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); + printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", + dev->name, cfg); + if (cfg & PCI_STATUS_PARITY) + printk(KERN_ERR "%s: PCI parity error detected.\n", + dev->name); + if (cfg & PCI_STATUS_SIG_TARGET_ABORT) + printk(KERN_ERR "%s: PCI target abort.\n", + dev->name); + if (cfg & PCI_STATUS_REC_TARGET_ABORT) + printk(KERN_ERR "%s: PCI master acks target abort.\n", + dev->name); + if (cfg & PCI_STATUS_REC_MASTER_ABORT) + printk(KERN_ERR "%s: PCI master abort.\n", dev->name); + if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) + printk(KERN_ERR "%s: PCI system error SERR#.\n", + dev->name); + if (cfg & PCI_STATUS_DETECTED_PARITY) + printk(KERN_ERR "%s: PCI parity error.\n", + dev->name); + + /* Write the error bits back to clear them. */ + cfg &= (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR | + PCI_STATUS_DETECTED_PARITY); + pci_write_config_word(cp->pdev, PCI_STATUS, cfg); + } + + /* For all PCI errors, we should reset the chip. */ + return 1; +} + +/* All non-normal interrupt conditions get serviced here. + * Returns non-zero if we should just exit the interrupt + * handler right now (ie. if we reset the card which invalidates + * all of the other original irq status bits). + */ +static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, + u32 status) +{ + if (status & INTR_RX_TAG_ERROR) { + /* corrupt RX tag framing */ + if (netif_msg_rx_err(cp)) + printk(KERN_DEBUG "%s: corrupt rx tag framing\n", + cp->dev->name); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_errors++; + spin_unlock(&cp->stat_lock[0]); + goto do_reset; + } + + if (status & INTR_RX_LEN_MISMATCH) { + /* length mismatch. */ + if (netif_msg_rx_err(cp)) + printk(KERN_DEBUG "%s: length mismatch for rx frame\n", + cp->dev->name); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_errors++; + spin_unlock(&cp->stat_lock[0]); + goto do_reset; + } + + if (status & INTR_PCS_STATUS) { + if (cas_pcs_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_TX_MAC_STATUS) { + if (cas_txmac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_RX_MAC_STATUS) { + if (cas_rxmac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_MAC_CTRL_STATUS) { + if (cas_mac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_MIF_STATUS) { + if (cas_mif_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_PCI_ERROR_STATUS) { + if (cas_pci_interrupt(dev, cp, status)) + goto do_reset; + } + return 0; + +do_reset: +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n", + dev->name, status); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + printk(KERN_ERR "reset called in cas_abnormal_irq\n"); + schedule_work(&cp->reset_task); +#endif + return 1; +} + +/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when + * determining whether to do a netif_stop/wakeup + */ +#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 
2 : 1) +#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) +static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, + const int len) +{ + unsigned long off = addr + len; + + if (CAS_TABORT(cp) == 1) + return 0; + if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) + return 0; + return TX_TARGET_ABORT_LEN; +} + +static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) +{ + struct cas_tx_desc *txds; + struct sk_buff **skbs; + struct net_device *dev = cp->dev; + int entry, count; + + spin_lock(&cp->tx_lock[ring]); + txds = cp->init_txds[ring]; + skbs = cp->tx_skbs[ring]; + entry = cp->tx_old[ring]; + + count = TX_BUFF_COUNT(ring, entry, limit); + while (entry != limit) { + struct sk_buff *skb = skbs[entry]; + dma_addr_t daddr; + u32 dlen; + int frag; + + if (!skb) { + /* this should never occur */ + entry = TX_DESC_NEXT(ring, entry); + continue; + } + + /* however, we might get only a partial skb release. */ + count -= skb_shinfo(skb)->nr_frags + + + cp->tx_tiny_use[ring][entry].nbufs + 1; + if (count < 0) + break; + + if (netif_msg_tx_done(cp)) + printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", + cp->dev->name, ring, entry); + + skbs[entry] = NULL; + cp->tx_tiny_use[ring][entry].nbufs = 0; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + struct cas_tx_desc *txd = txds + entry; + + daddr = le64_to_cpu(txd->buffer); + dlen = CAS_VAL(TX_DESC_BUFLEN, + le64_to_cpu(txd->control)); + pci_unmap_page(cp->pdev, daddr, dlen, + PCI_DMA_TODEVICE); + entry = TX_DESC_NEXT(ring, entry); + + /* tiny buffer may follow */ + if (cp->tx_tiny_use[ring][entry].used) { + cp->tx_tiny_use[ring][entry].used = 0; + entry = TX_DESC_NEXT(ring, entry); + } + } + + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].tx_packets++; + cp->net_stats[ring].tx_bytes += skb->len; + spin_unlock(&cp->stat_lock[ring]); + dev_kfree_skb_irq(skb); + } + cp->tx_old[ring] = entry; + + /* this is wrong for multiple tx rings. the net device needs + * multiple queues for this to do the right thing. 
we wait + * for 2*packets to be available when using tiny buffers + */ + if (netif_queue_stopped(dev) && + (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) + netif_wake_queue(dev); + spin_unlock(&cp->tx_lock[ring]); +} + +static void cas_tx(struct net_device *dev, struct cas *cp, + u32 status) +{ + int limit, ring; +#ifdef USE_TX_COMPWB + u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); +#endif + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n", + cp->dev->name, status, compwb); + /* process all the rings */ + for (ring = 0; ring < N_TX_RINGS; ring++) { +#ifdef USE_TX_COMPWB + /* use the completion writeback registers */ + limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | + CAS_VAL(TX_COMPWB_LSB, compwb); + compwb = TX_COMPWB_NEXT(compwb); +#else + limit = readl(cp->regs + REG_TX_COMPN(ring)); +#endif + if (cp->tx_old[ring] != limit) + cas_tx_ringN(cp, ring, limit); + } +} + + +static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, + int entry, const u64 *words, + struct sk_buff **skbref) +{ + int dlen, hlen, len, i, alloclen; + int off, swivel = RX_SWIVEL_OFF_VAL; + struct cas_page *page; + struct sk_buff *skb; + void *addr, *crcaddr; + char *p; + + hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); + dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); + len = hlen + dlen; + + if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) + alloclen = len; + else + alloclen = max(hlen, RX_COPY_MIN); + + skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); + if (skb == NULL) + return -1; + + *skbref = skb; + skb->dev = cp->dev; + skb_reserve(skb, swivel); + + p = skb->data; + addr = crcaddr = NULL; + if (hlen) { /* always copy header pages */ + i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + + swivel; + + i = hlen; + if (!dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, i); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + RX_USED_ADD(page, 0x100); + p += hlen; + swivel = 0; + } + + + if (alloclen < (hlen + dlen)) { + skb_frag_t *frag = skb_shinfo(skb)->frags; + + /* normal or jumbo packets. 
we use frags */ + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; + + hlen = min(cp->page_size - off, dlen); + if (hlen < 0) { + if (netif_msg_rx_err(cp)) { + printk(KERN_DEBUG "%s: rx page overflow: " + "%d\n", cp->dev->name, hlen); + } + dev_kfree_skb_irq(skb); + return -1; + } + i = hlen; + if (i == dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + + /* make sure we always copy a header */ + swivel = 0; + if (p == (char *) skb->data) { /* not split */ + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, RX_COPY_MIN); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + off += RX_COPY_MIN; + swivel = RX_COPY_MIN; + RX_USED_ADD(page, cp->mtu_stride); + } else { + RX_USED_ADD(page, hlen); + } + skb_put(skb, alloclen); + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen - swivel; + skb->len += hlen - swivel; + + get_page(page->buffer); + frag->page = page->buffer; + frag->page_offset = off; + frag->size = hlen - swivel; + + /* any more data? */ + if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { + hlen = dlen; + off = 0; + + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, + hlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, + hlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen; + skb->len += hlen; + frag++; + + get_page(page->buffer); + frag->page = page->buffer; + frag->page_offset = 0; + frag->size = hlen; + RX_USED_ADD(page, hlen + cp->crc_size); + } + + if (cp->crc_size) { + addr = cas_page_map(page->buffer); + crcaddr = addr + off + hlen; + } + + } else { + /* copying packet */ + if (!dlen) + goto end_copy_pkt; + + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; + hlen = min(cp->page_size - off, dlen); + if (hlen < 0) { + if (netif_msg_rx_err(cp)) { + printk(KERN_DEBUG "%s: rx page overflow: " + "%d\n", cp->dev->name, hlen); + } + dev_kfree_skb_irq(skb); + return -1; + } + i = hlen; + if (i == dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, i); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + if (p == (char *) skb->data) /* not split */ + RX_USED_ADD(page, cp->mtu_stride); + else + RX_USED_ADD(page, i); + + /* any more data? 
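The frags branch above attaches payload pages to the skb by hand; gathered into one place, the steps are: take a page reference, fill the next frag descriptor, and grow the skb's length accounting (later kernels provide skb_fill_page_desc() for the descriptor part, but it leaves the refcount and length bookkeeping to the caller). Hypothetical helper, for illustration only:

static void cas_skb_add_frag(struct sk_buff *skb, struct page *page,
			     int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];

	get_page(page);			/* the skb now owns a reference */
	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags++;

	skb->data_len += size;		/* bytes held in frags */
	skb->len += size;		/* total packet length */
}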
*/ + if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { + p += hlen; + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, + dlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr, dlen + cp->crc_size); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, + dlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + RX_USED_ADD(page, dlen + cp->crc_size); + } +end_copy_pkt: + if (cp->crc_size) { + addr = NULL; + crcaddr = skb->data + alloclen; + } + skb_put(skb, alloclen); + } + + i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]); + if (cp->crc_size) { + /* checksum includes FCS. strip it out. */ + i = csum_fold(csum_partial(crcaddr, cp->crc_size, i)); + if (addr) + cas_page_unmap(addr); + } + skb->csum = ntohs(i ^ 0xffff); + skb->ip_summed = CHECKSUM_HW; + skb->protocol = eth_type_trans(skb, cp->dev); + return len; +} + + +/* we can handle up to 64 rx flows at a time. we do the same thing + * as nonreassm except that we batch up the buffers. + * NOTE: we currently just treat each flow as a bunch of packets that + * we pass up. a better way would be to coalesce the packets + * into a jumbo packet. to do that, we need to do the following: + * 1) the first packet will have a clean split between header and + * data. save both. + * 2) each time the next flow packet comes in, extend the + * data length and merge the checksums. + * 3) on flow release, fix up the header. + * 4) make sure the higher layer doesn't care. + * because packets get coalesced, we shouldn't run into fragment count + * issues. + */ +static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, + struct sk_buff *skb) +{ + int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); + struct sk_buff_head *flow = &cp->rx_flows[flowid]; + + /* this is protected at a higher layer, so no need to + * do any additional locking here. stick the buffer + * at the end. + */ + __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); + if (words[0] & RX_COMP1_RELEASE_FLOW) { + while ((skb = __skb_dequeue(flow))) { + cas_skb_release(skb); + } + } +} + +/* put rx descriptor back on ring. if a buffer is in use by a higher + * layer, this will need to put in a replacement. + */ +static void cas_post_page(struct cas *cp, const int ring, const int index) +{ + cas_page_t *new; + int entry; + + entry = cp->rx_old[ring]; + + new = cas_page_swap(cp, ring, index); + cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); + cp->init_rxds[ring][entry].index = + cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | + CAS_BASE(RX_INDEX_RING, ring)); + + entry = RX_DESC_ENTRY(ring, entry + 1); + cp->rx_old[ring] = entry; + + if (entry % 4) + return; + + if (ring == 0) + writel(entry, cp->regs + REG_RX_KICK); + else if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) + writel(entry, cp->regs + REG_PLUS_RX_KICK1); +} + + +/* only when things are bad */ +static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) +{ + unsigned int entry, last, count, released; + int cluster; + cas_page_t **page = cp->rx_pages[ring]; + + entry = cp->rx_old[ring]; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", + cp->dev->name, ring, entry); + + cluster = -1; + count = entry & 0x3; + last = RX_DESC_ENTRY(ring, num ? 
entry + num - 4: entry - 4); + released = 0; + while (entry != last) { + /* make a new buffer if it's still in use */ + if (page_count(page[entry]->buffer) > 1) { + cas_page_t *new = cas_page_dequeue(cp); + if (!new) { + /* let the timer know that we need to + * do this again + */ + cp->cas_flags |= CAS_FLAG_RXD_POST(ring); + if (!timer_pending(&cp->link_timer)) + mod_timer(&cp->link_timer, jiffies + + CAS_LINK_FAST_TIMEOUT); + cp->rx_old[ring] = entry; + cp->rx_last[ring] = num ? num - released : 0; + return -ENOMEM; + } + spin_lock(&cp->rx_inuse_lock); + list_add(&page[entry]->list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + cp->init_rxds[ring][entry].buffer = + cpu_to_le64(new->dma_addr); + page[entry] = new; + + } + + if (++count == 4) { + cluster = entry; + count = 0; + } + released++; + entry = RX_DESC_ENTRY(ring, entry + 1); + } + cp->rx_old[ring] = entry; + + if (cluster < 0) + return 0; + + if (ring == 0) + writel(cluster, cp->regs + REG_RX_KICK); + else if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) + writel(cluster, cp->regs + REG_PLUS_RX_KICK1); + return 0; +} + + +/* process a completion ring. packets are set up in three basic ways: + * small packets: should be copied header + data in single buffer. + * large packets: header and data in a single buffer. + * split packets: header in a separate buffer from data. + * data may be in multiple pages. data may be > 256 + * bytes but in a single page. + * + * NOTE: RX page posting is done in this routine as well. while there's + * the capability of using multiple RX completion rings, it isn't + * really worthwhile due to the fact that the page posting will + * force serialization on the single descriptor ring. + */ +static int cas_rx_ringN(struct cas *cp, int ring, int budget) +{ + struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; + int entry, drops; + int npackets = 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", + cp->dev->name, ring, + readl(cp->regs + REG_RX_COMP_HEAD), + cp->rx_new[ring]); + + entry = cp->rx_new[ring]; + drops = 0; + while (1) { + struct cas_rx_comp *rxc = rxcs + entry; + struct sk_buff *skb; + int type, len; + u64 words[4]; + int i, dring; + + words[0] = le64_to_cpu(rxc->word1); + words[1] = le64_to_cpu(rxc->word2); + words[2] = le64_to_cpu(rxc->word3); + words[3] = le64_to_cpu(rxc->word4); + + /* don't touch if still owned by hw */ + type = CAS_VAL(RX_COMP1_TYPE, words[0]); + if (type == 0) + break; + + /* hw hasn't cleared the zero bit yet */ + if (words[3] & RX_COMP4_ZERO) { + break; + } + + /* get info on the packet */ + if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].rx_errors++; + if (words[3] & RX_COMP4_LEN_MISMATCH) + cp->net_stats[ring].rx_length_errors++; + if (words[3] & RX_COMP4_BAD) + cp->net_stats[ring].rx_crc_errors++; + spin_unlock(&cp->stat_lock[ring]); + + /* We'll just return it to Cassini. */ + drop_it: + spin_lock(&cp->stat_lock[ring]); + ++cp->net_stats[ring].rx_dropped; + spin_unlock(&cp->stat_lock[ring]); + goto next; + } + + len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); + if (len < 0) { + ++drops; + goto drop_it; + } + + /* see if it's a flow re-assembly or not. the driver + * itself handles release back up. 
+ */ + if (RX_DONT_BATCH || (type == 0x2)) { + /* non-reassm: these always get released */ + cas_skb_release(skb); + } else { + cas_rx_flow_pkt(cp, words, skb); + } + + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].rx_packets++; + cp->net_stats[ring].rx_bytes += len; + spin_unlock(&cp->stat_lock[ring]); + cp->dev->last_rx = jiffies; + + next: + npackets++; + + /* should it be released? */ + if (words[0] & RX_COMP1_RELEASE_HDR) { + i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + if (words[0] & RX_COMP1_RELEASE_DATA) { + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + if (words[0] & RX_COMP1_RELEASE_NEXT) { + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + /* skip to the next entry */ + entry = RX_COMP_ENTRY(ring, entry + 1 + + CAS_VAL(RX_COMP1_SKIP, words[0])); +#ifdef USE_NAPI + if (budget && (npackets >= budget)) + break; +#endif + } + cp->rx_new[ring] = entry; + + if (drops) + printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", + cp->dev->name); + return npackets; +} + + +/* put completion entries back on the ring */ +static void cas_post_rxcs_ringN(struct net_device *dev, + struct cas *cp, int ring) +{ + struct cas_rx_comp *rxc = cp->init_rxcs[ring]; + int last, entry; + + last = cp->rx_cur[ring]; + entry = cp->rx_new[ring]; + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n", + dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD), + entry); + + /* zero and re-mark descriptors */ + while (last != entry) { + cas_rxc_init(rxc + last); + last = RX_COMP_ENTRY(ring, last + 1); + } + cp->rx_cur[ring] = last; + + if (ring == 0) + writel(last, cp->regs + REG_RX_COMP_TAIL); + else if (cp->cas_flags & CAS_FLAG_REG_PLUS) + writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); +} + + + +/* cassini can use all four PCI interrupts for the completion ring. + * rings 3 and 4 are identical + */ +#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +static inline void cas_handle_irqN(struct net_device *dev, + struct cas *cp, const u32 status, + const int ring) +{ + if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) + cas_post_rxcs_ringN(dev, cp, ring); +} + +static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + int ring; + u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); + + /* check for shared irq */ + if (status == 0) + return IRQ_NONE; + + ring = (irq == cp->pci_irq_INTC) ? 2 : 3; + spin_lock_irqsave(&cp->lock, flags); + if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ +#ifdef USE_NAPI + cas_mask_intr(cp); + netif_rx_schedule(dev); +#else + cas_rx_ringN(cp, ring, 0); +#endif + status &= ~INTR_RX_DONE_ALT; + } + + if (status) + cas_handle_irqN(dev, cp, status, ring); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} +#endif + +#ifdef USE_PCI_INTB +/* everything but rx packets */ +static inline void cas_handle_irq1(struct cas *cp, const u32 status) +{ + if (status & INTR_RX_BUF_UNAVAIL_1) { + /* Frame arrived, no free RX buffers available. + * NOTE: we can get this on a link transition. 
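cas_interruptN() above demultiplexes the extra PCI interrupt lines back to completion rings 3 and 4 (indices 2 and 3). The key detail is ordering: the ring index has to be derived from the irq before that ring's status alias can be read. A sketch of just that step (hypothetical helper):

static u32 cas_ringN_status(struct cas *cp, int irq, int *ring)
{
	*ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	return readl(cp->regs + REG_PLUS_INTRN_STATUS(*ring));
}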
*/ + cas_post_rxds_ringN(cp, 1, 0); + spin_lock(&cp->stat_lock[1]); + cp->net_stats[1].rx_dropped++; + spin_unlock(&cp->stat_lock[1]); + } + + if (status & INTR_RX_BUF_AE_1) + cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - + RX_AE_FREEN_VAL(1)); + + if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) + cas_post_rxcs_ringN(cp, 1); +} + +/* ring 2 handles a few more events than 3 and 4 */ +static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); + + /* check for shared interrupt */ + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ +#ifdef USE_NAPI + cas_mask_intr(cp); + netif_rx_schedule(dev); +#else + cas_rx_ringN(cp, 1, 0); +#endif + status &= ~INTR_RX_DONE_ALT; + } + if (status) + cas_handle_irq1(cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} +#endif + +static inline void cas_handle_irq(struct net_device *dev, + struct cas *cp, const u32 status) +{ + /* housekeeping interrupts */ + if (status & INTR_ERROR_MASK) + cas_abnormal_irq(dev, cp, status); + + if (status & INTR_RX_BUF_UNAVAIL) { + /* Frame arrived, no free RX buffers available. + * NOTE: we can get this on a link transition. + */ + cas_post_rxds_ringN(cp, 0, 0); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_dropped++; + spin_unlock(&cp->stat_lock[0]); + } else if (status & INTR_RX_BUF_AE) { + cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - + RX_AE_FREEN_VAL(0)); + } + + if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) + cas_post_rxcs_ringN(dev, cp, 0); +} + +static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 status = readl(cp->regs + REG_INTR_STATUS); + + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & (INTR_TX_ALL | INTR_TX_INTME)) { + cas_tx(dev, cp, status); + status &= ~(INTR_TX_ALL | INTR_TX_INTME); + } + + if (status & INTR_RX_DONE) { +#ifdef USE_NAPI + cas_mask_intr(cp); + netif_rx_schedule(dev); +#else + cas_rx_ringN(cp, 0, 0); +#endif + status &= ~INTR_RX_DONE; + } + + if (status) + cas_handle_irq(dev, cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} + + +#ifdef USE_NAPI +static int cas_poll(struct net_device *dev, int *budget) +{ + struct cas *cp = netdev_priv(dev); + int i, enable_intr, todo, credits; + u32 status = readl(cp->regs + REG_INTR_STATUS); + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + cas_tx(dev, cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + + /* NAPI rx packets. 
we spread the credits across all of the + * rxc rings + */ + todo = min(*budget, dev->quota); + + /* to make sure we're fair with the work we loop through each + * ring N_RX_COMP_RING times with a request of + * todo / N_RX_COMP_RINGS + */ + enable_intr = 1; + credits = 0; + for (i = 0; i < N_RX_COMP_RINGS; i++) { + int j; + for (j = 0; j < N_RX_COMP_RINGS; j++) { + credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS); + if (credits >= todo) { + enable_intr = 0; + goto rx_comp; + } + } + } + +rx_comp: + *budget -= credits; + dev->quota -= credits; + + /* final rx completion */ + spin_lock_irqsave(&cp->lock, flags); + if (status) + cas_handle_irq(dev, cp, status); + +#ifdef USE_PCI_INTB + if (N_RX_COMP_RINGS > 1) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); + if (status) + cas_handle_irq1(dev, cp, status); + } +#endif + +#ifdef USE_PCI_INTC + if (N_RX_COMP_RINGS > 2) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); + if (status) + cas_handle_irqN(dev, cp, status, 2); + } +#endif + +#ifdef USE_PCI_INTD + if (N_RX_COMP_RINGS > 3) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); + if (status) + cas_handle_irqN(dev, cp, status, 3); + } +#endif + spin_unlock_irqrestore(&cp->lock, flags); + if (enable_intr) { + netif_rx_complete(dev); + cas_unmask_intr(cp); + return 0; + } + return 1; +} +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cas_netpoll(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + cas_disable_irq(cp, 0); + cas_interrupt(cp->pdev->irq, dev, NULL); + cas_enable_irq(cp, 0); + +#ifdef USE_PCI_INTB + if (N_RX_COMP_RINGS > 1) { + /* cas_interrupt1(); */ + } +#endif +#ifdef USE_PCI_INTC + if (N_RX_COMP_RINGS > 2) { + /* cas_interruptN(); */ + } +#endif +#ifdef USE_PCI_INTD + if (N_RX_COMP_RINGS > 3) { + /* cas_interruptN(); */ + } +#endif +} +#endif + +static void cas_tx_timeout(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + if (!cp->hw_running) { + printk("%s: hrm.. hw not running!\n", dev->name); + return; + } + + printk(KERN_ERR "%s: MIF_STATE[%08x]\n", + dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); + + printk(KERN_ERR "%s: MAC_STATE[%08x]\n", + dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); + + printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " + "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", + dev->name, + readl(cp->regs + REG_TX_CFG), + readl(cp->regs + REG_MAC_TX_STATUS), + readl(cp->regs + REG_MAC_TX_CFG), + readl(cp->regs + REG_TX_FIFO_PKT_CNT), + readl(cp->regs + REG_TX_FIFO_WRITE_PTR), + readl(cp->regs + REG_TX_FIFO_READ_PTR), + readl(cp->regs + REG_TX_SM_1), + readl(cp->regs + REG_TX_SM_2)); + + printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", + dev->name, + readl(cp->regs + REG_RX_CFG), + readl(cp->regs + REG_MAC_RX_STATUS), + readl(cp->regs + REG_MAC_RX_CFG)); + + printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", + dev->name, + readl(cp->regs + REG_HP_STATE_MACHINE), + readl(cp->regs + REG_HP_STATUS0), + readl(cp->regs + REG_HP_STATUS1), + readl(cp->regs + REG_HP_STATUS2)); + +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + schedule_work(&cp->reset_task); +#endif +} + +static inline int cas_intme(int ring, int entry) +{ + /* Algorithm: IRQ every 1/2 of descriptors. 
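 *
 * For illustration, assuming a 128-entry TX ring: half the ring is 64,
 * so the mask below is 63 (0x3f) and the test succeeds only for
 * entries 0 and 64 -- i.e. TX_DESC_INTME is requested twice per trip
 * around the ring, matching "every 1/2 of descriptors".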
*/ + if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) + return 1; + return 0; +} + + +static void cas_write_txd(struct cas *cp, int ring, int entry, + dma_addr_t mapping, int len, u64 ctrl, int last) +{ + struct cas_tx_desc *txd = cp->init_txds[ring] + entry; + + ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); + if (cas_intme(ring, entry)) + ctrl |= TX_DESC_INTME; + if (last) + ctrl |= TX_DESC_EOF; + txd->control = cpu_to_le64(ctrl); + txd->buffer = cpu_to_le64(mapping); +} + +static inline void *tx_tiny_buf(struct cas *cp, const int ring, + const int entry) +{ + return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; +} + +static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, + const int entry, const int tentry) +{ + cp->tx_tiny_use[ring][tentry].nbufs++; + cp->tx_tiny_use[ring][entry].used = 1; + return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; +} + +static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, + struct sk_buff *skb) +{ + struct net_device *dev = cp->dev; + int entry, nr_frags, frag, tabort, tentry; + dma_addr_t mapping; + unsigned long flags; + u64 ctrl; + u32 len; + + spin_lock_irqsave(&cp->tx_lock[ring], flags); + + /* This is a hard error, log it. */ + if (TX_BUFFS_AVAIL(cp, ring) <= + CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&cp->tx_lock[ring], flags); + printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " + "queue awake!\n", dev->name); + return 1; + } + + ctrl = 0; + if (skb->ip_summed == CHECKSUM_HW) { + u64 csum_start_off, csum_stuff_off; + + csum_start_off = (u64) (skb->h.raw - skb->data); + csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data); + + ctrl = TX_DESC_CSUM_EN | + CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | + CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); + } + + entry = cp->tx_new[ring]; + cp->tx_skbs[ring][entry] = skb; + + nr_frags = skb_shinfo(skb)->nr_frags; + len = skb_headlen(skb); + mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), len, + PCI_DMA_TODEVICE); + + tentry = entry; + tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); + if (unlikely(tabort)) { + /* NOTE: len is always > tabort */ + cas_write_txd(cp, ring, entry, mapping, len - tabort, + ctrl | TX_DESC_SOF, 0); + entry = TX_DESC_NEXT(ring, entry); + + memcpy(tx_tiny_buf(cp, ring, entry), skb->data + + len - tabort, tabort); + mapping = tx_tiny_map(cp, ring, entry, tentry); + cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, + (nr_frags == 0)); + } else { + cas_write_txd(cp, ring, entry, mapping, len, ctrl | + TX_DESC_SOF, (nr_frags == 0)); + } + entry = TX_DESC_NEXT(ring, entry); + + for (frag = 0; frag < nr_frags; frag++) { + skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + + len = fragp->size; + mapping = pci_map_page(cp->pdev, fragp->page, + fragp->page_offset, len, + PCI_DMA_TODEVICE); + + tabort = cas_calc_tabort(cp, fragp->page_offset, len); + if (unlikely(tabort)) { + void *addr; + + /* NOTE: len is always > tabort */ + cas_write_txd(cp, ring, entry, mapping, len - tabort, + ctrl, 0); + entry = TX_DESC_NEXT(ring, entry); + + addr = cas_page_map(fragp->page); + memcpy(tx_tiny_buf(cp, ring, entry), + addr + fragp->page_offset + len - tabort, + tabort); + cas_page_unmap(addr); + mapping = tx_tiny_map(cp, ring, entry, tentry); + len = tabort; + } + + cas_write_txd(cp, ring, entry, mapping, len, ctrl, + (frag + 1 == nr_frags)); + entry = TX_DESC_NEXT(ring, entry); + } + + cp->tx_new[ring] = entry; + if (TX_BUFFS_AVAIL(cp, ring) <= 
CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) + netif_stop_queue(dev); + + if (netif_msg_tx_queued(cp)) + printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " + "avail %d\n", + dev->name, ring, entry, skb->len, + TX_BUFFS_AVAIL(cp, ring)); + writel(entry, cp->regs + REG_TX_KICKN(ring)); + spin_unlock_irqrestore(&cp->tx_lock[ring], flags); + return 0; +} + +static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + /* this is only used as a load-balancing hint, so it doesn't + * need to be SMP safe + */ + static int ring; + + skb = skb_padto(skb, cp->min_frame_size); + if (!skb) + return 0; + + /* XXX: we need some higher-level QoS hooks to steer packets to + * individual queues. + */ + if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) + return 1; + dev->trans_start = jiffies; + return 0; +} + +static void cas_init_tx_dma(struct cas *cp) +{ + u64 desc_dma = cp->block_dvma; + unsigned long off; + u32 val; + int i; + + /* set up tx completion writeback registers. must be 8-byte aligned */ +#ifdef USE_TX_COMPWB + off = offsetof(struct cas_init_block, tx_compwb); + writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); + writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); +#endif + + /* enable completion writebacks, enable paced mode, + * disable read pipe, and disable pre-interrupt compwbs + */ + val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | + TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | + TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | + TX_CFG_INTR_COMPWB_DIS; + + /* write out tx ring info and tx desc bases */ + for (i = 0; i < MAX_TX_RINGS; i++) { + off = (unsigned long) cp->init_txds[i] - + (unsigned long) cp->init_block; + + val |= CAS_TX_RINGN_BASE(i); + writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); + writel((desc_dma + off) & 0xffffffff, cp->regs + + REG_TX_DBN_LOW(i)); + /* don't zero out the kick register here as the system + * will wedge + */ + } + writel(val, cp->regs + REG_TX_CFG); + + /* program max burst sizes. these numbers should be different + * if doing QoS. + */ +#ifdef USE_QOS + writel(0x800, cp->regs + REG_TX_MAXBURST_0); + writel(0x1600, cp->regs + REG_TX_MAXBURST_1); + writel(0x2400, cp->regs + REG_TX_MAXBURST_2); + writel(0x4800, cp->regs + REG_TX_MAXBURST_3); +#else + writel(0x800, cp->regs + REG_TX_MAXBURST_0); + writel(0x800, cp->regs + REG_TX_MAXBURST_1); + writel(0x800, cp->regs + REG_TX_MAXBURST_2); + writel(0x800, cp->regs + REG_TX_MAXBURST_3); +#endif +} + +/* Must be invoked under cp->lock. */ +static inline void cas_init_dma(struct cas *cp) +{ + cas_init_tx_dma(cp); + cas_init_rx_dma(cp); +} + +/* Must be invoked under cp->lock. 
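 *
 * Rough sketch of the filtering strategy below (derived from the code
 * itself): the first few multicast addresses -- the inner comment says
 * 15 -- go into the alternate MAC-address (exact-match) registers, and
 * any remaining addresses fall back to a 256-bit hash filter spread
 * over sixteen 16-bit registers.  The hash bit for an address comes
 * from the top byte of the little-endian CRC, e.g. for an address
 * "addr" (illustrative name only):
 *
 *	crc = ether_crc_le(ETH_ALEN, addr) >> 24;	 top 8 bits, 0..255
 *	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 *
 * so the high nibble picks the register and the low nibble picks the
 * bit, counted from the most-significant end.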
*/ +static u32 cas_setup_multicast(struct cas *cp) +{ + u32 rxcfg = 0; + int i; + + if (cp->dev->flags & IFF_PROMISC) { + rxcfg |= MAC_RX_CFG_PROMISC_EN; + + } else if (cp->dev->flags & IFF_ALLMULTI) { + for (i=0; i < 16; i++) + writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); + rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; + + } else { + u16 hash_table[16]; + u32 crc; + struct dev_mc_list *dmi = cp->dev->mc_list; + int i; + + /* use the alternate mac address registers for the + * first 15 multicast addresses + */ + for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) { + if (!dmi) { + writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0)); + writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1)); + writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2)); + continue; + } + writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], + cp->regs + REG_MAC_ADDRN(i*3 + 0)); + writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], + cp->regs + REG_MAC_ADDRN(i*3 + 1)); + writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], + cp->regs + REG_MAC_ADDRN(i*3 + 2)); + dmi = dmi->next; + } + + /* use hw hash table for the next series of + * multicast addresses + */ + memset(hash_table, 0, sizeof(hash_table)); + while (dmi) { + crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); + crc >>= 24; + hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + dmi = dmi->next; + } + for (i=0; i < 16; i++) + writel(hash_table[i], cp->regs + + REG_MAC_HASH_TABLEN(i)); + rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; + } + + return rxcfg; +} + +/* must be invoked under cp->stat_lock[N_TX_RINGS] */ +static void cas_clear_mac_err(struct cas *cp) +{ + writel(0, cp->regs + REG_MAC_COLL_NORMAL); + writel(0, cp->regs + REG_MAC_COLL_FIRST); + writel(0, cp->regs + REG_MAC_COLL_EXCESS); + writel(0, cp->regs + REG_MAC_COLL_LATE); + writel(0, cp->regs + REG_MAC_TIMER_DEFER); + writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); + writel(0, cp->regs + REG_MAC_RECV_FRAME); + writel(0, cp->regs + REG_MAC_LEN_ERR); + writel(0, cp->regs + REG_MAC_ALIGN_ERR); + writel(0, cp->regs + REG_MAC_FCS_ERR); + writel(0, cp->regs + REG_MAC_RX_CODE_ERR); +} + + +static void cas_mac_reset(struct cas *cp) +{ + int i; + + /* do both TX and RX reset */ + writel(0x1, cp->regs + REG_MAC_TX_RESET); + writel(0x1, cp->regs + REG_MAC_RX_RESET); + + /* wait for TX */ + i = STOP_TRIES; + while (i-- > 0) { + if (readl(cp->regs + REG_MAC_TX_RESET) == 0) + break; + udelay(10); + } + + /* wait for RX */ + i = STOP_TRIES; + while (i-- > 0) { + if (readl(cp->regs + REG_MAC_RX_RESET) == 0) + break; + udelay(10); + } + + if (readl(cp->regs + REG_MAC_TX_RESET) | + readl(cp->regs + REG_MAC_RX_RESET)) + printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", + cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), + readl(cp->regs + REG_MAC_RX_RESET), + readl(cp->regs + REG_MAC_STATE_MACHINE)); +} + + +/* Must be invoked under cp->lock. */ +static void cas_init_mac(struct cas *cp) +{ + unsigned char *e = &cp->dev->dev_addr[0]; + int i; +#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE + u32 rxcfg; +#endif + cas_mac_reset(cp); + + /* setup core arbitration weight register */ + writel(CAWR_RR_DIS, cp->regs + REG_CAWR); + + /* XXX Use pci_dma_burst_advice() */ +#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) + /* set the infinite burst register for chips that don't have + * pci issues. 
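 *
 * (CAS_FLAG_TARGET_ABORT, tested below, is the flag
 * cas_check_pci_invariants() sets for the early Sun revisions below
 * CAS_ID_REVPLUS02u, so infinite burst stays off on exactly those
 * parts.)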
+ */ + if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) + writel(INF_BURST_EN, cp->regs + REG_INF_BURST); +#endif + + writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); + + writel(0x00, cp->regs + REG_MAC_IPG0); + writel(0x08, cp->regs + REG_MAC_IPG1); + writel(0x04, cp->regs + REG_MAC_IPG2); + + /* change later for 802.3z */ + writel(0x40, cp->regs + REG_MAC_SLOT_TIME); + + /* min frame + FCS */ + writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); + + /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we + * specify the maximum frame size to prevent RX tag errors on + * oversized frames. + */ + writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | + CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, + (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), + cp->regs + REG_MAC_FRAMESIZE_MAX); + + /* NOTE: crc_size is used as a surrogate for half-duplex. + * workaround saturn half-duplex issue by increasing preamble + * size to 65 bytes. + */ + if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) + writel(0x41, cp->regs + REG_MAC_PA_SIZE); + else + writel(0x07, cp->regs + REG_MAC_PA_SIZE); + writel(0x04, cp->regs + REG_MAC_JAM_SIZE); + writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); + writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); + + writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); + + writel(0, cp->regs + REG_MAC_ADDR_FILTER0); + writel(0, cp->regs + REG_MAC_ADDR_FILTER1); + writel(0, cp->regs + REG_MAC_ADDR_FILTER2); + writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); + writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); + + /* setup mac address in perfect filter array */ + for (i = 0; i < 45; i++) + writel(0x0, cp->regs + REG_MAC_ADDRN(i)); + + writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); + writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); + writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); + + writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); + writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); + writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); + +#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE + cp->mac_rx_cfg = cas_setup_multicast(cp); +#else + /* WTZ: Do what Adrian did in cas_set_multicast. Doing + * a writel does not seem to be necessary because Cassini + * seems to preserve the configuration when we do the reset. + * If the chip is in trouble, though, it is not clear if we + * can really count on this behavior. cas_set_multicast uses + * spin_lock_irqsave, but we are called only in cas_init_hw and + * cas_init_hw is protected by cas_lock_all, which calls + * spin_lock_irq (so it doesn't need to save the flags, and + * we should be OK for the writel, as that is the only + * difference). + */ + cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); + writel(rxcfg, cp->regs + REG_MAC_RX_CFG); +#endif + spin_lock(&cp->stat_lock[N_TX_RINGS]); + cas_clear_mac_err(cp); + spin_unlock(&cp->stat_lock[N_TX_RINGS]); + + /* Setup MAC interrupts. We want to get all of the interesting + * counter expiration events, but we do not want to hear about + * normal rx/tx as the DMA engine tells us that. + */ + writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); + writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); + + /* Don't enable even the PAUSE interrupts for now, we + * make no use of those events other than to record them. + */ + writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); +} + +/* Must be invoked under cp->lock. */ +static void cas_init_pause_thresholds(struct cas *cp) +{ + /* Calculate pause thresholds. 
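 * (For a feel of the numbers, assuming a 1500-byte MTU: max_frame =
 * (1500 + 14 + 4 + 4 + 64) & ~63 = 1536 bytes, so whenever the RX FIFO
 * holds at least three such frames the OFF threshold below works out
 * to fifo_size - 2 * 1536 and the ON threshold one frame lower still.)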
Setting the OFF threshold to the + * full RX fifo size effectively disables PAUSE generation + */ + if (cp->rx_fifo_size <= (2 * 1024)) { + cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; + } else { + int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; + if (max_frame * 3 > cp->rx_fifo_size) { + cp->rx_pause_off = 7104; + cp->rx_pause_on = 960; + } else { + int off = (cp->rx_fifo_size - (max_frame * 2)); + int on = off - max_frame; + cp->rx_pause_off = off; + cp->rx_pause_on = on; + } + } +} + +static int cas_vpd_match(const void __iomem *p, const char *str) +{ + int len = strlen(str) + 1; + int i; + + for (i = 0; i < len; i++) { + if (readb(p + i) != str[i]) + return 0; + } + return 1; +} + + +/* get the mac address by reading the vpd information in the rom. + * also get the phy type and determine if there's an entropy generator. + * NOTE: this is a bit convoluted for the following reasons: + * 1) vpd info has order-dependent mac addresses for multinic cards + * 2) the only way to determine the nic order is to use the slot + * number. + * 3) fiber cards don't have bridges, so their slot numbers don't + * mean anything. + * 4) we don't actually know we have a fiber card until after + * the mac addresses are parsed. + */ +static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, + const int offset) +{ + void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; + void __iomem *base, *kstart; + int i, len; + int found = 0; +#define VPD_FOUND_MAC 0x01 +#define VPD_FOUND_PHY 0x02 + + int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ + int mac_off = 0; + + /* give us access to the PROM */ + writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, + cp->regs + REG_BIM_LOCAL_DEV_EN); + + /* check for an expansion rom */ + if (readb(p) != 0x55 || readb(p + 1) != 0xaa) + goto use_random_mac_addr; + + /* search for beginning of vpd */ + base = 0; + for (i = 2; i < EXPANSION_ROM_SIZE; i++) { + /* check for PCIR */ + if ((readb(p + i + 0) == 0x50) && + (readb(p + i + 1) == 0x43) && + (readb(p + i + 2) == 0x49) && + (readb(p + i + 3) == 0x52)) { + base = p + (readb(p + i + 8) | + (readb(p + i + 9) << 8)); + break; + } + } + + if (!base || (readb(base) != 0x82)) + goto use_random_mac_addr; + + i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; + while (i < EXPANSION_ROM_SIZE) { + if (readb(base + i) != 0x90) /* no vpd found */ + goto use_random_mac_addr; + + /* found a vpd field */ + len = readb(base + i + 1) | (readb(base + i + 2) << 8); + + /* extract keywords */ + kstart = base + i + 3; + p = kstart; + while ((p - kstart) < len) { + int klen = readb(p + 2); + int j; + char type; + + p += 3; + + /* look for the following things: + * -- correct length == 29 + * 3 (type) + 2 (size) + + * 18 (strlen("local-mac-address") + 1) + + * 6 (mac addr) + * -- VPD Instance 'I' + * -- VPD Type Bytes 'B' + * -- VPD data length == 6 + * -- property string == local-mac-address + * + * -- correct length == 24 + * 3 (type) + 2 (size) + + * 12 (strlen("entropy-dev") + 1) + + * 7 (strlen("vms110") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'B' + * -- VPD data length == 7 + * -- property string == entropy-dev + * + * -- correct length == 18 + * 3 (type) + 2 (size) + + * 9 (strlen("phy-type") + 1) + + * 4 (strlen("pcs") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'S' + * -- VPD data length == 4 + * -- property string == phy-type + * + * -- correct length == 23 + * 3 (type) + 2 (size) + + * 14 (strlen("phy-interface") + 1) + + * 4 (strlen("pcs") + 1) + * -- VPD Instance 'I' + * -- 
VPD Type String 'S' + * -- VPD data length == 4 + * -- property string == phy-interface + */ + if (readb(p) != 'I') + goto next; + + /* finally, check string and length */ + type = readb(p + 3); + if (type == 'B') { + if ((klen == 29) && readb(p + 4) == 6 && + cas_vpd_match(p + 5, + "local-mac-address")) { + if (mac_off++ > offset) + goto next; + + /* set mac address */ + for (j = 0; j < 6; j++) + dev_addr[j] = + readb(p + 23 + j); + goto found_mac; + } + } + + if (type != 'S') + goto next; + +#ifdef USE_ENTROPY_DEV + if ((klen == 24) && + cas_vpd_match(p + 5, "entropy-dev") && + cas_vpd_match(p + 17, "vms110")) { + cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; + goto next; + } +#endif + + if (found & VPD_FOUND_PHY) + goto next; + + if ((klen == 18) && readb(p + 4) == 4 && + cas_vpd_match(p + 5, "phy-type")) { + if (cas_vpd_match(p + 14, "pcs")) { + phy_type = CAS_PHY_SERDES; + goto found_phy; + } + } + + if ((klen == 23) && readb(p + 4) == 4 && + cas_vpd_match(p + 5, "phy-interface")) { + if (cas_vpd_match(p + 19, "pcs")) { + phy_type = CAS_PHY_SERDES; + goto found_phy; + } + } +found_mac: + found |= VPD_FOUND_MAC; + goto next; + +found_phy: + found |= VPD_FOUND_PHY; + +next: + p += klen; + } + i += len + 3; + } + +use_random_mac_addr: + if (found & VPD_FOUND_MAC) + goto done; + + /* Sun MAC prefix then 3 random bytes. */ + printk(PFX "MAC address not found in ROM VPD\n"); + dev_addr[0] = 0x08; + dev_addr[1] = 0x00; + dev_addr[2] = 0x20; + get_random_bytes(dev_addr + 3, 3); + +done: + writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); + return phy_type; +} + +/* check pci invariants */ +static void cas_check_pci_invariants(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + u8 rev; + + cp->cas_flags = 0; + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if ((pdev->vendor == PCI_VENDOR_ID_SUN) && + (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { + if (rev >= CAS_ID_REVPLUS) + cp->cas_flags |= CAS_FLAG_REG_PLUS; + if (rev < CAS_ID_REVPLUS02u) + cp->cas_flags |= CAS_FLAG_TARGET_ABORT; + + /* Original Cassini supports HW CSUM, but it's not + * enabled by default as it can trigger TX hangs. + */ + if (rev < CAS_ID_REV2) + cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; + } else { + /* Only sun has original cassini chips. */ + cp->cas_flags |= CAS_FLAG_REG_PLUS; + + /* We use a flag because the same phy might be externally + * connected. + */ + if ((pdev->vendor == PCI_VENDOR_ID_NS) && + (pdev->device == PCI_DEVICE_ID_NS_SATURN)) + cp->cas_flags |= CAS_FLAG_SATURN; + } +} + + +static int cas_check_invariants(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + u32 cfg; + int i; + + /* get page size for rx buffers. */ + cp->page_order = 0; +#ifdef USE_PAGE_ORDER + if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { + /* see if we can allocate larger pages */ + struct page *page = alloc_pages(GFP_ATOMIC, + CAS_JUMBO_PAGE_SHIFT - + PAGE_SHIFT); + if (page) { + __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); + cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; + } else { + printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); + } + } +#endif + cp->page_size = (PAGE_SIZE << cp->page_order); + + /* Fetch the FIFO configurations. */ + cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; + cp->rx_fifo_size = RX_FIFO_SIZE; + + /* finish phy determination. MDIO1 takes precedence over MDIO0 if + * they're both connected. 
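 *
 * If the VPD already reported a SERDES phy the code below returns
 * early; otherwise it reads MIF_CFG, prefers MDIO1 over MDIO0 as
 * stated above, and then probes all 32 MII addresses (up to three
 * attempts each) for a PHYSID1/PHYSID2 pair that is neither zero nor
 * all-ones.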
+ */ + cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, + PCI_SLOT(pdev->devfn)); + if (cp->phy_type & CAS_PHY_SERDES) { + cp->cas_flags |= CAS_FLAG_1000MB_CAP; + return 0; /* no more checking needed */ + } + + /* MII */ + cfg = readl(cp->regs + REG_MIF_CFG); + if (cfg & MIF_CFG_MDIO_1) { + cp->phy_type = CAS_PHY_MII_MDIO1; + } else if (cfg & MIF_CFG_MDIO_0) { + cp->phy_type = CAS_PHY_MII_MDIO0; + } + + cas_mif_poll(cp, 0); + writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); + + for (i = 0; i < 32; i++) { + u32 phy_id; + int j; + + for (j = 0; j < 3; j++) { + cp->phy_addr = i; + phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; + phy_id |= cas_phy_read(cp, MII_PHYSID2); + if (phy_id && (phy_id != 0xFFFFFFFF)) { + cp->phy_id = phy_id; + goto done; + } + } + } + printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", + readl(cp->regs + REG_MIF_STATE_MACHINE)); + return -1; + +done: + /* see if we can do gigabit */ + cfg = cas_phy_read(cp, MII_BMSR); + if ((cfg & CAS_BMSR_1000_EXTEND) && + cas_phy_read(cp, CAS_MII_1000_EXTEND)) + cp->cas_flags |= CAS_FLAG_1000MB_CAP; + return 0; +} + +/* Must be invoked under cp->lock. */ +static inline void cas_start_dma(struct cas *cp) +{ + int i; + u32 val; + int txfailed = 0; + + /* enable dma */ + val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; + writel(val, cp->regs + REG_TX_CFG); + val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; + writel(val, cp->regs + REG_RX_CFG); + + /* enable the mac */ + val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; + writel(val, cp->regs + REG_MAC_TX_CFG); + val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; + writel(val, cp->regs + REG_MAC_RX_CFG); + + i = STOP_TRIES; + while (i-- > 0) { + val = readl(cp->regs + REG_MAC_TX_CFG); + if ((val & MAC_TX_CFG_EN)) + break; + udelay(10); + } + if (i < 0) txfailed = 1; + i = STOP_TRIES; + while (i-- > 0) { + val = readl(cp->regs + REG_MAC_RX_CFG); + if ((val & MAC_RX_CFG_EN)) { + if (txfailed) { + printk(KERN_ERR + "%s: enabling mac failed [tx:%08x:%08x].\n", + cp->dev->name, + readl(cp->regs + REG_MIF_STATE_MACHINE), + readl(cp->regs + REG_MAC_STATE_MACHINE)); + } + goto enable_rx_done; + } + udelay(10); + } + printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", + cp->dev->name, + (txfailed? "tx,rx":"rx"), + readl(cp->regs + REG_MIF_STATE_MACHINE), + readl(cp->regs + REG_MAC_STATE_MACHINE)); + +enable_rx_done: + cas_unmask_intr(cp); /* enable interrupts */ + writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); + writel(0, cp->regs + REG_RX_COMP_TAIL); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + if (N_RX_DESC_RINGS > 1) + writel(RX_DESC_RINGN_SIZE(1) - 4, + cp->regs + REG_PLUS_RX_KICK1); + + for (i = 1; i < N_RX_COMP_RINGS; i++) + writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); + } +} + +/* Must be invoked under cp->lock. */ +static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, + int *pause) +{ + u32 val = readl(cp->regs + REG_PCS_MII_LPA); + *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; + *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; + if (val & PCS_MII_LPA_ASYM_PAUSE) + *pause |= 0x10; + *spd = 1000; +} + +/* Must be invoked under cp->lock. 
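 *
 * Both this helper and cas_read_pcs_link_mode() above encode the
 * negotiated pause capability the same way: bit 0x01 of *pause means
 * symmetric pause, bit 0x10 means asymmetric pause.
 * cas_set_link_modes() later sets MAC_CTRL_CFG_SEND_PAUSE_EN whenever
 * either bit is set and adds MAC_CTRL_CFG_RECV_PAUSE_EN only for the
 * symmetric case.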
*/ +static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, + int *pause) +{ + u32 val; + + *fd = 0; + *spd = 10; + *pause = 0; + + /* use GMII registers */ + val = cas_phy_read(cp, MII_LPA); + if (val & CAS_LPA_PAUSE) + *pause = 0x01; + + if (val & CAS_LPA_ASYM_PAUSE) + *pause |= 0x10; + + if (val & LPA_DUPLEX) + *fd = 1; + if (val & LPA_100) + *spd = 100; + + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + val = cas_phy_read(cp, CAS_MII_1000_STATUS); + if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) + *spd = 1000; + if (val & CAS_LPA_1000FULL) + *fd = 1; + } +} + +/* A link-up condition has occurred, initialize and enable the + * rest of the chip. + * + * Must be invoked under cp->lock. + */ +static void cas_set_link_modes(struct cas *cp) +{ + u32 val; + int full_duplex, speed, pause; + + full_duplex = 0; + speed = 10; + pause = 0; + + if (CAS_PHY_MII(cp->phy_type)) { + cas_mif_poll(cp, 0); + val = cas_phy_read(cp, MII_BMCR); + if (val & BMCR_ANENABLE) { + cas_read_mii_link_mode(cp, &full_duplex, &speed, + &pause); + } else { + if (val & BMCR_FULLDPLX) + full_duplex = 1; + + if (val & BMCR_SPEED100) + speed = 100; + else if (val & CAS_BMCR_SPEED1000) + speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? + 1000 : 100; + } + cas_mif_poll(cp, 1); + + } else { + val = readl(cp->regs + REG_PCS_MII_CTRL); + cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); + if ((val & PCS_MII_AUTONEG_EN) == 0) { + if (val & PCS_MII_CTRL_DUPLEX) + full_duplex = 1; + } + } + + if (netif_msg_link(cp)) + printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", + cp->dev->name, speed, (full_duplex ? "full" : "half")); + + val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; + if (CAS_PHY_MII(cp->phy_type)) { + val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; + if (!full_duplex) + val |= MAC_XIF_DISABLE_ECHO; + } + if (full_duplex) + val |= MAC_XIF_FDPLX_LED; + if (speed == 1000) + val |= MAC_XIF_GMII_MODE; + writel(val, cp->regs + REG_MAC_XIF_CFG); + + /* deal with carrier and collision detect. */ + val = MAC_TX_CFG_IPG_EN; + if (full_duplex) { + val |= MAC_TX_CFG_IGNORE_CARRIER; + val |= MAC_TX_CFG_IGNORE_COLL; + } else { +#ifndef USE_CSMA_CD_PROTO + val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; + val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; +#endif + } + /* val now set up for REG_MAC_TX_CFG */ + + /* If gigabit and half-duplex, enable carrier extension + * mode. increase slot time to 512 bytes as well. + * else, disable it and make sure slot time is 64 bytes. + * also activate checksum bug workaround + */ + if ((speed == 1000) && !full_duplex) { + writel(val | MAC_TX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_TX_CFG); + + val = readl(cp->regs + REG_MAC_RX_CFG); + val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ + writel(val | MAC_RX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_RX_CFG); + + writel(0x200, cp->regs + REG_MAC_SLOT_TIME); + + cp->crc_size = 4; + /* minimum size gigabit frame at half duplex */ + cp->min_frame_size = CAS_1000MB_MIN_FRAME; + + } else { + writel(val, cp->regs + REG_MAC_TX_CFG); + + /* checksum bug workaround. 
don't strip FCS when in + * half-duplex mode + */ + val = readl(cp->regs + REG_MAC_RX_CFG); + if (full_duplex) { + val |= MAC_RX_CFG_STRIP_FCS; + cp->crc_size = 0; + cp->min_frame_size = CAS_MIN_MTU; + } else { + val &= ~MAC_RX_CFG_STRIP_FCS; + cp->crc_size = 4; + cp->min_frame_size = CAS_MIN_FRAME; + } + writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_RX_CFG); + writel(0x40, cp->regs + REG_MAC_SLOT_TIME); + } + + if (netif_msg_link(cp)) { + if (pause & 0x01) { + printk(KERN_INFO "%s: Pause is enabled " + "(rxfifo: %d off: %d on: %d)\n", + cp->dev->name, + cp->rx_fifo_size, + cp->rx_pause_off, + cp->rx_pause_on); + } else if (pause & 0x10) { + printk(KERN_INFO "%s: TX pause enabled\n", + cp->dev->name); + } else { + printk(KERN_INFO "%s: Pause is disabled\n", + cp->dev->name); + } + } + + val = readl(cp->regs + REG_MAC_CTRL_CFG); + val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); + if (pause) { /* symmetric or asymmetric pause */ + val |= MAC_CTRL_CFG_SEND_PAUSE_EN; + if (pause & 0x01) { /* symmetric pause */ + val |= MAC_CTRL_CFG_RECV_PAUSE_EN; + } + } + writel(val, cp->regs + REG_MAC_CTRL_CFG); + cas_start_dma(cp); +} + +/* Must be invoked under cp->lock. */ +static void cas_init_hw(struct cas *cp, int restart_link) +{ + if (restart_link) + cas_phy_init(cp); + + cas_init_pause_thresholds(cp); + cas_init_mac(cp); + cas_init_dma(cp); + + if (restart_link) { + /* Default aneg parameters */ + cp->timer_ticks = 0; + cas_begin_auto_negotiation(cp, NULL); + } else if (cp->lstate == link_up) { + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } +} + +/* Must be invoked under cp->lock. on earlier cassini boards, + * SOFT_0 is tied to PCI reset. we use this to force a pci reset, + * let it settle out, and then restore pci state. + */ +static void cas_hard_reset(struct cas *cp) +{ + writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); + udelay(20); + pci_restore_state(cp->pdev); +} + + +static void cas_global_reset(struct cas *cp, int blkflag) +{ + int limit; + + /* issue a global reset. don't use RSTOUT. */ + if (blkflag && !CAS_PHY_MII(cp->phy_type)) { + /* For PCS, when the blkflag is set, we should set the + * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of + * the last autonegotiation from being cleared. We'll + * need some special handling if the chip is set into a + * loopback mode. + */ + writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), + cp->regs + REG_SW_RESET); + } else { + writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); + } + + /* need to wait at least 3ms before polling register */ + mdelay(3); + + limit = STOP_TRIES; + while (limit-- > 0) { + u32 val = readl(cp->regs + REG_SW_RESET); + if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) + goto done; + udelay(10); + } + printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); + +done: + /* enable various BIM interrupts */ + writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | + BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); + + /* clear out pci error status mask for handled errors. + * we don't deal with DMA counter overflows as they happen + * all the time. 
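 *
 * (Reading of the write below: a set bit in REG_PCI_ERR_STATUS_MASK
 * appears to mask that error source, so writing all ones except the
 * handled bits leaves only BADACK, DTRTO, OTHER and the BIM DMA
 * read/write errors able to raise a PCI error interrupt.)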
+ */ + writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | + PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | + PCI_ERR_BIM_DMA_READ), cp->regs + + REG_PCI_ERR_STATUS_MASK); + + /* set up for MII by default to address mac rx reset timeout + * issue + */ + writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); +} + +static void cas_reset(struct cas *cp, int blkflag) +{ + u32 val; + + cas_mask_intr(cp); + cas_global_reset(cp, blkflag); + cas_mac_reset(cp); + cas_entropy_reset(cp); + + /* disable dma engines. */ + val = readl(cp->regs + REG_TX_CFG); + val &= ~TX_CFG_DMA_EN; + writel(val, cp->regs + REG_TX_CFG); + + val = readl(cp->regs + REG_RX_CFG); + val &= ~RX_CFG_DMA_EN; + writel(val, cp->regs + REG_RX_CFG); + + /* program header parser */ + if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || + (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { + cas_load_firmware(cp, CAS_HP_FIRMWARE); + } else { + cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); + } + + /* clear out error registers */ + spin_lock(&cp->stat_lock[N_TX_RINGS]); + cas_clear_mac_err(cp); + spin_unlock(&cp->stat_lock[N_TX_RINGS]); +} + +/* Shut down the chip, must be called with pm_sem held. */ +static void cas_shutdown(struct cas *cp) +{ + unsigned long flags; + + /* Make us not-running to avoid timers respawning */ + cp->hw_running = 0; + + del_timer_sync(&cp->link_timer); + + /* Stop the reset task */ +#if 0 + while (atomic_read(&cp->reset_task_pending_mtu) || + atomic_read(&cp->reset_task_pending_spare) || + atomic_read(&cp->reset_task_pending_all)) + schedule(); + +#else + while (atomic_read(&cp->reset_task_pending)) + schedule(); +#endif + /* Actually stop the chip */ + cas_lock_all_save(cp, flags); + cas_reset(cp, 0); + if (cp->cas_flags & CAS_FLAG_SATURN) + cas_phy_powerdown(cp); + cas_unlock_all_restore(cp, flags); +} + +static int cas_change_mtu(struct net_device *dev, int new_mtu) +{ + struct cas *cp = netdev_priv(dev); + + if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) + return -EINVAL; + + dev->mtu = new_mtu; + if (!netif_running(dev) || !netif_device_present(dev)) + return 0; + + /* let the reset task handle it */ +#if 1 + atomic_inc(&cp->reset_task_pending); + if ((cp->phy_type & CAS_PHY_SERDES)) { + atomic_inc(&cp->reset_task_pending_all); + } else { + atomic_inc(&cp->reset_task_pending_mtu); + } + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? + CAS_RESET_ALL : CAS_RESET_MTU); + printk(KERN_ERR "reset called in cas_change_mtu\n"); + schedule_work(&cp->reset_task); +#endif + + flush_scheduled_work(); + return 0; +} + +static void cas_clean_txd(struct cas *cp, int ring) +{ + struct cas_tx_desc *txd = cp->init_txds[ring]; + struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; + u64 daddr, dlen; + int i, size; + + size = TX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + int frag; + + if (skbs[i] == NULL) + continue; + + skb = skbs[i]; + skbs[i] = NULL; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + int ent = i & (size - 1); + + /* first buffer is never a tiny buffer and so + * needs to be unmapped. + */ + daddr = le64_to_cpu(txd[ent].buffer); + dlen = CAS_VAL(TX_DESC_BUFLEN, + le64_to_cpu(txd[ent].control)); + pci_unmap_page(cp->pdev, daddr, dlen, + PCI_DMA_TODEVICE); + + if (frag != skb_shinfo(skb)->nr_frags) { + i++; + + /* next buffer might by a tiny buffer. + * skip past it. 
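 *
 * (When cas_xmit_tx_ringN() had to bounce the tail of a chunk through
 * a "tiny" buffer -- the workaround for target aborts on older chips,
 * see cas_tx_tiny_free() -- it consumed one extra descriptor, so
 * cleanup advances one extra slot here; the tiny buffer itself needs
 * no unmap because it lives in the coherent block allocated by
 * cas_tx_tiny_alloc().)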
+ */ + ent = i & (size - 1); + if (cp->tx_tiny_use[ring][ent].used) + i++; + } + } + dev_kfree_skb_any(skb); + } + + /* zero out tiny buf usage */ + memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); +} + +/* freed on close */ +static inline void cas_free_rx_desc(struct cas *cp, int ring) +{ + cas_page_t **page = cp->rx_pages[ring]; + int i, size; + + size = RX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + if (page[i]) { + cas_page_free(cp, page[i]); + page[i] = NULL; + } + } +} + +static void cas_free_rxds(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_DESC_RINGS; i++) + cas_free_rx_desc(cp, i); +} + +/* Must be invoked under cp->lock. */ +static void cas_clean_rings(struct cas *cp) +{ + int i; + + /* need to clean all tx rings */ + memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); + memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); + for (i = 0; i < N_TX_RINGS; i++) + cas_clean_txd(cp, i); + + /* zero out init block */ + memset(cp->init_block, 0, sizeof(struct cas_init_block)); + cas_clean_rxds(cp); + cas_clean_rxcs(cp); +} + +/* allocated on open */ +static inline int cas_alloc_rx_desc(struct cas *cp, int ring) +{ + cas_page_t **page = cp->rx_pages[ring]; + int size, i = 0; + + size = RX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) + return -1; + } + return 0; +} + +static int cas_alloc_rxds(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_DESC_RINGS; i++) { + if (cas_alloc_rx_desc(cp, i) < 0) { + cas_free_rxds(cp); + return -1; + } + } + return 0; +} + +static void cas_reset_task(void *data) +{ + struct cas *cp = (struct cas *) data; +#if 0 + int pending = atomic_read(&cp->reset_task_pending); +#else + int pending_all = atomic_read(&cp->reset_task_pending_all); + int pending_spare = atomic_read(&cp->reset_task_pending_spare); + int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); + + if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { + /* We can have more tasks scheduled than actually + * needed. + */ + atomic_dec(&cp->reset_task_pending); + return; + } +#endif + /* The link went down, we reset the ring, but keep + * DMA stopped. Use this function for reset + * on error as well. + */ + if (cp->hw_running) { + unsigned long flags; + + /* Make sure we don't get interrupts or tx packets */ + netif_device_detach(cp->dev); + cas_lock_all_save(cp, flags); + + if (cp->opened) { + /* We call cas_spare_recover when we call cas_open. + * but we do not initialize the lists cas_spare_recover + * uses until cas_open is called. + */ + cas_spare_recover(cp, GFP_ATOMIC); + } +#if 1 + /* test => only pending_spare set */ + if (!pending_all && !pending_mtu) + goto done; +#else + if (pending == CAS_RESET_SPARE) + goto done; +#endif + /* when pending == CAS_RESET_ALL, the following + * call to cas_init_hw will restart auto negotiation. + * Setting the second argument of cas_reset to + * !(pending == CAS_RESET_ALL) will set this argument + * to 1 (avoiding reinitializing the PHY for the normal + * PCS case) when auto negotiation is not restarted. 
+ */ +#if 1 + cas_reset(cp, !(pending_all > 0)); + if (cp->opened) + cas_clean_rings(cp); + cas_init_hw(cp, (pending_all > 0)); +#else + cas_reset(cp, !(pending == CAS_RESET_ALL)); + if (cp->opened) + cas_clean_rings(cp); + cas_init_hw(cp, pending == CAS_RESET_ALL); +#endif + +done: + cas_unlock_all_restore(cp, flags); + netif_device_attach(cp->dev); + } +#if 1 + atomic_sub(pending_all, &cp->reset_task_pending_all); + atomic_sub(pending_spare, &cp->reset_task_pending_spare); + atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); + atomic_dec(&cp->reset_task_pending); +#else + atomic_set(&cp->reset_task_pending, 0); +#endif +} + +static void cas_link_timer(unsigned long data) +{ + struct cas *cp = (struct cas *) data; + int mask, pending = 0, reset = 0; + unsigned long flags; + + if (link_transition_timeout != 0 && + cp->link_transition_jiffies_valid && + ((jiffies - cp->link_transition_jiffies) > + (link_transition_timeout))) { + /* One-second counter so link-down workaround doesn't + * cause resets to occur so fast as to fool the switch + * into thinking the link is down. + */ + cp->link_transition_jiffies_valid = 0; + } + + if (!cp->hw_running) + return; + + spin_lock_irqsave(&cp->lock, flags); + cas_lock_tx(cp); + cas_entropy_gather(cp); + + /* If the link task is still pending, we just + * reschedule the link timer + */ +#if 1 + if (atomic_read(&cp->reset_task_pending_all) || + atomic_read(&cp->reset_task_pending_spare) || + atomic_read(&cp->reset_task_pending_mtu)) + goto done; +#else + if (atomic_read(&cp->reset_task_pending)) + goto done; +#endif + + /* check for rx cleaning */ + if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { + int i, rmask; + + for (i = 0; i < MAX_RX_DESC_RINGS; i++) { + rmask = CAS_FLAG_RXD_POST(i); + if ((mask & rmask) == 0) + continue; + + /* post_rxds will do a mod_timer */ + if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { + pending = 1; + continue; + } + cp->cas_flags &= ~rmask; + } + } + + if (CAS_PHY_MII(cp->phy_type)) { + u16 bmsr; + cas_mif_poll(cp, 0); + bmsr = cas_phy_read(cp, MII_BMSR); + /* WTZ: Solaris driver reads this twice, but that + * may be due to the PCS case and the use of a + * common implementation. Read it twice here to be + * safe. 
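 *
 * (The double read is also what the MII register definition suggests:
 * the BMSR link-status bit is latched low on a link failure, so the
 * first read returns and clears any latched failure and the second
 * read reflects the current link state.)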
+ */ + bmsr = cas_phy_read(cp, MII_BMSR); + cas_mif_poll(cp, 1); + readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ + reset = cas_mii_link_check(cp, bmsr); + } else { + reset = cas_pcs_link_check(cp); + } + + if (reset) + goto done; + + /* check for tx state machine confusion */ + if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { + u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); + u32 wptr, rptr; + int tlm = CAS_VAL(MAC_SM_TLM, val); + + if (((tlm == 0x5) || (tlm == 0x3)) && + (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { + if (netif_msg_tx_err(cp)) + printk(KERN_DEBUG "%s: tx err: " + "MAC_STATE[%08x]\n", + cp->dev->name, val); + reset = 1; + goto done; + } + + val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); + wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); + rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); + if ((val == 0) && (wptr != rptr)) { + if (netif_msg_tx_err(cp)) + printk(KERN_DEBUG "%s: tx err: " + "TX_FIFO[%08x:%08x:%08x]\n", + cp->dev->name, val, wptr, rptr); + reset = 1; + } + + if (reset) + cas_hard_reset(cp); + } + +done: + if (reset) { +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + printk(KERN_ERR "reset called in cas_link_timer\n"); + schedule_work(&cp->reset_task); +#endif + } + + if (!pending) + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); + cas_unlock_tx(cp); + spin_unlock_irqrestore(&cp->lock, flags); +} + +/* tiny buffers are used to avoid target abort issues with + * older cassini's + */ +static void cas_tx_tiny_free(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + int i; + + for (i = 0; i < N_TX_RINGS; i++) { + if (!cp->tx_tiny_bufs[i]) + continue; + + pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, + cp->tx_tiny_bufs[i], + cp->tx_tiny_dvma[i]); + cp->tx_tiny_bufs[i] = NULL; + } +} + +static int cas_tx_tiny_alloc(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + int i; + + for (i = 0; i < N_TX_RINGS; i++) { + cp->tx_tiny_bufs[i] = + pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, + &cp->tx_tiny_dvma[i]); + if (!cp->tx_tiny_bufs[i]) { + cas_tx_tiny_free(cp); + return -1; + } + } + return 0; +} + + +static int cas_open(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + int hw_was_up, err; + unsigned long flags; + + down(&cp->pm_sem); + + hw_was_up = cp->hw_running; + + /* The power-management semaphore protects the hw_running + * etc. state so it is safe to do this bit without cp->lock + */ + if (!cp->hw_running) { + /* Reset the chip */ + cas_lock_all_save(cp, flags); + /* We set the second arg to cas_reset to zero + * because cas_init_hw below will have its second + * argument set to non-zero, which will force + * autonegotiation to start. + */ + cas_reset(cp, 0); + cp->hw_running = 1; + cas_unlock_all_restore(cp, flags); + } + + if (cas_tx_tiny_alloc(cp) < 0) + return -ENOMEM; + + /* alloc rx descriptors */ + err = -ENOMEM; + if (cas_alloc_rxds(cp) < 0) + goto err_tx_tiny; + + /* allocate spares */ + cas_spare_init(cp); + cas_spare_recover(cp, GFP_KERNEL); + + /* We can now request the interrupt as we know it's masked + * on the controller. 
cassini+ has up to 4 interrupts + * that can be used, but you need to do explicit pci interrupt + * mapping to expose them + */ + if (request_irq(cp->pdev->irq, cas_interrupt, + SA_SHIRQ, dev->name, (void *) dev)) { + printk(KERN_ERR "%s: failed to request irq !\n", + cp->dev->name); + err = -EAGAIN; + goto err_spare; + } + + /* init hw */ + cas_lock_all_save(cp, flags); + cas_clean_rings(cp); + cas_init_hw(cp, !hw_was_up); + cp->opened = 1; + cas_unlock_all_restore(cp, flags); + + netif_start_queue(dev); + up(&cp->pm_sem); + return 0; + +err_spare: + cas_spare_free(cp); + cas_free_rxds(cp); +err_tx_tiny: + cas_tx_tiny_free(cp); + up(&cp->pm_sem); + return err; +} + +static int cas_close(struct net_device *dev) +{ + unsigned long flags; + struct cas *cp = netdev_priv(dev); + + /* Make sure we don't get distracted by suspend/resume */ + down(&cp->pm_sem); + + netif_stop_queue(dev); + + /* Stop traffic, mark us closed */ + cas_lock_all_save(cp, flags); + cp->opened = 0; + cas_reset(cp, 0); + cas_phy_init(cp); + cas_begin_auto_negotiation(cp, NULL); + cas_clean_rings(cp); + cas_unlock_all_restore(cp, flags); + + free_irq(cp->pdev->irq, (void *) dev); + cas_spare_free(cp); + cas_free_rxds(cp); + cas_tx_tiny_free(cp); + up(&cp->pm_sem); + return 0; +} + +static struct { + const char name[ETH_GSTRING_LEN]; +} ethtool_cassini_statnames[] = { + {"collisions"}, + {"rx_bytes"}, + {"rx_crc_errors"}, + {"rx_dropped"}, + {"rx_errors"}, + {"rx_fifo_errors"}, + {"rx_frame_errors"}, + {"rx_length_errors"}, + {"rx_over_errors"}, + {"rx_packets"}, + {"tx_aborted_errors"}, + {"tx_bytes"}, + {"tx_dropped"}, + {"tx_errors"}, + {"tx_fifo_errors"}, + {"tx_packets"} +}; +#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN) + +static struct { + const int offsets; /* neg. values for 2nd arg to cas_read_phy */ +} ethtool_register_table[] = { + {-MII_BMSR}, + {-MII_BMCR}, + {REG_CAWR}, + {REG_INF_BURST}, + {REG_BIM_CFG}, + {REG_RX_CFG}, + {REG_HP_CFG}, + {REG_MAC_TX_CFG}, + {REG_MAC_RX_CFG}, + {REG_MAC_CTRL_CFG}, + {REG_MAC_XIF_CFG}, + {REG_MIF_CFG}, + {REG_PCS_CFG}, + {REG_SATURN_PCFG}, + {REG_PCS_MII_STATUS}, + {REG_PCS_STATE_MACHINE}, + {REG_MAC_COLL_EXCESS}, + {REG_MAC_COLL_LATE} +}; +#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int)) +#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) + +static u8 *cas_get_regs(struct cas *cp) +{ + u8 *ptr = kmalloc(CAS_MAX_REGS, GFP_KERNEL); + u8 *p; + int i; + unsigned long flags; + + if (!ptr) + return NULL; + + spin_lock_irqsave(&cp->lock, flags); + for (i = 0, p = ptr; i < CAS_REG_LEN ; i ++, p += sizeof(u32)) { + u16 hval; + u32 val; + if (ethtool_register_table[i].offsets < 0) { + hval = cas_phy_read(cp, + -ethtool_register_table[i].offsets); + val = hval; + } else { + val= readl(cp->regs+ethtool_register_table[i].offsets); + } + memcpy(p, (u8 *)&val, sizeof(u32)); + } + spin_unlock_irqrestore(&cp->lock, flags); + + return ptr; +} + +static struct net_device_stats *cas_get_stats(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + struct net_device_stats *stats = cp->net_stats; + unsigned long flags; + int i; + unsigned long tmp; + + /* we collate all of the stats into net_stats[N_TX_RING] */ + if (!cp->hw_running) + return stats + N_TX_RINGS; + + /* collect outstanding stats */ + /* WTZ: the Cassini spec gives these as 16 bit counters but + * stored in 32-bit words. Added a mask of 0xffff to be safe, + * in case the chip somehow puts any garbage in the other bits. 
+ * Also, counter usage didn't seem to mach what Adrian did + * in the parts of the code that set these quantities. Made + * that consistent. + */ + spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); + stats[N_TX_RINGS].rx_crc_errors += + readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; + stats[N_TX_RINGS].rx_frame_errors += + readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; + stats[N_TX_RINGS].rx_length_errors += + readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; +#if 1 + tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + + (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); + stats[N_TX_RINGS].tx_aborted_errors += tmp; + stats[N_TX_RINGS].collisions += + tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); +#else + stats[N_TX_RINGS].tx_aborted_errors += + readl(cp->regs + REG_MAC_COLL_EXCESS); + stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + + readl(cp->regs + REG_MAC_COLL_LATE); +#endif + cas_clear_mac_err(cp); + + /* saved bits that are unique to ring 0 */ + spin_lock(&cp->stat_lock[0]); + stats[N_TX_RINGS].collisions += stats[0].collisions; + stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; + stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; + stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; + stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; + stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; + spin_unlock(&cp->stat_lock[0]); + + for (i = 0; i < N_TX_RINGS; i++) { + spin_lock(&cp->stat_lock[i]); + stats[N_TX_RINGS].rx_length_errors += + stats[i].rx_length_errors; + stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; + stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; + stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; + stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; + stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; + stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; + stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; + stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; + stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; + memset(stats + i, 0, sizeof(struct net_device_stats)); + spin_unlock(&cp->stat_lock[i]); + } + spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); + return stats + N_TX_RINGS; +} + + +static void cas_set_multicast(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + u32 rxcfg, rxcfg_new; + unsigned long flags; + int limit = STOP_TRIES; + + if (!cp->hw_running) + return; + + spin_lock_irqsave(&cp->lock, flags); + rxcfg = readl(cp->regs + REG_MAC_RX_CFG); + + /* disable RX MAC and wait for completion */ + writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { + if (!limit--) + break; + udelay(10); + } + + /* disable hash filter and wait for completion */ + limit = STOP_TRIES; + rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); + writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { + if (!limit--) + break; + udelay(10); + } + + /* program hash filters */ + cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); + rxcfg |= rxcfg_new; + writel(rxcfg, cp->regs + REG_MAC_RX_CFG); + spin_unlock_irqrestore(&cp->lock, flags); +} + +/* Eventually add support for changing the advertisement + * on autoneg. 
+ */ +static int cas_ethtool_ioctl(struct net_device *dev, void *ep_user) +{ + struct cas *cp = netdev_priv(dev); + u16 bmcr; + int full_duplex, speed, pause; + struct ethtool_cmd ecmd; + unsigned long flags; + enum link_state linkstate = link_up; + + if (copy_from_user(&ecmd, ep_user, sizeof(ecmd))) + return -EFAULT; + + switch(ecmd.cmd) { + case ETHTOOL_GDRVINFO: { + struct ethtool_drvinfo info = { cmd: ETHTOOL_GDRVINFO }; + + strncpy(info.driver, DRV_MODULE_NAME, + ETHTOOL_BUSINFO_LEN); + strncpy(info.version, DRV_MODULE_VERSION, + ETHTOOL_BUSINFO_LEN); + info.fw_version[0] = '\0'; + strncpy(info.bus_info, pci_name(cp->pdev), + ETHTOOL_BUSINFO_LEN); + info.regdump_len = cp->casreg_len < CAS_MAX_REGS ? + cp->casreg_len : CAS_MAX_REGS; + info.n_stats = CAS_NUM_STAT_KEYS; + if (copy_to_user(ep_user, &info, sizeof(info))) + return -EFAULT; + + return 0; + } + + case ETHTOOL_GSET: + ecmd.advertising = 0; + ecmd.supported = SUPPORTED_Autoneg; + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + ecmd.supported |= SUPPORTED_1000baseT_Full; + ecmd.advertising |= ADVERTISED_1000baseT_Full; + } + + /* Record PHY settings if HW is on. */ + spin_lock_irqsave(&cp->lock, flags); + bmcr = 0; + linkstate = cp->lstate; + if (CAS_PHY_MII(cp->phy_type)) { + ecmd.port = PORT_MII; + ecmd.transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? + XCVR_INTERNAL : XCVR_EXTERNAL; + ecmd.phy_address = cp->phy_addr; + ecmd.advertising |= ADVERTISED_TP | ADVERTISED_MII | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + ecmd.supported |= + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_TP | SUPPORTED_MII); + + if (cp->hw_running) { + cas_mif_poll(cp, 0); + bmcr = cas_phy_read(cp, MII_BMCR); + cas_read_mii_link_mode(cp, &full_duplex, + &speed, &pause); + cas_mif_poll(cp, 1); + } + + } else { + ecmd.port = PORT_FIBRE; + ecmd.transceiver = XCVR_INTERNAL; + ecmd.phy_address = 0; + ecmd.supported |= SUPPORTED_FIBRE; + ecmd.advertising |= ADVERTISED_FIBRE; + + if (cp->hw_running) { + /* pcs uses the same bits as mii */ + bmcr = readl(cp->regs + REG_PCS_MII_CTRL); + cas_read_pcs_link_mode(cp, &full_duplex, + &speed, &pause); + } + } + spin_unlock_irqrestore(&cp->lock, flags); + + if (bmcr & BMCR_ANENABLE) { + ecmd.advertising |= ADVERTISED_Autoneg; + ecmd.autoneg = AUTONEG_ENABLE; + ecmd.speed = ((speed == 10) ? + SPEED_10 : + ((speed == 1000) ? + SPEED_1000 : SPEED_100)); + ecmd.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + } else { + ecmd.autoneg = AUTONEG_DISABLE; + ecmd.speed = + (bmcr & CAS_BMCR_SPEED1000) ? + SPEED_1000 : + ((bmcr & BMCR_SPEED100) ? SPEED_100: + SPEED_10); + ecmd.duplex = + (bmcr & BMCR_FULLDPLX) ? + DUPLEX_FULL : DUPLEX_HALF; + } + if (linkstate != link_up) { + /* Force these to "unknown" if the link is not up and + * autonogotiation in enabled. We can set the link + * speed to 0, but not ecmd.duplex, + * because its legal values are 0 and 1. Ethtool will + * print the value reported in parentheses after the + * word "Unknown" for unrecognized values. + * + * If in forced mode, we report the speed and duplex + * settings that we configured. + */ + if (cp->link_cntl & BMCR_ANENABLE) { + ecmd.speed = 0; + ecmd.duplex = 0xff; + } else { + ecmd.speed = SPEED_10; + if (cp->link_cntl & BMCR_SPEED100) { + ecmd.speed = SPEED_100; + } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { + ecmd.speed = SPEED_1000; + } + ecmd.duplex = (cp->link_cntl & BMCR_FULLDPLX)? 
+ DUPLEX_FULL : DUPLEX_HALF; + } + } + if (copy_to_user(ep_user, &ecmd, sizeof(ecmd))) + return -EFAULT; + return 0; + + case ETHTOOL_SSET: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Verify the settings we care about. */ + if (ecmd.autoneg != AUTONEG_ENABLE && + ecmd.autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (ecmd.autoneg == AUTONEG_DISABLE && + ((ecmd.speed != SPEED_1000 && + ecmd.speed != SPEED_100 && + ecmd.speed != SPEED_10) || + (ecmd.duplex != DUPLEX_HALF && + ecmd.duplex != DUPLEX_FULL))) + return -EINVAL; + + /* Apply settings and restart link process. */ + spin_lock_irqsave(&cp->lock, flags); + cas_begin_auto_negotiation(cp, &ecmd); + spin_unlock_irqrestore(&cp->lock, flags); + return 0; + + case ETHTOOL_NWAY_RST: + if ((cp->link_cntl & BMCR_ANENABLE) == 0) + return -EINVAL; + + /* Restart link process. */ + spin_lock_irqsave(&cp->lock, flags); + cas_begin_auto_negotiation(cp, NULL); + spin_unlock_irqrestore(&cp->lock, flags); + + return 0; + + case ETHTOOL_GWOL: + case ETHTOOL_SWOL: + break; /* doesn't exist */ + + /* get link status */ + case ETHTOOL_GLINK: { + struct ethtool_value edata = { cmd: ETHTOOL_GLINK }; + + edata.data = (cp->lstate == link_up); + if (copy_to_user(ep_user, &edata, sizeof(edata))) + return -EFAULT; + return 0; + } + + /* get message-level */ + case ETHTOOL_GMSGLVL: { + struct ethtool_value edata = { cmd: ETHTOOL_GMSGLVL }; + + edata.data = cp->msg_enable; + if (copy_to_user(ep_user, &edata, sizeof(edata))) + return -EFAULT; + return 0; + } + + /* set message-level */ + case ETHTOOL_SMSGLVL: { + struct ethtool_value edata; + + if (!capable(CAP_NET_ADMIN)) { + return (-EPERM); + } + if (copy_from_user(&edata, ep_user, sizeof(edata))) + return -EFAULT; + cp->msg_enable = edata.data; + return 0; + } + + case ETHTOOL_GREGS: { + struct ethtool_regs edata; + u8 *ptr; + int len = cp->casreg_len < CAS_MAX_REGS ? + cp->casreg_len: CAS_MAX_REGS; + + if (copy_from_user(&edata, ep_user, sizeof (edata))) + return -EFAULT; + + if (edata.len > len) + edata.len = len; + edata.version = 0; + if (copy_to_user (ep_user, &edata, sizeof(edata))) + return -EFAULT; + + /* cas_get_regs handles locks (cp->lock). */ + ptr = cas_get_regs(cp); + if (ptr == NULL) + return -ENOMEM; + if (copy_to_user(ep_user + sizeof (edata), ptr, edata.len)) + return -EFAULT; + + kfree(ptr); + return (0); + } + case ETHTOOL_GSTRINGS: { + struct ethtool_gstrings edata; + int len; + + if (copy_from_user(&edata, ep_user, sizeof(edata))) + return -EFAULT; + + len = edata.len; + switch(edata.string_set) { + case ETH_SS_STATS: + edata.len = (len < CAS_NUM_STAT_KEYS) ? + len : CAS_NUM_STAT_KEYS; + if (copy_to_user(ep_user, &edata, sizeof(edata))) + return -EFAULT; + + if (copy_to_user(ep_user + sizeof(edata), + ðtool_cassini_statnames, + (edata.len * ETH_GSTRING_LEN))) + return -EFAULT; + return 0; + default: + return -EINVAL; + } + } + case ETHTOOL_GSTATS: { + int i = 0; + u64 *tmp; + struct ethtool_stats edata; + struct net_device_stats *stats; + int len; + + if (copy_from_user(&edata, ep_user, sizeof(edata))) + return -EFAULT; + + len = edata.n_stats; + stats = cas_get_stats(cp->dev); + edata.cmd = ETHTOOL_GSTATS; + edata.n_stats = (len < CAS_NUM_STAT_KEYS) ? 
+ len : CAS_NUM_STAT_KEYS; + if (copy_to_user(ep_user, &edata, sizeof (edata))) + return -EFAULT; + + tmp = kmalloc(sizeof(u64)*CAS_NUM_STAT_KEYS, GFP_KERNEL); + if (tmp) { + tmp[i++] = stats->collisions; + tmp[i++] = stats->rx_bytes; + tmp[i++] = stats->rx_crc_errors; + tmp[i++] = stats->rx_dropped; + tmp[i++] = stats->rx_errors; + tmp[i++] = stats->rx_fifo_errors; + tmp[i++] = stats->rx_frame_errors; + tmp[i++] = stats->rx_length_errors; + tmp[i++] = stats->rx_over_errors; + tmp[i++] = stats->rx_packets; + tmp[i++] = stats->tx_aborted_errors; + tmp[i++] = stats->tx_bytes; + tmp[i++] = stats->tx_dropped; + tmp[i++] = stats->tx_errors; + tmp[i++] = stats->tx_fifo_errors; + tmp[i++] = stats->tx_packets; + BUG_ON(i != CAS_NUM_STAT_KEYS); + + i = copy_to_user(ep_user + sizeof(edata), + tmp, sizeof(u64)*edata.n_stats); + kfree(tmp); + } else { + return -ENOMEM; + } + if (i) + return -EFAULT; + return 0; + } + } + + return -EOPNOTSUPP; +} + +static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct cas *cp = netdev_priv(dev); + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data; + unsigned long flags; + int rc = -EOPNOTSUPP; + + /* Hold the PM semaphore while doing ioctl's or we may collide + * with open/close and power management and oops. + */ + down(&cp->pm_sem); + switch (cmd) { + case SIOCETHTOOL: + rc = cas_ethtool_ioctl(dev, ifr->ifr_data); + break; + + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = cp->phy_addr; + /* Fallthrough... */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + spin_lock_irqsave(&cp->lock, flags); + cas_mif_poll(cp, 0); + data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); + cas_mif_poll(cp, 1); + spin_unlock_irqrestore(&cp->lock, flags); + rc = 0; + break; + + case SIOCSMIIREG: /* Write MII PHY register. */ + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + spin_lock_irqsave(&cp->lock, flags); + cas_mif_poll(cp, 0); + rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); + cas_mif_poll(cp, 1); + spin_unlock_irqrestore(&cp->lock, flags); + break; + default: + break; + }; + + up(&cp->pm_sem); + return rc; +} + +static int __devinit cas_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int cas_version_printed = 0; + unsigned long casreg_base, casreg_len; + struct net_device *dev; + struct cas *cp; + int i, err, pci_using_dac; + u16 pci_cmd; + u8 orig_cacheline_size = 0, cas_cacheline_size = 0; + + if (cas_version_printed++ == 0) + printk(KERN_INFO "%s", version); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR PFX "Cannot enable PCI device, " + "aborting.\n"); + return err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + printk(KERN_ERR PFX "Cannot find proper PCI device " + "base address, aborting.\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + dev = alloc_etherdev(sizeof(*cp)); + if (!dev) { + printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); + err = -ENOMEM; + goto err_out_disable_pdev; + } + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + err = pci_request_regions(pdev, dev->name); + if (err) { + printk(KERN_ERR PFX "Cannot obtain PCI resources, " + "aborting.\n"); + goto err_out_free_netdev; + } + pci_set_master(pdev); + + /* we must always turn on parity response or else parity + * doesn't get generated properly. disable SERR/PERR as well. + * in addition, we want to turn MWI on. 
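+ * (i.e. clear PCI_COMMAND_SERR, set PCI_COMMAND_PARITY, then call + * pci_set_mwi() below).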
+ */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_SERR; + pci_cmd |= PCI_COMMAND_PARITY; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + pci_set_mwi(pdev); + /* + * On some architectures, the default cache line size set + * by pci_set_mwi reduces performance. We have to increase + * it for this case. To start, we'll print some configuration + * data. + */ +#if 1 + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, + &orig_cacheline_size); + if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { + cas_cacheline_size = + (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? + CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; + if (pci_write_config_byte(pdev, + PCI_CACHE_LINE_SIZE, + cas_cacheline_size)) { + printk(KERN_ERR PFX "Could not set PCI cache " + "line size\n"); + goto err_write_cacheline; + } + } +#endif + + + /* Configure DMA attributes. */ + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, + DMA_64BIT_MASK); + if (err < 0) { + printk(KERN_ERR PFX "Unable to obtain 64-bit DMA " + "for consistent allocations\n"); + goto err_out_free_res; + } + + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + printk(KERN_ERR PFX "No usable DMA configuration, " + "aborting.\n"); + goto err_out_free_res; + } + pci_using_dac = 0; + } + + casreg_base = pci_resource_start(pdev, 0); + casreg_len = pci_resource_len(pdev, 0); + + cp = netdev_priv(dev); + cp->pdev = pdev; +#if 1 + /* A value of 0 indicates we never explicitly set it */ + cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; +#endif + cp->dev = dev; + cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : + cassini_debug; + + cp->link_transition = LINK_TRANSITION_UNKNOWN; + cp->link_transition_jiffies_valid = 0; + + spin_lock_init(&cp->lock); + spin_lock_init(&cp->rx_inuse_lock); + spin_lock_init(&cp->rx_spare_lock); + for (i = 0; i < N_TX_RINGS; i++) { + spin_lock_init(&cp->stat_lock[i]); + spin_lock_init(&cp->tx_lock[i]); + } + spin_lock_init(&cp->stat_lock[N_TX_RINGS]); + init_MUTEX(&cp->pm_sem); + + init_timer(&cp->link_timer); + cp->link_timer.function = cas_link_timer; + cp->link_timer.data = (unsigned long) cp; + +#if 1 + /* Just in case the implementation of atomic operations + * changes so that an explicit initialization is necessary.
+ */ + atomic_set(&cp->reset_task_pending, 0); + atomic_set(&cp->reset_task_pending_all, 0); + atomic_set(&cp->reset_task_pending_spare, 0); + atomic_set(&cp->reset_task_pending_mtu, 0); +#endif + INIT_WORK(&cp->reset_task, cas_reset_task, cp); + + /* Default link parameters */ + if (link_mode >= 0 && link_mode <= 6) + cp->link_cntl = link_modes[link_mode]; + else + cp->link_cntl = BMCR_ANENABLE; + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + netif_carrier_off(cp->dev); + cp->timer_ticks = 0; + + /* give us access to cassini registers */ + cp->regs = ioremap(casreg_base, casreg_len); + if (cp->regs == 0UL) { + printk(KERN_ERR PFX "Cannot map device registers, " + "aborting.\n"); + goto err_out_free_res; + } + cp->casreg_len = casreg_len; + + pci_save_state(pdev); + cas_check_pci_invariants(cp); + cas_hard_reset(cp); + cas_reset(cp, 0); + if (cas_check_invariants(cp)) + goto err_out_iounmap; + + cp->init_block = (struct cas_init_block *) + pci_alloc_consistent(pdev, sizeof(struct cas_init_block), + &cp->block_dvma); + if (!cp->init_block) { + printk(KERN_ERR PFX "Cannot allocate init block, " + "aborting.\n"); + goto err_out_iounmap; + } + + for (i = 0; i < N_TX_RINGS; i++) + cp->init_txds[i] = cp->init_block->txds[i]; + + for (i = 0; i < N_RX_DESC_RINGS; i++) + cp->init_rxds[i] = cp->init_block->rxds[i]; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cp->init_rxcs[i] = cp->init_block->rxcs[i]; + + for (i = 0; i < N_RX_FLOWS; i++) + skb_queue_head_init(&cp->rx_flows[i]); + + dev->open = cas_open; + dev->stop = cas_close; + dev->hard_start_xmit = cas_start_xmit; + dev->get_stats = cas_get_stats; + dev->set_multicast_list = cas_set_multicast; + dev->do_ioctl = cas_ioctl; + dev->tx_timeout = cas_tx_timeout; + dev->watchdog_timeo = CAS_TX_TIMEOUT; + dev->change_mtu = cas_change_mtu; +#ifdef USE_NAPI + dev->poll = cas_poll; + dev->weight = 64; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = cas_netpoll; +#endif + dev->irq = pdev->irq; + dev->dma = 0; + + /* Cassini features. */ + if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; + + if (pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + if (register_netdev(dev)) { + printk(KERN_ERR PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_free_consistent; + } + + i = readl(cp->regs + REG_BIM_CFG); + printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " + "Ethernet[%d] ", dev->name, + (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", + (i & BIM_CFG_32BIT) ? "32" : "64", + (i & BIM_CFG_66MHZ) ? "66" : "33", + (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq); + + for (i = 0; i < 6; i++) + printk("%2.2x%c", dev->dev_addr[i], + i == 5 ? ' ' : ':'); + printk("\n"); + + pci_set_drvdata(pdev, dev); + cp->hw_running = 1; + cas_entropy_reset(cp); + cas_phy_init(cp); + cas_begin_auto_negotiation(cp, NULL); + return 0; + +err_out_free_consistent: + pci_free_consistent(pdev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); + +err_out_iounmap: + down(&cp->pm_sem); + if (cp->hw_running) + cas_shutdown(cp); + up(&cp->pm_sem); + + iounmap((void *) cp->regs); + + +err_out_free_res: + pci_release_regions(pdev); + +err_write_cacheline: + /* Try to restore it in case the error occured after we + * set it. 
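+ * (orig_cacheline_size was read before any change was made, so this + * write is harmless even if we never modified the cache line size.)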
+ */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); + +err_out_free_netdev: + free_netdev(dev); + +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return -ENODEV; +} + +static void __devexit cas_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp; + if (!dev) + return; + + cp = netdev_priv(dev); + unregister_netdev(dev); + + down(&cp->pm_sem); + flush_scheduled_work(); + if (cp->hw_running) + cas_shutdown(cp); + up(&cp->pm_sem); + +#if 1 + if (cp->orig_cacheline_size) { + /* Restore the cache line size if we had modified + * it. + */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + cp->orig_cacheline_size); + } +#endif + pci_free_consistent(pdev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); + iounmap((void *) cp->regs); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM +static int cas_suspend(struct pci_dev *pdev, u32 state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp = netdev_priv(dev); + unsigned long flags; + + /* We hold the PM semaphore during entire driver + * sleep time + */ + down(&cp->pm_sem); + + /* If the driver is opened, we stop the DMA */ + if (cp->opened) { + netif_device_detach(dev); + + cas_lock_all_save(cp, flags); + + /* We can set the second arg of cas_reset to 0 + * because on resume, we'll call cas_init_hw with + * its second arg set so that autonegotiation is + * restarted. + */ + cas_reset(cp, 0); + cas_clean_rings(cp); + cas_unlock_all_restore(cp, flags); + } + + if (cp->hw_running) + cas_shutdown(cp); + + return 0; +} + +static int cas_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp = netdev_priv(dev); + + printk(KERN_INFO "%s: resuming\n", dev->name); + + cas_hard_reset(cp); + if (cp->opened) { + unsigned long flags; + cas_lock_all_save(cp, flags); + cas_reset(cp, 0); + cp->hw_running = 1; + cas_clean_rings(cp); + cas_init_hw(cp, 1); + cas_unlock_all_restore(cp, flags); + + netif_device_attach(dev); + } + up(&cp->pm_sem); + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver cas_driver = { + .name = DRV_MODULE_NAME, + .id_table = cas_pci_tbl, + .probe = cas_init_one, + .remove = __devexit_p(cas_remove_one), +#ifdef CONFIG_PM + .suspend = cas_suspend, + .resume = cas_resume +#endif +}; + +static int __init cas_init(void) +{ + if (linkdown_timeout > 0) + link_transition_timeout = linkdown_timeout * HZ; + else + link_transition_timeout = 0; + + return pci_module_init(&cas_driver); +} + +static void __exit cas_cleanup(void) +{ + pci_unregister_driver(&cas_driver); +} + +module_init(cas_init); +module_exit(cas_cleanup); diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h new file mode 100644 index 000000000000..88063ef16cf6 --- /dev/null +++ b/drivers/net/cassini.h @@ -0,0 +1,4425 @@ +/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ + * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * + * vendor id: 0x108E (Sun Microsystems, Inc.) + * device id: 0xabba (Cassini) + * revision ids: 0x01 = Cassini + * 0x02 = Cassini rev 2 + * 0x10 = Cassini+ + * 0x11 = Cassini+ 0.2u + * + * vendor id: 0x100b (National Semiconductor) + * device id: 0x0035 (DP83065/Saturn) + * revision ids: 0x30 = Saturn B2 + * + * rings are all offset from 0. + * + * there are two clock domains: + * PCI: 33/66MHz clock + * chip: 125MHz clock + */ + +#ifndef _CASSINI_H +#define _CASSINI_H + +/* cassini register map: 2M memory mapped in 32-bit memory space accessible as + * 32-bit words. there is no i/o port access. REG_ addresses are + * shared between cassini and cassini+. REG_PLUS_ addresses only + * appear in cassini+. REG_MINUS_ addresses only appear in cassini. + */ +#define CAS_ID_REV2 0x02 +#define CAS_ID_REVPLUS 0x10 +#define CAS_ID_REVPLUS02u 0x11 +#define CAS_ID_REVSATURNB2 0x30 + +/** global resources **/ + +/* this register sets the weights for the weighted round robin arbiter. e.g., + * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit + * for its next turn to access the pci bus. + * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8 + * DEFAULT: 0x0, SIZE: 5 bits + */ +#define REG_CAWR 0x0004 /* core arbitration weight */ +#define CAWR_RX_DMA_WEIGHT_SHIFT 0 +#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */ +#define CAWR_TX_DMA_WEIGHT_SHIFT 2 +#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */ +#define CAWR_RR_DIS 0x10 /* [4] */ + +/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst + * sizes determined by length of packet or descriptor transfer and the + * max length allowed by the target. + * DEFAULT: 0x0, SIZE: 1 bit + */ +#define REG_INF_BURST 0x0008 /* infinite burst enable reg */ +#define INF_BURST_EN 0x1 /* enable */ + +/* top level interrupts [0-9] are auto-cleared to 0 when the status + * register is read. second level interrupts [13 - 18] are cleared at + * the source. tx completion register 3 is replicated in [19 - 31] + * DEFAULT: 0x00000000, SIZE: 29 bits + */ +#define REG_INTR_STATUS 0x000C /* interrupt status register */ +#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set + xferred from host queue to + TX FIFO */ +#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into + TX FIFO. i.e., + TX Kick == TX complete. if + PACED_MODE set, then TX FIFO + also empty */ +#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx + FIFO */ +#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing + corrupted. FATAL ERROR */ +#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred + from RX FIFO to host mem. + RX completion reg updated. + may be delayed by recv + intr blanking. */ +#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers. + RX Kick == RX complete */ +#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing + corrupted. FATAL ERROR */ +#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion + ring to post descriptors. 
+ RX complete head incr to + almost reach RX complete + tail */ +#define INTR_RX_BUF_AE 0x00000100 /* less than the + programmable threshold # + of free descr avail for + hw use */ +#define INTR_RX_COMP_AF 0x00000200 /* less than the + programmable threshold # + of descr spaces for hw + use in completion descr + ring */ +#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC != + len of non-reassembly pkt + from fifo during DMA or + header parser provides TCP + header and payload size > + MAC packet size. + FATAL ERROR */ +#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this + bit will be set if an interrupt + generated on the pci bus. useful + when driver is polling for + interrupts */ +#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */ +#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at + least 1 unmasked interrupt set */ +#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at + least 1 unmasked interrupt set */ +#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has + at least 1 unmasked interrupt + set */ +#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least + 1 unmasked interrupt set */ +#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the + BIF has at least 1 unmasked + interrupt set */ +#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion + 3 reg data */ +#define INTR_TX_COMP_3_SHIFT 19 +#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \ + INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \ + INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \ + INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \ + INTR_MAC_CTRL_STATUS) + +/* determines which status events will cause an interrupt. layout same + * as REG_INTR_STATUS. + * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits + */ +#define REG_INTR_MASK 0x0010 /* Interrupt mask */ + +/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS. + * useful when driver is polling for interrupts. layout same as REG_INTR_MASK. + * DEFAULT: 0x00000000, SIZE: 12 bits + */ +#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask + (used w/ status alias) */ +/* same as REG_INTR_STATUS except that only bits cleared are those selected by + * REG_ALIAS_CLEAR + * DEFAULT: 0x00000000, SIZE: 29 bits + */ +#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias + (selective clear) */ + +/* DEFAULT: 0x0, SIZE: 3 bits */ +#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */ +#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+. + set if no ACK64# during ABS64 cycle + in Cassini. */ +#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if + no read retry after 2^15 clocks */ +#define PCI_ERR_OTHER 0x04 /* other PCI errors */ +#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req. + unused in Cassini. */ +#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req. + unused in Cassini. */ +#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during + DMA. unused in cassini. */ + +/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event + * causes an interrupt to be generated. + * DEFAULT: 0x7, SIZE: 3 bits + */ +#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */ + +/* used to configure PCI related parameters that are not in PCI config space. 
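+ * cas_init_one() reads this register at probe time to report the slot's + * bus width (BIM_CFG_32BIT) and speed (BIM_CFG_66MHZ).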
+ * DEFAULT: 0bxx000, SIZE: 5 bits + */ +#define REG_BIM_CFG 0x1008 /* BIM Configuration */ +#define BIM_CFG_RESERVED0 0x001 /* reserved */ +#define BIM_CFG_RESERVED1 0x002 /* reserved */ +#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */ +#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */ +#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */ +#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */ +#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */ +#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */ +#define BIM_CFG_RESERVED2 0x100 /* reserved */ +#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global + reset. reserved in Cassini. */ +#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended. + reserved in Cassini. */ +#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0. + reserved in Cassini. */ + +/* DEFAULT: 0x00000000, SIZE: 32 bits */ +#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */ +#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state + machine bits [21:0] */ +#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state + machine bits [6:0] */ + +/* writing to SW_RESET_TX and SW_RESET_RX will issue a global + * reset. poll until TX and RX read back as 0's for completion. + */ +#define REG_SW_RESET 0x1010 /* Software reset */ +#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until + cleared to 0. */ +#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until + cleared to 0. */ +#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low). + resets PHY and anything else + connected to RSTOUT#. RSTOUT# + is also activated by local PCI + reset when hot-swap is being + done. */ +#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with + this bit set, PCS and SLINK + modules won't be reset. + i.e., link won't drop. */ +#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */ +#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits: + 0b000: ARB_IDLE1 + 0b001: ARB_IDLE2 + 0b010: ARB_WB_ACK + 0b011: ARB_WB_WAT + 0b100: ARB_RB_ACK + 0b101: ARB_RB_WAT + 0b110: ARB_RB_END + 0b111: ARB_WB_END */ +#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits: + 0b00: RD_PCI_WAT + 0b01: RD_PCI_RDY + 0b11: RD_PCI_ACK */ +#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits: + 0b00: AD_IDL_RX + 0b01: AD_ACK_RX + 0b10: AD_ACK_TX + 0b11: AD_IDL_TX */ +#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits + 0b00: WR_PCI_WAT + 0b01: WR_PCI_RDY + 0b11: WR_PCI_ACK */ +#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits: + 0b000: ARB_IDLE1 + 0b001: ARB_IDLE2 + 0b010: ARB_TX_ACK + 0b011: ARB_TX_WAT + 0b100: ARB_RX_ACK + 0b110: ARB_RX_WAT */ + +/* Cassini only. 64-bit register used to check PCI datapath. when read, + * value written has both lower and upper 32-bit halves rotated to the right + * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF + */ +#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test + Cassini+: reserved */ + +/* output enables are provided for each device's chip select and for the rest + * of the outputs from cassini to its local bus devices. two sw programmable + * bits are connected to general purpus control/status bits. + * DEFAULT: 0x7 + */ +#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device + output EN. 
default: 0x7 */ +#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and + OE signal output enable on the + local bus interface. these + are shared between both local + bus devices. tristate when 0. */ +#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */ +#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip + select output enable */ +#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */ +#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */ +#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */ + +/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR + * and read/write from/to it REG_BIM_BUFFER_DATA_LOW and _DATA_HI. + * _DATA_HI should be the last access of the sequence. + * DEFAULT: undefined + */ +#define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for + purposes. */ +#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */ +#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1 + read buffer access = 0 */ +/* DEFAULT: undefined */ +#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */ +#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */ + +/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer. + * bit auto-clears when done with status read from _SUMMARY and _PASS bits. + */ +#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST + control/status */ +#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */ +#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer. + Cassini only. reserved in + Cassini+. */ +#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read + buffer. */ +#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write + buffer. Cassini only. reserved + in Cassini+. */ +#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */ +#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */ +#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST. + Cassini only. reserved in + Cassini+. */ +#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST. + Cassini only. reserved in + Cassini+. */ + +/* ASUN: i'm not sure what this does as it's not in the spec. + * DEFAULT: 0xFC + */ +#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux + select register */ + +/* enable probe monitoring mode and select data appearing on the P_A* bus. 
bit + * values for _SEL_HI_MASK and _SEL_LOW_MASK: + * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w, + * wtc empty r, post pci) + * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp, + * pci rpkt comp, txdma wr req, txdma wr ack, + * txdma wr rdy, txdma wr xfr done) + * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd, + * rd arb state, rd pci state) + * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state, + * wrpci state) + * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8] + * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24] + * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40] + * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56] + * the following are not available in Cassini: + * 0xc: rx probe[7:0] 0xd: tx probe[7:0] + * 0xe: hp probe[7:0] 0xf: mac probe[7:0] + */ +#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */ +#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be + driven on local bus P_A[15:0] + for debugging */ +#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals: + 0x03 = mac[1:0] + 0x0C = rx[1:0] + 0x30 = tx[1:0] + 0xC0 = hp[1:0] */ +#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear + on P_A[15:8]. see above for + values. */ +#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear + on P_A[7:0]. see above for + values. */ + +/* values mean the same thing as REG_INTR_MASK excep that it's for INTB. + DEFAULT: 0x1F */ +#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask + register 2 for INTB */ +#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16) +/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to + * all of the alternate (2-4) INTR registers while _1 corresponds to only + * _MASK_1 and _STATUS_1 registers. + * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers + */ +#define INTR_RX_DONE_ALT 0x01 +#define INTR_RX_COMP_FULL_ALT 0x02 +#define INTR_RX_COMP_AF_ALT 0x04 +#define INTR_RX_BUF_UNAVAIL_1 0x08 +#define INTR_RX_BUF_AE_1 0x10 /* almost empty */ +#define INTRN_MASK_RX_EN 0x80 +#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \ + INTR_RX_COMP_FULL_ALT | \ + INTR_RX_COMP_AF_ALT | \ + INTR_RX_BUF_UNAVAIL_1 | \ + INTR_RX_BUF_AE_1) +#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status + register 2 for INTB. default: 0x1F */ +#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16) +#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the + flags are set. enables desc ring. */ + +#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask + register 2 for INTB */ +#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16) + +#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status + register alias 2 for INTB */ +#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16) + +#define REG_SATURN_PCFG 0x106c /* pin configuration register for + integrated macphy */ + +#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */ +#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */ +#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */ +#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */ +#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */ +#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode. 
+ 0 = normal */ +#define SATURN_PCFG_MTP 0x00000080 /* test point select */ +#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 = + GMII on SERDES pins for + monitoring. */ +#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all + pins configed as outputs. + for power saving when using + internal phy. */ +#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl + polarity from strapping + value. + 1 = mac core led ctrl + polarity active low. */ + + +/** transmit dma registers **/ +#define MAX_TX_RINGS_SHIFT 2 +#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT) +#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1) + +/* TX configuration. + * descr ring sizes size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8 + * DEFAULT: 0x3F000001 + */ +#define REG_TX_CFG 0x2004 /* TX config */ +#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA + will stop after xfer of current + buffer has been completed. */ +#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be + accessed w/ FIFO addr + and data registers. + TX DMA should be + disabled. */ +#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in + ring 1. */ +#define TX_CFG_DESC_RING0_SHIFT 2 +#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4) +#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4) +#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after + TX FIFO becomes empty. + if 0, TX_ALL set + if descr queue empty. */ +#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */ +#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at + the end of every packet kicked + through Q1. */ +#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at + the end of every packet kicked + through Q2. */ +#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at + the end of every packet kicked + through Q3 */ +#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at + the end of every packet kicked + through Q4 */ +#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion + writeback */ +#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port + connection + 0b00: tx mac req, + tx mac retry req, + tx ack and tx tag. + 0b01: txdma rd req, + txdma rd ack, + txdma rd rdy, + txdma rd type0 + 0b11: txdma wr req, + txdma wr ack, + txdma wr rdy, + txdma wr xfr done. */ +#define TX_CFG_CTX_SEL_SHIFT 30 + +/* 11-bit counters that point to next location in FIFO to be loaded/retrieved. + * used for diagnostics only. + */ +#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */ +#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write + pointer. temp hold reg. + diagnostics only. */ +#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */ +#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read + pointer */ + +/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */ +#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */ + +/* current state of all state machines in TX */ +#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */ +#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */ +#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */ +#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine. + = 0x01 when TX disabled. */ +#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */ +#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. 
prefetch cache controller + state machine */ +#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */ + +#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */ +#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */ +#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */ +#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */ + +/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented + * while the upper 23 bits are taken from the TX descriptor + */ +#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */ +#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */ + +/* 13 bit registers written by driver w/ descriptor value that follows + * last valid xmit descriptor. kick # and complete # values are used by + * the xmit dma engine to control tx descr fetching. if > 1 valid + * tx descr is available within the cache line being read, cassini will + * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro. + */ +#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */ +#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4) +#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */ +#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4) + +/* values of TX_COMPLETE_1-4 are written. each completion register + * is 2bytes in size and contiguous. 8B allocation w/ 8B alignment. + * NOTE: completion reg values are only written back prior to TX_INTME and + * TX_ALL interrupts. at all other times, the most up-to-date index values + * should be obtained from the REG_TX_COMPLETE_# registers. + * here's the layout: + * offset from base addr completion # byte + * 0 TX_COMPLETE_1_MSB + * 1 TX_COMPLETE_1_LSB + * 2 TX_COMPLETE_2_MSB + * 3 TX_COMPLETE_2_LSB + * 4 TX_COMPLETE_3_MSB + * 5 TX_COMPLETE_3_LSB + * 6 TX_COMPLETE_4_MSB + * 7 TX_COMPLETE_4_LSB + */ +#define TX_COMPWB_SIZE 8 +#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back + base low */ +#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back + base high */ +#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL +#define TX_COMPWB_MSB_SHIFT 0 +#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL +#define TX_COMPWB_LSB_SHIFT 8 +#define TX_COMPWB_NEXT(x) ((x) >> 16) + +/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must + * be 2KB-aligned. */ +#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */ +#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */ +#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8) +#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8) + +/* 16-bit registers hold weights for the weighted round-robin of the + * four CBQ TX descr rings. weights correspond to # bytes xferred from + * host to TXFIFO in a round of WRR arbitration. can be set + * dynamically with new weights set upon completion of the current + * packet transfer from host memory to TXFIFO. a dummy write to any of + * these registers causes a queue1 pre-emption with all historical bw + * deficit data reset to 0 (useful when congestion requires a + * pre-emption/re-allocation of network bandwidth + */ +#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */ +#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */ +#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */ +#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */ + +/* diagnostics access to any TX FIFO location. every access is 65 + * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit. + * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag + * bit high. 
TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if + * TX FIFO data integrity is desired, TX DMA should be + * disabled. _DATA_HI_Tx should be the last access of the sequence. + */ +#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */ +#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */ +#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */ +#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */ +#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */ +#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */ + +/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST + * passed for the specified memory + */ +#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */ +#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST + controller state machine */ +#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */ +#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */ +#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */ +#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */ +#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */ +#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self + clears on completion. */ + +/** receive dma registers **/ +#define MAX_RX_DESC_RINGS 2 +#define MAX_RX_COMP_RINGS 4 + +/* receive DMA channel configuration. default: 0x80910 + * free ring size = (1 << n)*32 -> [32 - 8k] + * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9 + * DEFAULT: 0x80910 + */ +#define REG_RX_CFG 0x4000 /* RX config */ +#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 0 stops + channel as soon as current + frame xfer has completed. + driver should disable MAC + for 200ms before disabling + RX */ +#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX + free desc ring. + def: 0x8 = 8k */ +#define RX_CFG_DESC_RING_SHIFT 1 +#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete + ring. def: 0x8 = 32k */ +#define RX_CFG_COMP_RING_SHIFT 5 +#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc + batching. def: 0x0 = + enabled */ +#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st + data byte of the packet + w/in 8 byte boundares. + this swivels the data + DMA'ed to header + buffers, jumbo buffers + when header split is not + requested and MTU sized + buffers. def: 0x2 */ +#define RX_CFG_SWIVEL_SHIFT 10 + +/* cassini+ only */ +#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in + RX free desc ring 2. + def: 0x8 = 8k */ +#define RX_CFG_DESC_RING1_SHIFT 16 + + +/* the page size register allows cassini chips to do the following with + * received data: + * [--------------------------------------------------------------] page + * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad] + * |--------------| = PAGE_SIZE_BUFFER_STRIDE + * page = PAGE_SIZE + * offset = PAGE_SIZE_MTU_OFF + * for the above example, MTU_BUFFER_COUNT = 4. + * NOTE: as is apparent, you need to ensure that the following holds: + * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE + * DEFAULT: 0x48002002 (8k pages) + */ +#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */ +#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to + by receive descriptors. + if jumbo buffers are + supported the page size + should not be < 8k. + 0b00 = 2k, 0b01 = 4k + 0b10 = 8k, 0b11 = 16k + DEFAULT: 8k */ +#define RX_PAGE_SIZE_SHIFT 0 +#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw + packs into a page. 
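+ e.g., with the default 8K page and 2K buffer stride, at most + 8K/2K = 4 MTU buffers fit, which matches the default below.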
+ DEFAULT: 4 */ +#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11 +#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate + each MTU buffer + + offset from each + other. + 0b00 = 1k, 0b01 = 2k + 0b10 = 4k, 0b11 = 8k + DEFAULT: 0x1 */ +#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27 +#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that + hw writes the MTU buffer + into. + 0b00 = 0, + 0b01 = 64 bytes + 0b10 = 96, 0b11 = 128 + DEFAULT: 0x1 */ +#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30 + +/* 11-bit counter points to next location in RX FIFO to be loaded/read. + * shadow write pointers enable retries in case of early receive aborts. + * DEFAULT: 0x0. generated on 64-bit boundaries. + */ +#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */ +#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */ +#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write + pointer */ +#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read + pointer */ +#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read + pointer. (8-bit counter) */ + +/* current state of RX DMA state engines + other info + * DEFAULT: 0x0 + */ +#define REG_RX_DEBUG 0x401C /* RX debug */ +#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC: + 0x0 = idle, 0x1 = load_bop + 0x2 = load 1, 0x3 = load 2 + 0x4 = load 3, 0x5 = load 4 + 0x6 = last detect + 0x7 = wait req + 0x8 = wait req statuss 1st + 0x9 = load st + 0xa = bubble mac + 0xb = error */ +#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and + RX FIFO: + 0x0 = idle, 0x1 = hp xfr + 0x2 = wait hp ready + 0x3 = wait flow code + 0x4 = fifo xfer + 0x5 = make status + 0x6 = csum ready + 0x7 = error */ +#define RX_DEBUG_FC_STATE_MASK 0x000000180 /* flow control state machine + w/ MAC: + 0x0 = idle + 0x1 = wait xoff ack + 0x2 = wait xon + 0x3 = wait xon ack */ +#define RX_DEBUG_DATA_STATE_MASK 0x000001E00 /* unload data state machine + states: + 0x0 = idle data + 0x1 = header begin + 0x2 = xfer header + 0x3 = xfer header ld + 0x4 = mtu begin + 0x5 = xfer mtu + 0x6 = xfer mtu ld + 0x7 = jumbo begin + 0x8 = xfer jumbo + 0x9 = xfer jumbo ld + 0xa = reas begin + 0xb = xfer reas + 0xc = flush tag + 0xd = xfer reas ld + 0xe = error + 0xf = bubble idle */ +#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine + states: + 0x0 = idle desc + 0x1 = wait ack + 0x9 = wait ack 2 + 0x2 = fetch desc 1 + 0xa = fetch desc 2 + 0x3 = load ptrs + 0x4 = wait dma + 0x5 = wait ack batch + 0x6 = post batch + 0x7 = xfr done */ +#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the + interrupt queue */ +#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer + of the interrupt queue */ + +/* flow control frames are emmitted using two PAUSE thresholds: + * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg + * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes. + * PAUSE thresholds defined in terms of FIFO occupancy and may be translated + * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames + * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max + * value is is 0x6F. 
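+ * e.g., the default OFF threshold of 0x78 (120) corresponds to an RX FIFO + * occupancy of 120 * 64B = 7680B.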
+ * DEFAULT: 0x00078 + */ +#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */ +#define RX_PAUSE_THRESH_QUANTUM 64 +#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when + RX FIFO occupancy > + value*64B */ +#define RX_PAUSE_THRESH_OFF_SHIFT 0 +#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after + emitting XOFF PAUSE when RX + FIFO occupancy falls below + this value*64B. must be + < XOFF threshold. if = + RX_FIFO_SIZE< XON frames are + never emitted. */ +#define RX_PAUSE_THRESH_ON_SHIFT 12 + +/* 13-bit register used to control RX desc fetching and intr generation. if 4+ + * valid RX descriptors are available, Cassini will read 4 at a time. + * writing N means that all desc up to *but* excluding N are available. N must + * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned. + * DEFAULT: 0 on reset + */ +#define REG_RX_KICK 0x4024 /* RX kick reg */ + +/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings. + * lower 13 bits of the low register are hard-wired to 0. + */ +#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring + base low */ +#define REG_RX_DB_HI 0x402C /* RX descriptor ring + base hi */ +#define REG_RX_CB_LOW 0x4030 /* RX completion ring + base low */ +#define REG_RX_CB_HI 0x4034 /* RX completion ring + base hi */ +/* 13-bit register indicate desc used by cassini for receive frames. used + * for diagnostic purposes. + * DEFAULT: 0 on reset + */ +#define REG_RX_COMP 0x4038 /* (ro) RX completion */ + +/* HEAD and TAIL are used to control RX desc posting and interrupt + * generation. hw moves the head register to pass ownership to sw. sw + * moves the tail register to pass ownership back to hw. to give all + * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no + * more entries are available, DMA will pause and an interrupt will be + * generated to indicate no more entries are available. sw can use + * this interrupt to reduce the # of times it must update the + * completion tail register. + * DEFAULT: 0 on reset + */ +#define REG_RX_COMP_HEAD 0x403C /* RX completion head */ +#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */ + +/* values used for receive interrupt blanking. loaded each time the ISR is read + * DEFAULT: 0x00000000 + */ +#define REG_RX_BLANK 0x4044 /* RX blanking register + for ISR read */ +#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if + this many sets of completion + writebacks (up to 2 packets) + occur since the last time + the ISR was read. 0 = no + packet blanking */ +#define RX_BLANK_INTR_PKT_SHIFT 0 +#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted + if that many clocks were + counted since last time the + ISR was read. + each count is 512 core + clocks (125MHz). 0 = no + time blanking */ +#define RX_BLANK_INTR_TIME_SHIFT 12 + +/* values used for interrupt generation based on threshold values of how + * many free desc and completion entries are available for hw use. + * DEFAULT: 0x00000000 + */ +#define REG_RX_AE_THRESH 0x4048 /* RX almost empty + thresholds */ +#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be + generated if # desc + avail for hw use <= + # */ +#define RX_AE_THRESH_FREE_SHIFT 0 +#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be + generated if # of + completion entries + avail for hw use <= + # */ +#define RX_AE_THRESH_COMP_SHIFT 13 + +/* probabilities for random early drop (RED) thresholds on a FIFO threshold + * basis. 
probability should increase when the FIFO level increases. control + * packets are never dropped and not counted in stats. probability programmed + * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped. + * DEFAULT: 0x00000000 + */ +#define REG_RX_RED 0x404C /* RX random early detect enable */ +#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */ +#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */ +#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */ +#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */ + +/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO. + * RX control FIFO = # of packets in RX FIFO. + * DEFAULT: 0x0 + */ +#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */ +#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */ +#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */ +#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */ +#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */ +#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */ +#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr + high */ + +/* BIST testing ro RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST + * START/COMPLETE is writeable. START will clear when the BIST has completed + * checking all 17 RAMS. + * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0 + */ +#define REG_RX_BIST 0x4060 /* (ro) RX BIST */ +#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */ +#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */ +#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */ +#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */ +#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */ +#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */ +#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 33B passed */ +#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */ +#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */ +#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */ +#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */ +#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */ +#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */ +#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */ +#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */ +#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */ +#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */ +#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */ +#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete, + summary pass bit + contains AND of BIST + results of all 16 + RAMS */ +#define RX_BIST_START 0x00000001 /* write 1 to start + BIST. self clears + on completion. */ + +/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read + * from to retrieve packet control info. + * DEFAULT: 0 + */ +#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO + write ptr */ +#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read + ptr */ + +/* receive interrupt blanking. loaded each time interrupt alias register is + * read. 
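+ * same fields as REG_RX_BLANK; each time count is 512 core clocks at + * 125MHz, i.e. roughly 4.1us per count.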
+ * DEFAULT: 0x0 + */ +#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for + alias read */ +#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if # + completion writebacks + > # since last ISR + read. 0 = no + blanking. up to 2 + packets per + completion wb. */ +#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if # + clocks > # since last + ISR read. each count + is 512 core clocks + (125MHz). 0 = no + blanking. */ + +/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed + * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0 + * will unset the tag bit while writing HI_T1 will set the tag bit. to reset + * to normal operation after diagnostics, write to address location 0x0. + * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should + * be the last write access of a write sequence. + * DEFAULT: undefined + */ +#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */ +#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */ +#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */ +#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */ +#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */ + +/* diagnostic assess to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of + * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit + * accesses. HI is 7-bits with 6-bit flow id and 1 bit control + * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI + * should be last write access of the write sequence. + * DEFAULT: undefined + */ +#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and + Batching FIFO addr */ +#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data + low */ +#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data + mid */ +#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data + hi and flow id */ +#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */ +#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */ + +/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO. + * DEFAULT: undefined + */ +#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */ +#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */ +#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */ +#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high + T0 */ +#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high + T1 */ + +/* 64-bit pointer to receive data buffer in host memory used for headers and + * small packets. MSB in high register. loaded by DMA state machine and + * increments as DMA writes receive data. only 50 LSB are incremented. top + * 13 bits taken from RX descriptor. + * DEFAULT: undefined + */ +#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr + low */ +#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr + high */ +#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer + low */ +#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer + high */ + +/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds + * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of + * one of the 64 byte locations in the Batching table. LOW holds 32 LSB. + * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set + * to 0 for PIO access. DATA_HIGH should be last write of write sequence. 
+ * layout: + * reassmbl ptr [78:15] | reassmbl index [14:1] | reassmbl entry valid [0] + * DEFAULT: undefined + */ +#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table + address */ +#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */ + +#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table + data low */ +#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table + data mid */ +#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table + data high */ + +/* cassini+ only */ +/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to + * 0. same semantics as primary desc/complete rings. + */ +#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring + 2 base low */ +#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring + 2 base high */ +#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring + 2 base low. 4 total */ +#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring + 2 base high. 4 total */ +#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1)) +#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1)) +#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */ +#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2 + reg */ +#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2 + head reg. 4 total. */ +#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2 + tail reg. 4 total. */ +#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1)) +#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1)) +#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2 + thresholds */ +#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK +#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT + +/** header parser registers **/ + +/* RX parser configuration register. + * DEFAULT: 0x1651004 + */ +#define REG_HP_CFG 0x4140 /* header parser + configuration reg */ +#define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */ +#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors + 0 = 64. 0x3f = 63 */ +#define HP_CFG_NUM_CPU_SHIFT 2 +#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment + TCP seq # by one when + stored in FDBM */ +#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data + needed to be considered + for reassembly */ +#define HP_CFG_TCP_THRESH_SHIFT 9 + +/* access to RX Instruction RAM. 5-bit register/counter holds addr + * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI. + * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access + * of sequence. 
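+ * e.g., to load an entry, write the 5-bit index to REG_HP_INSTR_RAM_ADDR, + * then the data registers, with _DATA_HI written last.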
+ * DEFAULT: undefined + */ +#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM + address */ +#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */ +#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM + data low */ +#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF +#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0 +#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000 +#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16 +#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000 +#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20 +#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000 +#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22 +#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM + data mid */ +#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003 +#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0 +#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C +#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2 +#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0 +#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6 +#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800 +#define HP_INSTR_RAM_MID_FOFF_SHIFT 11 +#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000 +#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18 +#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000 +#define HP_INSTR_RAM_MID_SOFF_SHIFT 23 +#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000 +#define HP_INSTR_RAM_MID_OP_SHIFT 30 +#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM + data high */ +#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF +#define HP_INSTR_RAM_HI_VAL_SHIFT 0 +#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000 +#define HP_INSTR_RAM_HI_MASK_SHIFT 16 + +/* PIO access into RX Header parser data RAM and flow database. + * 11-bit register. Data fills the LSB portion of bus if less than 32 bits. + * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM. + * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120] + * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access + * flow database. + * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg + * should be the last write access of the write sequence. + * DEFAULT: undefined + */ +#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB + RAM address */ +#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte + locations in header + parser data ram to + read/write */ +#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations + in the flow database */ +#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */ + +/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes + * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64] + * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0] + * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64] + * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0] + * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]} + * FLOW_DB(10) = bit 0 has value for flow valid + * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0] + */ +#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */ +#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4) + +/* diagnostics for RX Header Parser block. + * ASUN: the header parser state machine register is used for diagnostics + * purposes. however, the spec doesn't have any details on it. 
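The status words defined below decode with plain mask-and-shift extraction; since no *_SHIFT constants are provided for them, the shift counts in this sketch are simply derived from the masks (the helper and the pr_debug() reporting are illustrative):

/* Illustrative only: pull a few fields out of HP status word 0. */
static void cas_dump_hp_status0(void __iomem *regs)
{
        u32 v = readl(regs + REG_HP_STATUS0);

        pr_debug("HP status0: sap=%#x l3_off=%u cpu=%u opcode=%u\n",
                 (v & HP_STATUS0_SAP_MASK) >> 16,
                 (v & HP_STATUS0_L3_OFF_MASK) >> 9,
                 (v & HP_STATUS0_LB_CPUNUM_MASK) >> 3,
                 v & HP_STATUS0_HRP_OPCODE_MASK);
}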
+ */ +#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */ +#define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */ +#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */ +#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */ +#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU + number */ +#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */ + +#define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */ +#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */ +#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */ +#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */ +#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */ + +#define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */ +#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */ +#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start + start offset */ +#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */ +#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */ +#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o + reassembly */ +#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split + enable */ +#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload + check */ +#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length + equal to zero */ +#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload + chk */ +#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload + threshold */ +#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */ +#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */ +#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */ +#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */ +#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */ +#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */ + +/* BIST for header parser(HP) and flow database memories (FDBM). set _START + * to start BIST. controller clears _START on completion. _START can also + * be cleared to force termination of BIST. a bit set indicates that that + * memory passed its BIST. + */ +#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */ +#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */ +#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */ +#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */ +#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */ +#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */ +#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */ +#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0 + bank 1 */ +#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1 + bank 2 */ +#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2 + bank 1 */ +#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3 + bank 1 */ +#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence + RAM */ +#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */ +#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */ + + +/** MAC registers. 
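The first registers in this block are the software reset commands, described just below as self-clearing once the command has completed. A conservative way to use them is to write the reset request and poll for it to clear; a minimal sketch, in which the request bit (0x1), the retry limit and the delay are assumptions of the example rather than values taken from this header:

/* Illustrative only: issue a TX MAC software reset and wait for it to
 * self-clear.  Bit 0 as the request bit, the loop bound and the delay
 * are assumptions made for this sketch.
 */
static int cas_tx_mac_reset(void __iomem *regs)
{
        int limit = 100;

        writel(0x1, regs + REG_MAC_TX_RESET);
        while (limit-- > 0) {
                if (!(readl(regs + REG_MAC_TX_RESET) & 0x1))
                        return 0;
                udelay(10);
        }
        return -1;      /* reset never completed */
}

The same pattern applies to REG_MAC_RX_RESET.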
**/ +/* reset bits are set using a PIO write and self-cleared after the command + * execution has completed. + */ +#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset + command (default: 0x0) */ +#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset + command (default: 0x0) */ +/* execute a pause flow control frame transmission + DEFAULT: 0x0XXXX */ +#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */ +#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time + to be sent on network + in units of slot + times */ +#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl + frame on network */ + +/* bit set indicates that event occurred. auto-cleared when status register + * is read and have corresponding mask bits in mask register. events will + * trigger an interrupt if the corresponding mask bit is 0. + * status register default: 0x00000000 + * mask register default = 0xFFFFFFFF on reset + */ +#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */ +#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame + transmision */ +#define MAC_TX_UNDERRUN 0x0002 /* terminated frame + transmission due to + data starvation in the + xmit data path */ +#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed + length passed to TX MAC + by the DMA engine */ +#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal + collision counter */ +#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive + collision counter */ +#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late + collision counter */ +#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first + collision counter */ +#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer + timer */ +#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak + attempts counter */ + +#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */ +#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of + a frame */ +#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to + RX FIFO overflow */ +#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame + counter */ +#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment + error counter */ +#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error + counter */ +#define MAC_RX_LEN_ERR 0x0020 /* rollover of length + error counter */ +#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code + violation error */ + +/* DEFAULT: 0xXXXX0000 on reset */ +#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */ +#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful + reception of a + pause control + frame */ +#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a + transition from + "not paused" to + "paused" */ +#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a + transition from + "paused" to "not + paused" */ +#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time + operand that was + received in the last + pause flow control + frame */ + +/* layout identical to TX MAC[8:0] */ +#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */ +/* layout identical to RX MAC[6:0] */ +#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */ +/* layout identical to CTRL MAC[2:0] */ +#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */ + +/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay + * imposed before writes to other bits in the TX_MAC_CFG register or any of + * the MAC parameters is performed. delay dependent upon time required to + * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). 
e.g., + * the delay for a 1518-byte frame on a 100Mbps network is 125us. + * alternatively, just poll TX_CFG_EN until it reads back as 0. + * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and + * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should + * be 0x200 (slot time of 512 bytes) + */ +#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */ +#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will + force TXMAC state + machine to remain in + idle state or to + transition to idle state + on completion of an + ongoing packet. */ +#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral + process. set to 1 when + full duplex and 0 when + half duplex */ +#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff + algorithm. set to 1 when + full duplex and 0 when + half duplex */ +#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the + Rx-to-TX IPG. after + receiving a frame, TX + MAC will reset its + deferral process to + carrier sense for the + amount of time = IPG0 + + IPG1 and commit to + transmission for time + specified in IPG2. when + 0 or when xmitting frames + back-to-pack (Tx-to-Tx + IPG), TX MAC ignores + IPG0 and will only use + IPG1 for deferral time. + IPG2 still used. */ +#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily + give up on frame + xmission. if backoff + algorithm reaches the + ATTEMPT_LIMIT, it will + clear attempts counter + and continue trying to + send the frame as + specified by + GIVE_UP_LIM. when 0, + TX MAC will execute + standard CSMA/CD prot. */ +#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will + continue to try to xmit + until successful. when + 0, TX MAC will continue + to try xmitting until + successful or backoff + algorithm reaches + ATTEMPT_LIMIT*16 */ +#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable + backoff algorithm. TX + MAC will not back off + after a xmission attempt + that resulted in a + collision. */ +#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that + deferral process is reset + in response to carrier + sense during the entire + duration of IPG. TX MAC + will only commit to frame + xmission after frame + xmission has actually + begun. */ +#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate + CRC for all xmitted + packets. when clear, CRC + generation is dependent + upon NO_CRC bit in the + xmit control word from + TX DMA */ +#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the + carrier extension + feature. this allows for + longer collision domains + by extending the carrier + and collision window + from the end of FCS until + the end of the slot time + if necessary. Required + for half-duplex at 1Gbps, + clear otherwise. */ + +/* when CRC is not stripped, reassembly packets will not contain the CRC. + * these will be stripped by HRP because it reassembles layer 4 data, and the + * CRC is layer 2. however, non-reassembly packets will still contain the CRC + * when passed to the host. to ensure proper operation, need to wait 3.2ms + * after clearing RX_CFG_EN before writing to any other RX MAC registers + * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears + * to 0. similary, HASH_FILTER_EN and ADDR_FILTER_EN have the same + * restrictions as CFG_EN. + */ +#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */ +#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */ +#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0. 
+ feature not supported */ +#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the + last 4 bytes of a + received frame. */ +#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */ +#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid + multicast frames (group + bit in DA field set) */ +#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter + multicast addresses */ +#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use + address filtering regs + to filter both unicast + and multicast + addresses */ +#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to + RX DMA by setting BAD + bit but not Abort bit + in the status. CRC, + framing, and length errs + will not increment + error counters. frames + which don't match dest + addr will be passed up + w/ BAD bit set. */ +#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of + packet bursts generated + by carrier extension + with packet bursting + senders. only applies + to half-duplex 1Gbps */ + +/* DEFAULT: 0x0 */ +#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */ +#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for + sending pause flow ctrl + frames */ +#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received + pause flow ctrl frames */ +#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl + packets to RX DMA */ + +/* to ensure proper operation, a global initialization sequence should be + * performed when a loopback config is entered or exited. if programmed after + * a hw or global sw reset, RX/TX MAC software reset and initialization + * should be done to ensure stable clocking. + * DEFAULT: 0x0 + */ +#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */ +#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers + on MII xmit bus */ +#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data + path to GMII recv data + path. phy mode register + clock selection must be + set to GMII mode and + GMII_MODE should be set + to 1. in loopback mode, + REFCLK will drive the + entire mac core. 0 for + normal operation. */ +#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data + path during packet + xmission. clear to 0 + in any full duplex mode, + in any loopback mode, + or in half-duplex SERDES + or SLINK modes. set when + in half-duplex when + using external phy. */ +#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII + clocks and datapath */ +#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable + external tristate buffer + on the MII receive + bus. */ +#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */ +#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */ + +#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg. + recommended: 0x00 */ +#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg + recommended: 0x08 */ +#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg + recommended: 0x04 */ +#define REG_MAC_SLOT_TIME 0x604C /* slot time reg + recommended: 0x40 */ +#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg + recommended: 0x40 */ + +/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size. 
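The recommended value quoted just below packs a maximum burst of 0x2000 bytes and a maximum frame of 0x05EE (1518) bytes into the two fields; a small sketch of composing the register value from the field definitions that follow (the helper name is illustrative):

/* Illustrative only: compose REG_MAC_FRAMESIZE_MAX from its two fields. */
static u32 cas_framesize_max(u32 max_burst, u32 max_frame)
{
        return ((max_burst << MAC_FRAMESIZE_MAX_BURST_SHIFT) &
                MAC_FRAMESIZE_MAX_BURST_MASK) |
               ((max_frame << MAC_FRAMESIZE_MAX_FRAME_SHIFT) &
                MAC_FRAMESIZE_MAX_FRAME_MASK);
}

/* cas_framesize_max(0x2000, 0x05EE) would yield 0x200005EE */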
+ * recommended value: 0x2000.05EE + */ +#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */ +#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */ +#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16 +#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */ +#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0 +#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of + preamble bytes that the + TX MAC will xmit at the + beginning of each frame + value should be 2 or + greater. recommended + value: 0x07 */ +#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration + of jam in units of media + byte time. recommended + value: 0x04 */ +#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. # + of attempts TX MAC will + make to xmit a frame + before it resets its + attempts counter. after + the limit has been + reached, TX MAC may or + may not drop the frame + dependent upon value + in TX_MAC_CFG. + recommended + value: 0x10 */ +#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg. + type field of a MAC + ctrl frame. recommended + value: 0x8808 */ + +/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes. + * register contains comparison + * 0 16 MSB of primary MAC addr [47:32] of DA field + * 1 16 middle bits "" [31:16] of DA field + * 2 16 LSB "" [15:0] of DA field + * 3*x 16MSB of alt MAC addr 1-15 [47:32] of DA field + * 4*x 16 middle bits "" [31:16] + * 5*x 16 LSB "" [15:0] + * 42 16 MSB of MAC CTRL addr [47:32] of DA. + * 43 16 middle bits "" [31:16] + * 44 16 LSB "" [15:0] + * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames. + * if there is a match, MAC will set the bit for alternative address + * filter pass [15] + + * here is the map of registers given MAC address notation: a:b:c:d:e:f + * ab cd ef + * primary addr reg 2 reg 1 reg 0 + * alt addr 1 reg 5 reg 4 reg 3 + * alt addr x reg 5*x reg 4*x reg 3*x + * ctrl addr reg 44 reg 43 reg 42 + */ +#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */ +#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4) +#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg + [47:32] */ +#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg + [31:16] */ +#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg + [15:0] */ +#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1 + mask reg. 8-bit reg + contains nibble mask for + reg 2 and 1. */ +#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask + reg */ + +/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes + * 16-bit registers contain bits of the hash table. + * reg x -> [16*(15 - x) + 15 : 16*(15 - x)]. + * e.g., 15 -> [15:0], 0 -> [255:240] + */ +#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */ +#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4) + +/* statistics registers. these registers generate an interrupt on + * overflow. recommended initialization: 0x0000. most are 16-bits except + * for PEAK_ATTEMPTS register which is 8 bits. + */ +#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision + counter. */ +#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt + successful collision + counter */ +#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision + counter */ +#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */ +#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. 
time base + is the media byte + clock/256 */ +#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */ +#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */ +#define REG_MAC_LEN_ERR 0x61BC /* length error counter */ +#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */ +#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */ +#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation + error counter */ + +/* misc registers */ +#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg. + 10-bit register used as a + seed for the random number + generator for the CSMA/CD + backoff algorithm. only + programmed after power-on + reset and should be a + random value which has a + high likelihood of being + unique for each MAC + attached to a network + segment (e.g., 10 LSB of + MAC address) */ + +/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address + * map + */ + +/* 27-bit register has the current state for key state machines in the MAC */ +#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */ +#define MAC_SM_RLM_MASK 0x07800000 +#define MAC_SM_RLM_SHIFT 23 +#define MAC_SM_RX_FC_MASK 0x00700000 +#define MAC_SM_RX_FC_SHIFT 20 +#define MAC_SM_TLM_MASK 0x000F0000 +#define MAC_SM_TLM_SHIFT 16 +#define MAC_SM_ENCAP_SM_MASK 0x0000F000 +#define MAC_SM_ENCAP_SM_SHIFT 12 +#define MAC_SM_TX_REQ_MASK 0x00000C00 +#define MAC_SM_TX_REQ_SHIFT 10 +#define MAC_SM_TX_FC_MASK 0x000003C0 +#define MAC_SM_TX_FC_SHIFT 6 +#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038 +#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3 +#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007 +#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0 + +/** MIF registers. the MIF can be programmed in either bit-bang or + * frame mode. + **/ +#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock. + 1 -> 0 will generate a + rising edge. 0 -> 1 will + generate a falling edge. */ +#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit + register generates data */ +#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output + enable. enable when + xmitting data from MIF to + transceiver. */ + +/* 32-bit register serves as an instruction register when the MIF is + * programmed in frame mode. load this register w/ a valid instruction + * (as per IEEE 802.3u MII spec). poll this register to check for instruction + * execution completion. during a read operation, this register will also + * contain the 16-bit data returned by the tranceiver. unless specified + * otherwise, fields are considered "don't care" when polling for + * completion. + */ +#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */ +#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame. + load w/ 01 when + issuing an instr */ +#define MIF_FRAME_ST 0x40000000 /* STart of frame */ +#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a + write. 10 for a + read */ +#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */ +#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */ +#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when + issuing an instr, + this field should be + loaded w/ the XCVR + addr */ +#define MIF_FRAME_PHY_ADDR_SHIFT 23 +#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address. + when issuing an instr, + addr of register + to be read/written */ +#define MIF_FRAME_REG_ADDR_SHIFT 18 +#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB. + when issuing an instr, + set this bit to 1 */ +#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB. + when issuing an instr, + set this bit to 0. 
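Putting the frame fields together, a frame-mode transceiver read amounts to composing one instruction word, writing it to REG_MIF_FRAME and polling until the turn-around LSB reads back as 1, at which point the low 16 bits hold the returned data. A minimal sketch, assuming a bounded busy-wait is acceptable (the loop bound and delay are arbitrary choices for the example):

/* Illustrative only: read one transceiver register in MIF frame mode. */
static int cas_mif_frame_read(void __iomem *regs, int phy_addr, int reg)
{
        int limit = 3200;
        u32 cmd;

        cmd  = MIF_FRAME_ST | MIF_FRAME_OP_READ | MIF_FRAME_TURN_AROUND_MSB;
        cmd |= (phy_addr << MIF_FRAME_PHY_ADDR_SHIFT) & MIF_FRAME_PHY_ADDR_MASK;
        cmd |= (reg << MIF_FRAME_REG_ADDR_SHIFT) & MIF_FRAME_REG_ADDR_MASK;
        writel(cmd, regs + REG_MIF_FRAME);

        while (limit-- > 0) {
                udelay(10);
                cmd = readl(regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return cmd & MIF_FRAME_DATA_MASK;
        }
        return -1;      /* instruction never completed */
}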
+ when polling for + completion, 1 means + that instr execution + has been completed */ +#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload + load with 16-bit data + to be written in + transceiver reg for a + write. doesn't matter + in a read. when + polling for + completion, field is + "don't care" for write + and 16-bit data + returned by the + transceiver for a + read (if valid bit + is set) */ +#define REG_MIF_CFG 0x6210 /* MIF config reg */ +#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1 + 0 -> select MDIO_0 */ +#define MIF_CFG_POLL_EN 0x0002 /* enable polling + mechanism. if set, + BB_MODE should be 0 */ +#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode + 0 -> frame mode */ +#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be + used by polling mode. + only meaningful if POLL_EN + is set to 1 */ +#define MIF_CFG_POLL_REG_SHIFT 3 +#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose. + when MDIO_0 is idle, + 1 -> tranceiver is + connected to MDIO_0. + when MIF is communicating + w/ MDIO_0 in bit-bang + mode, this bit indicates + the incoming bit stream + during a read op */ +#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose. + when MDIO_1 is idle, + 1 -> transceiver is + connected to MDIO_1. + when MIF is communicating + w/ MDIO_1 in bit-bang + mode, this bit indicates + the incoming bit stream + during a read op */ +#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* tranceiver address to + be polled */ +#define MIF_CFG_POLL_PHY_SHIFT 10 + +/* 16-bit register used to determine which bits in the POLL_STATUS portion of + * the MIF_STATUS register will cause an interrupt. if a mask bit is 0, + * corresponding bit of the POLL_STATUS will generate a MIF interrupt when + * set. DEFAULT: 0xFFFF + */ +#define REG_MIF_MASK 0x6214 /* MIF mask reg */ + +/* 32-bit register used when in poll mode. auto-cleared after being read */ +#define REG_MIF_STATUS 0x6218 /* MIF status reg */ +#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains + the "latest image" + update of the XCVR + reg being read */ +#define MIF_STATUS_POLL_DATA_SHIFT 16 +#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates + which bits in the + POLL_DATA field have + changed since the + MIF_STATUS reg was + last read */ +#define MIF_STATUS_POLL_STATUS_SHIFT 0 + +/* 7-bit register has current state for all state machines in the MIF */ +#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */ +#define MIF_SM_CONTROL_MASK 0x07 /* control state machine + state */ +#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine + state */ + +/** PCS/Serialink. the following registers are equivalent to the standard + * MII management registers except that they're directly mapped in + * Cassini's register space. + **/ + +/* the auto-negotiation enable bit should be programmed the same at + * the link partner as in the local device to enable auto-negotiation to + * complete. when that bit is reprogrammed, auto-neg/manual config is + * restarted automatically. + * DEFAULT: 0x1040 + */ +#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */ +#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on + writes */ +#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS + to MAC interface is + activated regardless + of activity */ +#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS + behaviour same for + half and full dplx */ +#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing. + restart auto- + negotiation */ +#define PCS_MII_ISOLATE 0x0400 /* read as 0. 
ignored + on writes */ +#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored + on writes */ +#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes + through automatic + link config before it + can be used. when 0, + link can be used + w/out any link config + phase */ +#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on + writes */ +#define PCS_MII_RESET 0x8000 /* reset PCS. self-clears + when done */ + +/* DEFAULT: 0x0108 */ +#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */ +#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */ +#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */ +#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up. + 0 -> link down. 0 is + latched so that 0 is + kept until read. read + 2x to determine if the + link has gone up again */ +#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform + auto-neg) */ +#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected + from received link code + word. only valid after + auto-neg completed */ +#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation + completed + 0 -> auto-negotiation not + completed */ +#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an + indication that this is + a 1000 Base-X PHY. writes + to it are ignored */ + +/* used during auto-negotiation. + * DEFAULT: 0x00E0 + */ +#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement + reg */ +#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex + 1000 Base-X */ +#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex + 1000 Base-X */ +#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE + symmetric capability */ +#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE + asymmetric capability */ +#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13 + to optionally indicate to + link partner that chip is + going off-line. bit12 will + get set when signal + detect == FAIL and will + remain set until + successful negotiation */ +#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */ +#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */ + +/* contents updated as a result of autonegotiation. layout and definitions + * identical to PCS_MII_ADVERT + */ +#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner + ability reg */ +#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD +#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD +#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE +#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE +#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK +#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK +#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE + +/* DEFAULT: 0x0 */ +#define REG_PCS_CFG 0x9010 /* PCS config reg */ +#define PCS_CFG_EN 0x01 /* enable PCS. must be + 0 when modifying + PCS_MII_ADVERT */ +#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to + OK. bit is + non-resettable */ +#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation + of optical signal to make + signal detect okay when + signal is low */ +#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter + measurements. a single + code group is xmitted + regularly. + 0x0 = normal operation + 0x1 = high freq test + pattern, D21.5 + 0x2 = low freq test + pattern, K28.7 + 0x3 = reserved */ +#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto- + negotiation timer to + a few cycles for test + purposes */ + +/* used for diagnostic purposes. 
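For instance, a coarse link-up check against the state machine register defined below only needs to mask out the link-state field and compare it with the "up" encoding; the helper is illustrative:

/* Illustrative only: report whether the PCS link state machine is up. */
static int cas_pcs_link_is_up(void __iomem *regs)
{
        u32 state = readl(regs + REG_PCS_STATE_MACHINE);

        return (state & PCS_SM_LINK_STATE_MASK) == SM_LINK_STATE_UP;
}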
bits 20-22 autoclear on read */ +#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine + and diagnostic reg */ +#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate + xmission of idle. + otherwise, xmission of + a packet */ +#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception + of idle. otherwise, + reception of packet */ +#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of + sync */ +#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3 + indicates reception of + Config codes. cycling + through 0-1 indicates + reception of idles */ +#define PCS_SM_LINK_STATE_MASK 0x0001E000 +#define SM_LINK_STATE_UP 0x00016000 /* link state is up */ + +#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to + recept of Config + codes */ +#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to + loss of sync */ +#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes + from OK to FAIL. bit29 + will also be set if + this is set */ +#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to + receipt of breaklink + C codes from partner. + C codes w/ 0 content + received triggering + start/restart of + autonegotiation. + should be sent for + no longer than 20ms */ +#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being + initialized. see serdes + state reg */ +#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or + not received */ +#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not + achieved */ +#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes + w/ ack bit set */ +#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues + to send C codes + instead of idle + symbols or pkt data */ + +/* this register indicates interrupt changes in specific PCS MII status bits. + * PCS_INT may be masked at the ISR level. only a single bit is implemented + * for link status change. + */ +#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */ +#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed + since last read */ + +/* control which network interface is used. no more than one bit should + * be set. + * DEFAULT: none + */ +#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */ +#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and + MII/GMII is selected. + selection between MII and + GMII is controlled by + XIF_CFG */ +#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the + 10-bit interface */ + +/* input to serdes chip or serialink block */ +#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */ +#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on + serdes interface */ +#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier + detection. should be + 0x0 for normal + operation */ +#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1] + to REFCLK when set. + when clear, receiver + clock locks to incoming + serial data */ + +/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins. + * should be 0x0 for normal operations. + * 0b000 normal operation, PROM address[3:0] selected + * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read + * 0b010 rxmac req, rx ack, rx tag, rx clk shared + * 0b011 txmac req, tx ack, tx tag, tx retry req + * 0b100 tx tp3, tx tp2, tx tp1, tx tp0 + * 0b101 R period RX, R period TX, R period HP, R period BIM + * DEFAULT: 0x0 + */ +#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */ +#define PCS_SOS_PROM_ADDR_MASK 0x0007 + +/* used for diagnostics. 
this register indicates progress of the SERDES + * boot up. + * 0b00 undergoing reset + * 0b01 waiting 500us while lockrefn is asserted + * 0b10 waiting for comma detect + * 0b11 receive data is synchronized + * DEFAULT: 0x0 + */ +#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */ +#define PCS_SERDES_STATE_MASK 0x03 + +/* used for diagnostics. indicates number of packets transmitted or received. + * counters rollover w/out generating an interrupt. + * DEFAULT: 0x0 + */ +#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */ +#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */ +#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS + whether they + encountered an error + or not */ + +/** LocalBus Devices. the following provides run-time access to the + * Cassini's PROM + ***/ +#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time + access */ +#define REG_EXPANSION_ROM_RUN_END 0x17FFFF + +#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus + device */ +#define REG_SECOND_LOCALBUS_END 0x1FFFFF + +/* entropy device */ +#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START +#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00) +#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04) +#define ENTROPY_STATUS_DRDY 0x01 +#define ENTROPY_STATUS_BUSY 0x02 +#define ENTROPY_STATUS_CIPHER 0x04 +#define ENTROPY_STATUS_BYPASS_MASK 0x18 +#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05) +#define ENTROPY_MODE_KEY_MASK 0x07 +#define ENTROPY_MODE_ENCRYPT 0x40 +#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06) +#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07) +#define ENTROPY_RESET_DES_IO 0x01 +#define ENTROPY_RESET_STC_MODE 0x02 +#define ENTROPY_RESET_KEY_CACHE 0x04 +#define ENTROPY_RESET_IV 0x08 +#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08) +#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10) +#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x)) + +/* phys of interest w/ their special mii registers */ +#define PHY_LUCENT_B0 0x00437421 +#define LUCENT_MII_REG 0x1F + +#define PHY_NS_DP83065 0x20005c78 +#define DP83065_MII_MEM 0x16 +#define DP83065_MII_REGD 0x1D +#define DP83065_MII_REGE 0x1E + +#define PHY_BROADCOM_5411 0x00206071 +#define PHY_BROADCOM_B0 0x00206050 +#define BROADCOM_MII_REG4 0x14 +#define BROADCOM_MII_REG5 0x15 +#define BROADCOM_MII_REG7 0x17 +#define BROADCOM_MII_REG8 0x18 + +#define CAS_MII_ANNPTR 0x07 +#define CAS_MII_ANNPRR 0x08 +#define CAS_MII_1000_CTRL 0x09 +#define CAS_MII_1000_STATUS 0x0A +#define CAS_MII_1000_EXTEND 0x0F + +#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */ +/* + * if autoneg is disabled, here's the table: + * BMCR_SPEED100 = 100Mbps + * BMCR_SPEED1000 = 1000Mbps + * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps + */ +#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */ + +#define CAS_ADVERTISE_1000HALF 0x0100 +#define CAS_ADVERTISE_1000FULL 0x0200 +#define CAS_ADVERTISE_PAUSE 0x0400 +#define CAS_ADVERTISE_ASYM_PAUSE 0x0800 + +/* regular lpa register */ +#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE +#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE + +/* 1000_STATUS register */ +#define CAS_LPA_1000HALF 0x0400 +#define CAS_LPA_1000FULL 0x0800 + +#define CAS_EXTEND_1000XFULL 0x8000 +#define CAS_EXTEND_1000XHALF 0x4000 +#define CAS_EXTEND_1000TFULL 0x2000 +#define CAS_EXTEND_1000THALF 0x1000 + +/* cassini header parser firmware */ +typedef struct cas_hp_inst { + const char *note; + + u16 mask, val; + + u8 op; + u8 soff, snext; /* if match succeeds, new offset 
and match */ + u8 foff, fnext; /* if match fails, new offset and match */ + /* output info */ + u8 outop; /* output opcode */ + + u16 outarg; /* output argument */ + u8 outenab; /* output enable: 0 = not, 1 = if match + 2 = if !match, 3 = always */ + u8 outshift; /* barrel shift right, 4 bits */ + u16 outmask; +} cas_hp_inst_t; + +/* comparison */ +#define OP_EQ 0 /* packet == value */ +#define OP_LT 1 /* packet < value */ +#define OP_GT 2 /* packet > value */ +#define OP_NP 3 /* new packet */ + +/* output opcodes */ +#define CL_REG 0 +#define LD_FID 1 +#define LD_SEQ 2 +#define LD_CTL 3 +#define LD_SAP 4 +#define LD_R1 5 +#define LD_L3 6 +#define LD_SUM 7 +#define LD_HDR 8 +#define IM_FID 9 +#define IM_SEQ 10 +#define IM_SAP 11 +#define IM_R1 12 +#define IM_CTL 13 +#define LD_LEN 14 +#define ST_FLG 15 + +/* match setp #s for IP4TCP4 */ +#define S1_PCKT 0 +#define S1_VLAN 1 +#define S1_CFI 2 +#define S1_8023 3 +#define S1_LLC 4 +#define S1_LLCc 5 +#define S1_IPV4 6 +#define S1_IPV4c 7 +#define S1_IPV4F 8 +#define S1_TCP44 9 +#define S1_IPV6 10 +#define S1_IPV6L 11 +#define S1_IPV6c 12 +#define S1_TCP64 13 +#define S1_TCPSQ 14 +#define S1_TCPFG 15 +#define S1_TCPHL 16 +#define S1_TCPHc 17 +#define S1_CLNP 18 +#define S1_CLNP2 19 +#define S1_DROP 20 +#define S2_HTTP 21 +#define S1_ESP4 22 +#define S1_AH4 23 +#define S1_ESP6 24 +#define S1_AH6 25 + +#define CAS_PROG_IP46TCP4_PREAMBLE \ +{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \ + CL_REG, 0x3ff, 1, 0x0, 0x0000}, \ +{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \ + IM_CTL, 0x00a, 3, 0x0, 0xffff}, \ +{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \ + LD_SAP, 0x100, 3, 0x0, 0xffff}, \ +{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \ + LD_SUM, 0x00a, 1, 0x0, 0x0000}, \ +{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \ + LD_LEN, 0x03e, 1, 0x0, 0xffff}, \ +{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \ + LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \ +{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \ + LD_SUM, 0x015, 1, 0x0, 0x0000}, \ +{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \ + IM_R1, 0x128, 1, 0x0, 0xffff}, \ +{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \ + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \ +{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \ + LD_LEN, 0x03f, 1, 0x0, 0xffff} + +#ifdef USE_HP_IP46TCP4 +static cas_hp_inst_t cas_prog_ip46tcp4tab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, + S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 
2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x000, 0, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab +#endif +#endif + +/* + * Alternate table load which excludes HTTP server traffic from reassembly. + * It is substantially similar to the basic table, with one extra state + * and a few extra compares. */ +#ifdef USE_HP_IP46TCP4NOHTTP +static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */ + { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, + S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + CL_REG, 0x002, 3, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x044, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4NOHTTP_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab +#endif +#endif + +/* match step #s for IP4FRAG */ +#define S3_IPV6c 11 +#define S3_TCP64 12 +#define S3_TCPSQ 13 +#define S3_TCPFG 14 +#define S3_TCPHL 15 +#define S3_TCPHc 16 +#define S3_FRAG 17 +#define S3_FOFF 18 +#define S3_CLNP 19 + +#ifdef USE_HP_IP4FRAG +static cas_hp_inst_t cas_prog_ip4fragtab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, + CL_REG, 0x3ff, 1, 0x0, 0x0000}, + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x00a, 3, 0x0, 0xffff}, + { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + LD_SAP, 0x100, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG, + LD_LEN, 0x03e, 3, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP, + LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP, + LD_LEN, 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0, + S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc, + 
LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, + LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */ + { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, + LD_SEQ, 0x040, 1, 0xD, 0xfff8}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { NULL }, +}; +#ifdef HP_IP4FRAG_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip4fragtab +#endif +#endif + +/* + * Alternate table which does batching without reassembly + */ +#ifdef USE_HP_IP46TCP4BATCH +static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, + S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */ + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4BATCH_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab +#endif +#endif + +/* Workaround for Cassini rev2 descriptor corruption problem. + * Does batching without reassembly, and sets the SAP to a known + * data pattern for all packets. + */ +#ifdef USE_HP_WORKAROUND +static cas_hp_inst_t cas_prog_workaroundtab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, + S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} , + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x04a, 3, 0x0, 0xffff}, + { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + IM_SAP, 0x6AE, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, + LD_LEN, 0x03e, 1, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, + LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, + IM_R1, 0x128, 1, 0x0, 0xffff}, + { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, + LD_LEN, 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, 
S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_SAP, 0x6AE, 3, 0x0, 0xffff} , + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { NULL }, +}; +#ifdef HP_WORKAROUND_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_workaroundtab +#endif +#endif + +#ifdef USE_HP_ENCRYPT +static cas_hp_inst_t cas_prog_encryptiontab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, + S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000}, + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x00a, 3, 0x0, 0xffff}, +#if 0 +//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */ +//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0, 0x00 + 00, +#endif + { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */ + 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + LD_SAP, 0x100, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, + LD_LEN, 0x03e, 1, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4, + LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, + IM_R1, 0x128, 1, 0x0, 0xffff}, + { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", +#if 0 +//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff, +#endif + 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN, + 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* 14:DADDR should point to dest port */ + 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, + S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000} , + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + CL_REG, 0x002, 3, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x044, 3, 0x0, 0xffff}, + { "IPV4 ESP encrypted?", /* S1_ESP4 */ + 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV4 AH encrypted?", /* S1_AH4 */ + 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV6 ESP encrypted?", /* S1_ESP6 */ +#if 0 +//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff, +#endif + 0xff00, 
0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV6 AH encrypted?", /* S1_AH6 */ +#if 0 +//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff, +#endif + 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_ENCRYPT_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_encryptiontab +#endif +#endif + +static cas_hp_inst_t cas_prog_null[] = { {NULL} }; +#ifdef HP_NULL_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_null +#endif + +/* firmware patch for NS_DP83065 */ +typedef struct cas_saturn_patch { + u16 addr; + u16 val; +} cas_saturn_patch_t; + +#if 1 +cas_saturn_patch_t cas_saturn_patch[] = { +{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009}, +{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000}, +{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000}, +{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff}, +{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025}, +{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f}, +{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026}, +{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011}, +{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d}, +{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086}, +{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f}, +{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3}, +{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047}, +{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a}, +{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047}, +{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033}, +{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f}, +{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084}, +{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004}, +{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096}, +{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c}, +{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027}, +{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084}, +{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047}, +{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a}, +{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047}, +{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054}, +{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f}, +{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084}, +{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004}, +{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6}, +{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084}, +{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003}, +{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025}, +{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6}, +{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f}, +{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7}, +{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f}, +{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec}, +{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa}, +{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7}, +{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082}, +{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001}, +{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046}, +{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081}, +{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a}, +{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020}, +{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027}, +{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084}, +{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7}, +{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084}, +{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 
0x0047}, +{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a}, +{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047}, +{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad}, +{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082}, +{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001}, +{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084}, +{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041}, +{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026}, +{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023}, +{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027}, +{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed}, +{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083}, +{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042}, +{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e}, +{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084}, +{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003}, +{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df}, +{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6}, +{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f}, +{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7}, +{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f}, +{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec}, +{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa}, +{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011}, +{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd}, +{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce}, +{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff}, +{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096}, +{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c}, +{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027}, +{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049}, +{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091}, +{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6}, +{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085}, +{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c}, +{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1}, +{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f}, +{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025}, +{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016}, +{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052}, +{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e}, +{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7}, +{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6}, +{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4}, +{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083}, +{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001}, +{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046}, +{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081}, +{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a}, +{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd}, +{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025}, +{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084}, +{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084}, +{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018}, +{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019}, +{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004}, +{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e}, +{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b}, +{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007}, +{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027}, +{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081}, +{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b}, +{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007}, +{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027}, +{0x835c, 0x00cb}, 
{0x835d, 0x0008}, {0x835e, 0x007e}, +{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd}, +{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086}, +{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049}, +{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012}, +{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071}, +{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f}, +{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084}, +{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008}, +{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6}, +{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4}, +{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006}, +{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025}, +{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016}, +{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e}, +{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd}, +{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026}, +{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082}, +{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001}, +{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084}, +{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f}, +{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec}, +{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa}, +{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7}, +{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f}, +{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd}, +{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce}, +{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff}, +{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096}, +{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c}, +{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026}, +{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012}, +{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f}, +{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027}, +{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023}, +{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027}, +{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084}, +{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051}, +{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091}, +{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e}, +{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce}, +{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff}, +{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e}, +{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd}, +{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c}, +{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce}, +{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff}, +{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e}, +{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096}, +{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c}, +{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026}, +{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024}, +{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026}, +{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018}, +{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019}, +{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001}, +{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009}, +{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020}, +{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081}, +{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015}, +{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044}, +{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1}, +{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f}, +{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044}, +{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 
0x0029}, +{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025}, +{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f}, +{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047}, +{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a}, +{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047}, +{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034}, +{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011}, +{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084}, +{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002}, +{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e}, +{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096}, +{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc}, +{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097}, +{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1}, +{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086}, +{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012}, +{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7}, +{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010}, +{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd}, +{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031}, +{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e}, +{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6}, +{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f}, +{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7}, +{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f}, +{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec}, +{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa}, +{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008}, +{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5}, +{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002}, +{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6}, +{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4}, +{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084}, +{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001}, +{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046}, +{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081}, +{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003}, +{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f}, +{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd}, +{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025}, +{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085}, +{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044}, +{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026}, +{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012}, +{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001}, +{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010}, +{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd}, +{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce}, +{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff}, +{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e}, +{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096}, +{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003}, +{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026}, +{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012}, +{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003}, +{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027}, +{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085}, +{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044}, +{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026}, +{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012}, +{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001}, +{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010}, +{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce}, +{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff}, +{0x84df, 0x0001}, 
{0x84e0, 0x0011}, {0x84e1, 0x007e}, +{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6}, +{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a}, +{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010}, +{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085}, +{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8}, +{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000}, +{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084}, +{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001}, +{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085}, +{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046}, +{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081}, +{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009}, +{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030}, +{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081}, +{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f}, +{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044}, +{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b}, +{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029}, +{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026}, +{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011}, +{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022}, +{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6}, +{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba}, +{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084}, +{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d}, +{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085}, +{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005}, +{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e}, +{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca}, +{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022}, +{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000}, +{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018}, +{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000}, +{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046}, +{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085}, +{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002}, +{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085}, +{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001}, +{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f}, +{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004}, +{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004}, +{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7}, +{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086}, +{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012}, +{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007}, +{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006}, +{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d}, +{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070}, +{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba}, +{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7}, +{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001}, +{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001}, +{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6}, +{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084}, +{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002}, +{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004}, +{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001}, +{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001}, +{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4}, +{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7}, +{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6}, +{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084}, +{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008}, +{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 
0x00b6}, +{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081}, +{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008}, +{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7}, +{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e}, +{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086}, +{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040}, +{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e}, +{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7}, +{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f}, +{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082}, +{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f}, +{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f}, +{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f}, +{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f}, +{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f}, +{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f}, +{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f}, +{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f}, +{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f}, +{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f}, +{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f}, +{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f}, +{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f}, +{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012}, +{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010}, +{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004}, +{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7}, +{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7}, +{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7}, +{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7}, +{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086}, +{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012}, +{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012}, +{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7}, +{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004}, +{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004}, +{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001}, +{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001}, +{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008}, +{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081}, +{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b}, +{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce}, +{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd}, +{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e}, +{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081}, +{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b}, +{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce}, +{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd}, +{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e}, +{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081}, +{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b}, +{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce}, +{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd}, +{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e}, +{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081}, +{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b}, +{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce}, +{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd}, +{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e}, +{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081}, +{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b}, +{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce}, +{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd}, +{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e}, +{0x8662, 0x0086}, 
{0x8663, 0x008e}, {0x8664, 0x0081}, +{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b}, +{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce}, +{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd}, +{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e}, +{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081}, +{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b}, +{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce}, +{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd}, +{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e}, +{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081}, +{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008}, +{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce}, +{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd}, +{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6}, +{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081}, +{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003}, +{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047}, +{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009}, +{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081}, +{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006}, +{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009}, +{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe}, +{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006}, +{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081}, +{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008}, +{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7}, +{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e}, +{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6}, +{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026}, +{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f}, +{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7}, +{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e}, +{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6}, +{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084}, +{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f}, +{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b}, +{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012}, +{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012}, +{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc}, +{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009}, +{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe}, +{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070}, +{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f}, +{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c}, +{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f}, +{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084}, +{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f}, +{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c}, +{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f}, +{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1}, +{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003}, +{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040}, +{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f}, +{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027}, +{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b}, +{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081}, +{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b}, +{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027}, +{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087}, +{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f}, +{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002}, +{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a}, +{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7}, +{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 
0x0086}, +{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f}, +{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012}, +{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075}, +{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7}, +{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020}, +{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f}, +{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002}, +{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071}, +{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047}, +{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097}, +{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089}, +{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f}, +{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089}, +{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f}, +{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089}, +{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f}, +{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089}, +{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f}, +{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089}, +{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7}, +{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7}, +{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6}, +{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027}, +{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f}, +{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f}, +{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f}, +{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d}, +{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078}, +{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c}, +{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6}, +{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027}, +{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f}, +{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f}, +{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f}, +{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b}, +{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d}, +{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075}, +{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c}, +{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a}, +{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027}, +{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f}, +{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f}, +{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c}, +{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083}, +{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075}, +{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078}, +{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b}, +{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc}, +{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d}, +{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000}, +{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070}, +{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072}, +{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e}, +{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6}, +{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026}, +{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce}, +{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd}, +{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e}, +{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6}, +{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026}, +{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce}, +{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd}, +{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e}, +{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6}, +{0x87e5, 0x008f}, 
{0x87e6, 0x007b}, {0x87e7, 0x0026}, +{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce}, +{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd}, +{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e}, +{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086}, +{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040}, +{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000}, +{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075}, +{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e}, +{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012}, +{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8}, +{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012}, +{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f}, +{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007}, +{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048}, +{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6}, +{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4}, +{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7}, +{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6}, +{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081}, +{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf}, +{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005}, +{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b}, +{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005}, +{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6}, +{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd}, +{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086}, +{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f}, +{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089}, +{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002}, +{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077}, +{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094}, +{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6}, +{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd}, +{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce}, +{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6}, +{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001}, +{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081}, +{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003}, +{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066}, +{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8}, +{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084}, +{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b}, +{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079}, +{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008}, +{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e}, +{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6}, +{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a}, +{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012}, +{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012}, +{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb}, +{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7}, +{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6}, +{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036}, +{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c}, +{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7}, +{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086}, +{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012}, +{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012}, +{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001}, +{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001}, +{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe}, +{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004}, +{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004}, +{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 
0x00ba}, +{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7}, +{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086}, +{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012}, +{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012}, +{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7}, +{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6}, +{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084}, +{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008}, +{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c}, +{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026}, +{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076}, +{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e}, +{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e}, +{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6}, +{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081}, +{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c}, +{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7}, +{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d}, +{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb}, +{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004}, +{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7}, +{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce}, +{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6}, +{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081}, +{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005}, +{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6}, +{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6}, +{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084}, +{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012}, +{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083}, +{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c}, +{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000}, +{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006}, +{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b}, +{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083}, +{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041}, +{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e}, +{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7}, +{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086}, +{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f}, +{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012}, +{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f}, +{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c}, +{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7}, +{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086}, +{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a}, +{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012}, +{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009}, +{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c}, +{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d}, +{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c}, +{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e}, +{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027}, +{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020}, +{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e}, +{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c}, +{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba}, +{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7}, +{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6}, +{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048}, +{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d}, +{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021}, +{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004}, +{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7}, +{0x8968, 0x0012}, 
{0x8969, 0x0004}, {0x896a, 0x00bd}, +{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f}, +{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000}, +{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000}, +{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008}, +{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5}, +{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c}, +{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba}, +{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7}, +{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6}, +{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084}, +{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001}, +{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006}, +{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7}, +{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036}, +{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7}, +{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032}, +{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026}, +{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f}, +{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089}, +{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001}, +{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1}, +{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c}, +{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027}, +{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f}, +{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005}, +{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080}, +{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080}, +{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7}, +{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6}, +{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005}, +{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f}, +{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f}, +{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4}, +{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b}, +{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007}, +{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f}, +{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000}, +{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000}, +{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000}, +{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6}, +{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6}, +{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7}, +{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001}, +{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018}, +{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018}, +{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7}, +{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6}, +{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007}, +{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4}, +{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054}, +{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7}, +{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a}, +{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039}, +{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084}, +{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022}, +{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7}, +{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6}, +{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7}, +{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6}, +{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4}, +{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f}, +{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072}, +{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072}, +{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 
0x0071}, +{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027}, +{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001}, +{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081}, +{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024}, +{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070}, +{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096}, +{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080}, +{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064}, +{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070}, +{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096}, +{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010}, +{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064}, +{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070}, +{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096}, +{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020}, +{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064}, +{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070}, +{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096}, +{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040}, +{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074}, +{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074}, +{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078}, +{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6}, +{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085}, +{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af}, +{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4}, +{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6}, +{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081}, +{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036}, +{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026}, +{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022}, +{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044}, +{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022}, +{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020}, +{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081}, +{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d}, +{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084}, +{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044}, +{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022}, +{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020}, +{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081}, +{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f}, +{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084}, +{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044}, +{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6}, +{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f}, +{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022}, +{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c}, +{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006}, +{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed}, +{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007}, +{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9}, +{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006}, +{0x8aca, 0x0039}, { 0x0, 0x0 } +}; +#else +cas_saturn_patch_t cas_saturn_patch[] = { +{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009}, +{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000}, +{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000}, +{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff}, +{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025}, +{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f}, +{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026}, +{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011}, +{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d}, +{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 
0x0086}, +{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f}, +{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3}, +{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047}, +{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a}, +{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047}, +{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033}, +{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f}, +{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084}, +{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004}, +{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096}, +{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c}, +{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027}, +{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084}, +{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047}, +{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a}, +{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047}, +{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054}, +{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f}, +{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084}, +{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004}, +{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6}, +{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084}, +{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003}, +{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025}, +{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6}, +{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f}, +{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7}, +{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f}, +{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec}, +{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa}, +{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7}, +{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082}, +{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001}, +{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046}, +{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081}, +{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a}, +{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020}, +{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027}, +{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084}, +{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7}, +{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084}, +{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047}, +{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a}, +{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047}, +{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad}, +{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082}, +{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001}, +{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084}, +{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041}, +{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026}, +{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023}, +{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027}, +{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed}, +{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083}, +{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042}, +{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e}, +{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084}, +{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003}, +{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df}, +{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6}, +{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f}, +{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7}, +{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f}, +{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec}, +{0x82de, 0x008e}, 
{0x82df, 0x00bd}, {0x82e0, 0x00fa}, +{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011}, +{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd}, +{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce}, +{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff}, +{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096}, +{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c}, +{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027}, +{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049}, +{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091}, +{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6}, +{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085}, +{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c}, +{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1}, +{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f}, +{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025}, +{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016}, +{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052}, +{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e}, +{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7}, +{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6}, +{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4}, +{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083}, +{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001}, +{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046}, +{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081}, +{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a}, +{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd}, +{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025}, +{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084}, +{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084}, +{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018}, +{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019}, +{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004}, +{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e}, +{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b}, +{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007}, +{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027}, +{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081}, +{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b}, +{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007}, +{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027}, +{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e}, +{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd}, +{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086}, +{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049}, +{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012}, +{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071}, +{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f}, +{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084}, +{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008}, +{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6}, +{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4}, +{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006}, +{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025}, +{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016}, +{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e}, +{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd}, +{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026}, +{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082}, +{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001}, +{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084}, +{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f}, +{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec}, +{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 
0x00fa}, +{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7}, +{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f}, +{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd}, +{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce}, +{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff}, +{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096}, +{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c}, +{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026}, +{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012}, +{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f}, +{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027}, +{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023}, +{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027}, +{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084}, +{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051}, +{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091}, +{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e}, +{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce}, +{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff}, +{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e}, +{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd}, +{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c}, +{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce}, +{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff}, +{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e}, +{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096}, +{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c}, +{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026}, +{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024}, +{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026}, +{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018}, +{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019}, +{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001}, +{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009}, +{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020}, +{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081}, +{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015}, +{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044}, +{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1}, +{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f}, +{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044}, +{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029}, +{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025}, +{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f}, +{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047}, +{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a}, +{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047}, +{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034}, +{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011}, +{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084}, +{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002}, +{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e}, +{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096}, +{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc}, +{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097}, +{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1}, +{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086}, +{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012}, +{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7}, +{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010}, +{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd}, +{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031}, +{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e}, +{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6}, +{0x8461, 0x0082}, 
{0x8462, 0x00fe}, {0x8463, 0x008f}, +{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7}, +{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f}, +{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec}, +{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa}, +{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008}, +{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5}, +{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002}, +{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6}, +{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4}, +{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084}, +{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001}, +{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046}, +{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081}, +{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003}, +{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f}, +{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd}, +{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025}, +{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085}, +{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044}, +{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026}, +{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012}, +{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001}, +{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010}, +{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd}, +{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce}, +{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff}, +{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e}, +{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096}, +{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003}, +{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026}, +{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012}, +{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003}, +{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027}, +{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085}, +{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044}, +{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026}, +{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012}, +{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001}, +{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010}, +{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce}, +{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff}, +{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e}, +{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6}, +{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a}, +{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010}, +{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085}, +{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8}, +{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000}, +{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084}, +{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001}, +{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085}, +{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046}, +{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081}, +{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009}, +{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030}, +{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081}, +{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f}, +{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044}, +{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b}, +{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029}, +{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026}, +{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011}, +{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022}, +{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 
0x00b6}, +{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba}, +{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084}, +{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d}, +{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085}, +{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005}, +{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e}, +{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca}, +{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022}, +{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000}, +{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018}, +{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000}, +{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046}, +{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085}, +{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002}, +{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085}, +{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001}, +{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f}, +{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004}, +{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004}, +{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7}, +{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086}, +{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012}, +{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007}, +{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006}, +{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d}, +{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070}, +{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba}, +{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7}, +{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001}, +{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001}, +{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6}, +{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084}, +{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002}, +{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004}, +{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001}, +{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001}, +{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4}, +{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7}, +{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6}, +{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084}, +{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008}, +{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6}, +{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081}, +{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008}, +{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7}, +{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e}, +{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086}, +{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040}, +{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e}, +{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7}, +{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f}, +{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082}, +{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f}, +{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f}, +{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f}, +{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f}, +{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f}, +{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f}, +{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f}, +{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f}, +{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f}, +{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f}, +{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f}, +{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f}, +{0x85e4, 0x007a}, 
{0x85e5, 0x00f7}, {0x85e6, 0x008f}, +{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012}, +{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010}, +{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004}, +{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7}, +{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7}, +{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7}, +{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7}, +{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086}, +{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012}, +{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012}, +{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7}, +{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004}, +{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004}, +{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001}, +{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001}, +{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008}, +{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081}, +{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b}, +{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce}, +{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd}, +{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e}, +{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081}, +{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b}, +{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce}, +{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd}, +{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e}, +{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081}, +{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b}, +{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce}, +{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd}, +{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e}, +{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081}, +{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b}, +{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce}, +{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd}, +{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e}, +{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081}, +{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b}, +{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce}, +{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd}, +{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e}, +{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081}, +{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b}, +{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce}, +{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd}, +{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e}, +{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081}, +{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b}, +{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce}, +{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd}, +{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e}, +{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081}, +{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008}, +{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce}, +{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd}, +{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6}, +{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081}, +{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003}, +{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047}, +{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009}, +{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081}, +{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006}, +{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009}, +{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 
0x00fe}, +{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006}, +{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081}, +{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008}, +{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7}, +{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e}, +{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6}, +{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026}, +{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f}, +{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7}, +{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e}, +{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6}, +{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084}, +{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f}, +{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b}, +{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012}, +{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012}, +{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc}, +{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009}, +{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe}, +{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070}, +{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f}, +{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c}, +{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f}, +{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084}, +{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f}, +{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c}, +{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f}, +{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1}, +{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003}, +{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040}, +{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f}, +{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027}, +{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b}, +{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081}, +{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b}, +{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027}, +{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087}, +{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f}, +{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002}, +{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a}, +{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7}, +{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086}, +{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f}, +{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012}, +{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075}, +{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7}, +{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020}, +{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f}, +{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002}, +{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071}, +{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047}, +{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097}, +{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089}, +{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f}, +{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089}, +{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f}, +{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089}, +{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f}, +{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089}, +{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f}, +{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089}, +{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7}, +{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7}, +{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6}, +{0x8767, 0x008f}, 
{0x8768, 0x0072}, {0x8769, 0x0027}, +{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f}, +{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f}, +{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f}, +{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d}, +{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078}, +{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c}, +{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6}, +{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027}, +{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f}, +{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f}, +{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f}, +{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b}, +{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d}, +{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075}, +{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c}, +{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a}, +{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027}, +{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f}, +{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f}, +{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c}, +{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083}, +{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075}, +{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078}, +{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b}, +{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc}, +{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d}, +{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000}, +{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070}, +{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072}, +{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e}, +{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6}, +{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026}, +{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce}, +{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd}, +{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e}, +{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6}, +{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026}, +{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce}, +{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd}, +{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e}, +{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6}, +{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026}, +{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce}, +{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd}, +{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e}, +{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086}, +{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040}, +{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e}, +{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075}, +{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e}, +{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012}, +{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8}, +{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012}, +{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f}, +{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007}, +{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048}, +{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6}, +{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4}, +{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7}, +{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6}, +{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081}, +{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf}, +{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005}, +{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 
0x001b}, +{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005}, +{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6}, +{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd}, +{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086}, +{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f}, +{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089}, +{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002}, +{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077}, +{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094}, +{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6}, +{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd}, +{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce}, +{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6}, +{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001}, +{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081}, +{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003}, +{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066}, +{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8}, +{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084}, +{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b}, +{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079}, +{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008}, +{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e}, +{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6}, +{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a}, +{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012}, +{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012}, +{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb}, +{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7}, +{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6}, +{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036}, +{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c}, +{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7}, +{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086}, +{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012}, +{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012}, +{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001}, +{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001}, +{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe}, +{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004}, +{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004}, +{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba}, +{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7}, +{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086}, +{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012}, +{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012}, +{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7}, +{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6}, +{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084}, +{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008}, +{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c}, +{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026}, +{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076}, +{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e}, +{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e}, +{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6}, +{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081}, +{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c}, +{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7}, +{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d}, +{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb}, +{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004}, +{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7}, +{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce}, +{0x88ea, 0x00ff}, 
{0x88eb, 0x00ff}, {0x88ec, 0x00b6}, +{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081}, +{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005}, +{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6}, +{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6}, +{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084}, +{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012}, +{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083}, +{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c}, +{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000}, +{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006}, +{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b}, +{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083}, +{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041}, +{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e}, +{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7}, +{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086}, +{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f}, +{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012}, +{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f}, +{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c}, +{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7}, +{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086}, +{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a}, +{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012}, +{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009}, +{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c}, +{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d}, +{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c}, +{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e}, +{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027}, +{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020}, +{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e}, +{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c}, +{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba}, +{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7}, +{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6}, +{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048}, +{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d}, +{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021}, +{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004}, +{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7}, +{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd}, +{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f}, +{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000}, +{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000}, +{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008}, +{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5}, +{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c}, +{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba}, +{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7}, +{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6}, +{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084}, +{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001}, +{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006}, +{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7}, +{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036}, +{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7}, +{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032}, +{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026}, +{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f}, +{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089}, +{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001}, +{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1}, +{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 
0x000c}, +{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027}, +{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f}, +{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005}, +{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080}, +{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080}, +{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7}, +{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6}, +{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005}, +{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f}, +{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f}, +{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4}, +{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b}, +{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007}, +{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f}, +{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000}, +{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000}, +{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000}, +{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6}, +{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6}, +{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7}, +{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001}, +{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018}, +{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018}, +{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7}, +{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6}, +{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007}, +{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4}, +{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054}, +{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7}, +{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a}, +{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039}, +{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084}, +{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022}, +{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7}, +{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6}, +{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7}, +{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6}, +{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4}, +{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f}, +{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072}, +{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072}, +{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071}, +{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027}, +{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001}, +{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081}, +{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024}, +{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070}, +{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096}, +{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080}, +{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064}, +{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070}, +{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096}, +{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010}, +{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064}, +{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070}, +{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096}, +{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020}, +{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064}, +{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070}, +{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096}, +{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040}, +{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074}, +{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074}, +{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078}, +{0x8a6d, 0x008f}, 
{0x8a6e, 0x0070}, {0x8a6f, 0x00b6}, +{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085}, +{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af}, +{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4}, +{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6}, +{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081}, +{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036}, +{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026}, +{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022}, +{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044}, +{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022}, +{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020}, +{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081}, +{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d}, +{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084}, +{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044}, +{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022}, +{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020}, +{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081}, +{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f}, +{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084}, +{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044}, +{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6}, +{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f}, +{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022}, +{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c}, +{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006}, +{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed}, +{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007}, +{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9}, +{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006}, +{0x8aca, 0x0039}, { 0x0, 0x0 } +}; +#endif + + +/* phy types */ +#define CAS_PHY_UNKNOWN 0x00 +#define CAS_PHY_SERDES 0x01 +#define CAS_PHY_MII_MDIO0 0x02 +#define CAS_PHY_MII_MDIO1 0x04 +#define CAS_PHY_MII(x) ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1)) + +/* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE + * is the actual size. the default index for the various rings is + * 8. NOTE: there a bunch of alignment constraints for the rings. to + * deal with that, i just allocate rings to create the desired + * alignment. here are the constraints: + * RX DESC and COMP rings must be 8KB aligned + * TX DESC must be 2KB aligned. + * if you change the numbers, be cognizant of how the alignment will change + * in INIT_BLOCK as well. + */ + +#define DESC_RING_I_TO_S(x) (32*(1 << (x))) +#define COMP_RING_I_TO_S(x) (128*(1 << (x))) +#define TX_DESC_RING_INDEX 4 /* 512 = 8k */ +#define RX_DESC_RING_INDEX 4 /* 512 = 8k */ +#define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */ + +#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0) +#error TX_DESC_RING_INDEX must be between 0 and 8 +#endif + +#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0) +#error RX_DESC_RING_INDEX must be between 0 and 8 +#endif + +#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0) +#error RX_COMP_RING_INDEX must be between 0 and 8 +#endif + +#define N_TX_RINGS MAX_TX_RINGS /* for QoS */ +#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK +#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */ +#define N_RX_COMP_RINGS 0x1 /* for mult. 
PCI interrupts */ + +/* number of flows that can go through re-assembly */ +#define N_RX_FLOWS 64 + +#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX) +#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX) +#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX) +#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX +#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX +#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX +#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE +#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE +#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE + +/* convert values */ +#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK)) +#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT)) +#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \ + TX_CFG_DESC_RINGN_SHIFT(y)) & \ + TX_CFG_DESC_RINGN_MASK(y)) + +/* min is 2k, but we can't do jumbo frames unless it's at least 8k */ +#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */ +#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */ +#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */ + +#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in + bytes. 0 - 9256 */ +#define TX_DESC_BUFLEN_SHIFT 0 +#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. # + of bytes to be + skipped before + csum calc begins. + value must be + even */ +#define TX_DESC_CSUM_START_SHIFT 15 +#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff. + byte offset w/in + the pkt for the + 1st csum byte. + must be > 8 */ +#define TX_DESC_CSUM_STUFF_SHIFT 21 +#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */ +#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */ +#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */ +#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */ +#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only. + CRC will not be + inserted into + outgoing frame. */ +struct cas_tx_desc { + u64 control; + u64 buffer; +}; + +/* descriptor ring for free buffers contains page-sized buffers. the index + * value is not used by the hw in any way. it's just stored and returned in + * the completion ring. + */ +struct cas_rx_desc { + u64 index; + u64 buffer; +}; + +/* received packets are put on the completion ring. 
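As an illustration (a sketch, not code from the patch): the token-pasting CAS_BASE()/CAS_VAL() helpers above are the generic pack/extract pair for every *_MASK/*_SHIFT couple in this header. For instance, a TX descriptor control word could be assembled from the TX_DESC_* fields just defined like so, where example_tx_control() is a made-up name:

        static u64 example_tx_control(u64 len, u64 csum_start, u64 csum_stuff)
        {
                u64 ctrl = CAS_BASE(TX_DESC_BUFLEN,     len)        | /* (len << 0) & 0x3fff    */
                           CAS_BASE(TX_DESC_CSUM_START, csum_start) | /* field starts at bit 15 */
                           CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff) | /* field starts at bit 21 */
                           TX_DESC_CSUM_EN | TX_DESC_SOF | TX_DESC_EOF;

                /* CAS_VAL() is the inverse: CAS_VAL(TX_DESC_BUFLEN, ctrl) gives
                 * len back, provided len fits the 14-bit buffer-length field. */
                return ctrl;
        }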
*/ +/* word 1 */ +#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL +#define RX_COMP1_DATA_SIZE_SHIFT 13 +#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL +#define RX_COMP1_DATA_OFF_SHIFT 27 +#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL +#define RX_COMP1_DATA_INDEX_SHIFT 41 +#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL +#define RX_COMP1_SKIP_SHIFT 55 +#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL +#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL +#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL +#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL +#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL +#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL +#define RX_COMP1_TYPE_SHIFT 62 + +/* word 2 */ +#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL +#define RX_COMP2_NEXT_INDEX_SHIFT 21 +#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL +#define RX_COMP2_HDR_SIZE_SHIFT 35 +#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL +#define RX_COMP2_HDR_OFF_SHIFT 44 +#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL +#define RX_COMP2_HDR_INDEX_SHIFT 50 + +/* word 3 */ +#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL +#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL +#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL +#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL +#define RX_COMP3_CSUM_START_SHIFT 12 +#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL +#define RX_COMP3_FLOWID_SHIFT 19 +#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL +#define RX_COMP3_OPCODE_SHIFT 25 +#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL +#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL +#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL +#define RX_COMP3_LOAD_BAL_SHIFT 35 +#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */ +#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */ +#define RX_COMP3_L3_HEAD_OFF_SHIFT 41 +#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */ +#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42 +#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL +#define RX_COMP3_SAP_SHIFT 48 + +/* word 4 */ +#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL +#define RX_COMP4_TCP_CSUM_SHIFT 0 +#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL +#define RX_COMP4_PKT_LEN_SHIFT 16 +#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL +#define RX_COMP4_PERFECT_MATCH_SHIFT 30 +#define RX_COMP4_ZERO 0x0000080000000000ULL +#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL +#define RX_COMP4_HASH_VAL_SHIFT 44 +#define RX_COMP4_HASH_PASS 0x1000000000000000ULL +#define RX_COMP4_BAD 0x4000000000000000ULL +#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL + +/* we encode the following: ring/index/release. only 14 bits + * are usable. + * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and + * MAX_RX_DESC_RINGS. 
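To make that encoding concrete (again a sketch with invented names, not the driver's actual receive path): the 14-bit ring/index/release cookie lives in the cas_rx_desc index field above and, per the comment on that structure, is simply echoed back by the hardware in the completion ring, where a receive handler could take it apart with the RX_INDEX_* masks defined just below. Byte-order conversion of the completion words is omitted here:

        static void example_decode_rx_comp(const struct cas_rx_comp *rxc)
        {
                u64 cookie  = CAS_VAL(RX_COMP1_DATA_INDEX, rxc->word1);
                int ring    = (cookie & RX_INDEX_RING_MASK) >> RX_INDEX_RING_SHIFT;
                int entry   = (cookie & RX_INDEX_NUM_MASK)  >> RX_INDEX_NUM_SHIFT;
                int release = (cookie & RX_INDEX_RELEASE) != 0;
                int len     = CAS_VAL(RX_COMP4_PKT_LEN, rxc->word4);

                /* ... hand 'len' bytes from page 'entry' of RX ring 'ring' up
                 * the stack, recycling the page if 'release' is set ... */
        }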
*/ +#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL +#define RX_INDEX_NUM_SHIFT 0 +#define RX_INDEX_RING_MASK 0x0000000000001000ULL +#define RX_INDEX_RING_SHIFT 12 +#define RX_INDEX_RELEASE 0x0000000000002000ULL + +struct cas_rx_comp { + u64 word1; + u64 word2; + u64 word3; + u64 word4; +}; + +enum link_state { + link_down = 0, /* No link, will retry */ + link_aneg, /* Autoneg in progress */ + link_force_try, /* Try Forced link speed */ + link_force_ret, /* Forced mode worked, retrying autoneg */ + link_force_ok, /* Stay in forced mode */ + link_up /* Link is up */ +}; + +typedef struct cas_page { + struct list_head list; + struct page *buffer; + dma_addr_t dma_addr; + int used; +} cas_page_t; + + +/* some alignment constraints: + * TX DESC, RX DESC, and RX COMP must each be 8K aligned. + * TX COMPWB must be 8-byte aligned. + * to accomplish this, here's what we do: + * + * INIT_BLOCK_RX_COMP = 64k (already aligned) + * INIT_BLOCK_RX_DESC = 8k + * INIT_BLOCK_TX = 8k + * INIT_BLOCK_RX1_DESC = 8k + * TX COMPWB + */ +#define INIT_BLOCK_TX (TX_DESC_RING_SIZE) +#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE) +#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE) + +struct cas_init_block { + struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP]; + struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC]; + struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX]; + u64 tx_compwb; +}; + +/* tiny buffers to deal with target abort issue. we allocate a bit + * over so that we don't have target abort issues with these buffers + * as well. + */ +#define TX_TINY_BUF_LEN 0x100 +#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN) + +struct cas_tiny_count { + int nbufs; + int used; +}; + +struct cas { + spinlock_t lock; /* for most bits */ + spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */ + spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */ + spinlock_t rx_inuse_lock; /* rx inuse list */ + spinlock_t rx_spare_lock; /* rx spare list */ + + void __iomem *regs; + int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS]; + int rx_old[N_RX_DESC_RINGS]; + int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; + int rx_last[N_RX_DESC_RINGS]; + + /* Set when chip is actually in operational state + * (ie. not power managed) */ + int hw_running; + int opened; + struct semaphore pm_sem; /* open/close/suspend/resume */ + + struct cas_init_block *init_block; + struct cas_tx_desc *init_txds[MAX_TX_RINGS]; + struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS]; + struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS]; + + /* we use sk_buffs for tx and pages for rx. the rx skbuffs + * are there for flow re-assembly. 
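A side note on the init-block sizing a few lines up (illustrative arithmetic only): with the default ring indices of 4,

        TX_DESC_RING_SIZE = DESC_RING_I_TO_S(4) = 32 << 4  =  512 entries
        RX_DESC_RING_SIZE = DESC_RING_I_TO_S(4) = 32 << 4  =  512 entries
        RX_COMP_RING_SIZE = COMP_RING_I_TO_S(4) = 128 << 4 = 2048 entries

so each per-ring array inside struct cas_init_block works out to

        rxcs[i]: 2048 entries * 32 bytes (struct cas_rx_comp) = 64 KB
        rxds[i]:  512 entries * 16 bytes (struct cas_rx_desc) =  8 KB
        txds[i]:  512 entries * 16 bytes (struct cas_tx_desc) =  8 KB

matching the "512 = 8k" and "2048 = 64k" remarks earlier. Provided the block itself starts on an 8 KB boundary, placing the 64 KB completion rings first keeps every descriptor ring that follows 8 KB aligned (64 KB is a multiple of 8 KB), and the single u64 tx_compwb at the tail is trivially 8-byte aligned, which is the layout the alignment comment above spells out.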
*/ + struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE]; + struct sk_buff_head rx_flows[N_RX_FLOWS]; + cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE]; + struct list_head rx_spare_list, rx_inuse_list; + int rx_spares_needed; + + /* for small packets when copying would be quicker than + mapping */ + struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE]; + u8 *tx_tiny_bufs[N_TX_RINGS]; + + u32 msg_enable; + + /* N_TX_RINGS must be >= N_RX_DESC_RINGS */ + struct net_device_stats net_stats[N_TX_RINGS + 1]; + + u32 pci_cfg[64 >> 2]; + u8 pci_revision; + + int phy_type; + int phy_addr; + u32 phy_id; +#define CAS_FLAG_1000MB_CAP 0x00000001 +#define CAS_FLAG_REG_PLUS 0x00000002 +#define CAS_FLAG_TARGET_ABORT 0x00000004 +#define CAS_FLAG_SATURN 0x00000008 +#define CAS_FLAG_RXD_POST_MASK 0x000000F0 +#define CAS_FLAG_RXD_POST_SHIFT 4 +#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \ + CAS_FLAG_RXD_POST_MASK) +#define CAS_FLAG_ENTROPY_DEV 0x00000100 +#define CAS_FLAG_NO_HW_CSUM 0x00000200 + u32 cas_flags; + int packet_min; /* minimum packet size */ + int tx_fifo_size; + int rx_fifo_size; + int rx_pause_off; + int rx_pause_on; + int crc_size; /* 4 if half-duplex */ + + int pci_irq_INTC; + int min_frame_size; /* for tx fifo workaround */ + + /* page size allocation */ + int page_size; + int page_order; + int mtu_stride; + + u32 mac_rx_cfg; + + /* Autoneg & PHY control */ + int link_cntl; + int link_fcntl; + enum link_state lstate; + struct timer_list link_timer; + int timer_ticks; + struct work_struct reset_task; +#if 0 + atomic_t reset_task_pending; +#else + atomic_t reset_task_pending; + atomic_t reset_task_pending_mtu; + atomic_t reset_task_pending_spare; + atomic_t reset_task_pending_all; +#endif + +#ifdef CONFIG_CASSINI_QGE_DEBUG + atomic_t interrupt_seen; /* 1 if any interrupts are getting through */ +#endif + + /* Link-down problem workaround */ +#define LINK_TRANSITION_UNKNOWN 0 +#define LINK_TRANSITION_ON_FAILURE 1 +#define LINK_TRANSITION_STILL_FAILED 2 +#define LINK_TRANSITION_LINK_UP 3 +#define LINK_TRANSITION_LINK_CONFIG 4 +#define LINK_TRANSITION_LINK_DOWN 5 +#define LINK_TRANSITION_REQUESTED_RESET 6 + int link_transition; + int link_transition_jiffies_valid; + unsigned long link_transition_jiffies; + + /* Tuning */ + u8 orig_cacheline_size; /* value when loaded */ +#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */ + + /* Diagnostic counters and state. */ + int casreg_len; /* reg-space size for dumping */ + u64 pause_entered; + u16 pause_last_time_recvd; + + dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; + struct pci_dev *pdev; + struct net_device *dev; +}; + +#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1)) +#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1)) +#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1)) + +#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \ + (TX_DESC_RINGN_SIZE(r) - (x) + (y))) + +#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? 
\ + (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \ + (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1) + +#define CAS_ALIGN(addr, align) \ + (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1)) + +#define RX_FIFO_SIZE 16384 +#define EXPANSION_ROM_SIZE 65536 + +#define CAS_MC_EXACT_MATCH_SIZE 15 +#define CAS_MC_HASH_SIZE 256 +#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \ + CAS_MC_HASH_SIZE) + +#define TX_TARGET_ABORT_LEN 0x20 +#define RX_SWIVEL_OFF_VAL 0x2 +#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1) +#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1) +#define RX_BLANK_INTR_PKT_VAL 0x05 +#define RX_BLANK_INTR_TIME_VAL 0x0F +#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */ + +#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1) +#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2) + +#endif /* _CASSINI_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 68f11ac1a314..eb36fd293b41 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -392,6 +392,7 @@ #define PCI_DEVICE_ID_NS_87560_USB 0x0012 #define PCI_DEVICE_ID_NS_83815 0x0020 #define PCI_DEVICE_ID_NS_83820 0x0022 +#define PCI_DEVICE_ID_NS_SATURN 0x0035 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 #define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 #define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 @@ -983,6 +984,7 @@ #define PCI_DEVICE_ID_SUN_SABRE 0xa000 #define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 +#define PCI_DEVICE_ID_SUN_CASSINI 0xabba #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_640 0x0640 -- cgit v1.2.3 From 2fab35d78f32fc107e1af4b1ec23f557fa20d911 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 27 Sep 2005 15:59:43 -0700 Subject: [NET]: Fix GCC4 compile error: sysctl in linux/if_ether.h The following is generated when compiling a recent (2.6.14-rc2-git5) kernel configured for ARM, with GCC4. CC init/main.o In file included from include/linux/netdevice.h:29, from include/net/sock.h:48, from init/main.c:50: include/linux/if_ether.h:114: error: array type has incomplete element type It seems that if CONFIG_SYSCTL is not set, then the compiler will throw an error due to the definition of the ether_table[] array Attached is a solution to the problem Signed-off-by: Ben Dooks Signed-off-by: David S. Miller --- include/linux/if_ether.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index fc2d4c8225aa..d21c305c6c64 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -111,7 +111,9 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb->mac.raw; } +#ifdef CONFIG_SYSCTL extern struct ctl_table ether_table[]; #endif +#endif #endif /* _LINUX_IF_ETHER_H */ -- cgit v1.2.3 From 664cceb0093b755739e56572b836a99104ee8a75 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 28 Sep 2005 17:03:15 +0100 Subject: [PATCH] Keys: Add possessor permissions to keys [try #3] The attached patch adds extra permission grants to keys for the possessor of a key in addition to the owner, group and other permissions bits. This makes SUID binaries easier to support without going as far as labelling keys and key targets using the LSM facilities. This patch adds a second "pointer type" to key structures (struct key_ref *) that can have the bottom bit of the address set to indicate the possession of a key. This is propagated through searches from the keyring to the discovered key. 
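As a rough illustration (not wording from the patch): with the helpers the patch adds to include/linux/key.h, quoted in the hunk further below, the encoding round-trips like this for some struct key *key, whose address has bit 0 clear because key structures are at least four-byte aligned:

        key_ref_t ref = make_key_ref(key, 1);       /* == (key_ref_t)((unsigned long)key | 1) */
        struct key *k = key_ref_to_ptr(ref);        /* masks bit 0 off again, so k == key     */
        unsigned long poss = is_key_possessed(ref); /* 1: reached via one of our own keyrings */

        key_ref_put(ref);                           /* puts the underlying struct key         */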
It has been made a separate type so that the compiler can spot attempts to dereference a potentially incorrect pointer. The "possession" attribute can't be attached to a key structure directly as it's not an intrinsic property of a key. Pointers to keys have been replaced with struct key_ref *'s wherever possession information needs to be passed through. This does assume that the bottom bit of the pointer will always be zero on return from kmem_cache_alloc(). The key reference type has been made into a typedef so that at least it can be located in the sources, even though it's basically a pointer to an undefined type. I've also renamed the accessor functions to be more useful, and all reference variables should now end in "_ref". Signed-Off-By: David Howells Signed-off-by: Linus Torvalds --- Documentation/keys.txt | 74 +++++++--- include/linux/key-ui.h | 28 ++-- include/linux/key.h | 78 ++++++++-- security/keys/internal.h | 26 ++-- security/keys/key.c | 81 ++++++----- security/keys/keyctl.c | 301 ++++++++++++++++++++------------------- security/keys/keyring.c | 86 ++++++----- security/keys/proc.c | 2 +- security/keys/process_keys.c | 164 +++++++++++++-------- security/keys/request_key.c | 36 ++++- security/keys/request_key_auth.c | 2 +- 11 files changed, 528 insertions(+), 350 deletions(-) (limited to 'include/linux') diff --git a/Documentation/keys.txt b/Documentation/keys.txt index 0321ded4b9ae..b22e7c8d059a 100644 --- a/Documentation/keys.txt +++ b/Documentation/keys.txt @@ -195,8 +195,8 @@ KEY ACCESS PERMISSIONS ====================== Keys have an owner user ID, a group access ID, and a permissions mask. The mask -has up to eight bits each for user, group and other access. Only five of each -set of eight bits are defined. These permissions granted are: +has up to eight bits each for possessor, user, group and other access. Only +five of each set of eight bits are defined. These permissions granted are: (*) View @@ -241,16 +241,16 @@ about the status of the key service: type, description and permissions. The payload of the key is not available this way: - SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY - 00000001 I----- 39 perm 1f0000 0 0 keyring _uid_ses.0: 1/4 - 00000002 I----- 2 perm 1f0000 0 0 keyring _uid.0: empty - 00000007 I----- 1 perm 1f0000 0 0 keyring _pid.1: empty - 0000018d I----- 1 perm 1f0000 0 0 keyring _pid.412: empty - 000004d2 I--Q-- 1 perm 1f0000 32 -1 keyring _uid.32: 1/4 - 000004d3 I--Q-- 3 perm 1f0000 32 -1 keyring _uid_ses.32: empty - 00000892 I--QU- 1 perm 1f0000 0 0 user metal:copper: 0 - 00000893 I--Q-N 1 35s 1f0000 0 0 user metal:silver: 0 - 00000894 I--Q-- 1 10h 1f0000 0 0 user metal:gold: 0 + SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY + 00000001 I----- 39 perm 1f1f0000 0 0 keyring _uid_ses.0: 1/4 + 00000002 I----- 2 perm 1f1f0000 0 0 keyring _uid.0: empty + 00000007 I----- 1 perm 1f1f0000 0 0 keyring _pid.1: empty + 0000018d I----- 1 perm 1f1f0000 0 0 keyring _pid.412: empty + 000004d2 I--Q-- 1 perm 1f1f0000 32 -1 keyring _uid.32: 1/4 + 000004d3 I--Q-- 3 perm 1f1f0000 32 -1 keyring _uid_ses.32: empty + 00000892 I--QU- 1 perm 1f000000 0 0 user metal:copper: 0 + 00000893 I--Q-N 1 35s 1f1f0000 0 0 user metal:silver: 0 + 00000894 I--Q-- 1 10h 001f0000 0 0 user metal:gold: 0 The flags are: @@ -637,6 +637,34 @@ call, and the key released upon close. How to deal with conflicting keys due to two different users opening the same file is left to the filesystem author to solve. 
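As an illustration (not part of the documentation patch): the widened PERM values in the /proc/keys sample above decode one byte per category, which is also why key_permission() in the key-ui.h hunk below picks key->perm >> 24 for a possessed key and >> 16 for the owning uid. The variable names here are invented, and the implicit truncation to unsigned char stands in for masking with 0xff:

        key_perm_t perm = 0x1f1f0000;   /* the updated "_uid.0" keyring line above         */

        unsigned char pos = perm >> 24; /* 0x1f: possessor may view/read/write/search/link */
        unsigned char usr = perm >> 16; /* 0x1f: the owning uid gets the same five rights  */
        unsigned char grp = perm >>  8; /* 0x00: group gets nothing                        */
        unsigned char oth = perm;       /* 0x00: other gets nothing                        */

By the same reading, metal:copper's 1f000000 grants the five rights to a possessor only, and metal:gold's 001f0000 grants them to the owning uid only.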
+Note that there are two different types of pointers to keys that may be +encountered: + + (*) struct key * + + This simply points to the key structure itself. Key structures will be at + least four-byte aligned. + + (*) key_ref_t + + This is equivalent to a struct key *, but the least significant bit is set + if the caller "possesses" the key. By "possession" it is meant that the + calling processes has a searchable link to the key from one of its + keyrings. There are three functions for dealing with these: + + key_ref_t make_key_ref(const struct key *key, + unsigned long possession); + + struct key *key_ref_to_ptr(const key_ref_t key_ref); + + unsigned long is_key_possessed(const key_ref_t key_ref); + + The first function constructs a key reference from a key pointer and + possession information (which must be 0 or 1 and not any other value). + + The second function retrieves the key pointer from a reference and the + third retrieves the possession flag. + When accessing a key's payload contents, certain precautions must be taken to prevent access vs modification races. See the section "Notes on accessing payload contents" for more information. @@ -665,7 +693,11 @@ payload contents" for more information. void key_put(struct key *key); - This can be called from interrupt context. If CONFIG_KEYS is not set then + Or: + + void key_ref_put(key_ref_t key_ref); + + These can be called from interrupt context. If CONFIG_KEYS is not set then the argument will not be parsed. @@ -689,13 +721,17 @@ payload contents" for more information. (*) If a keyring was found in the search, this can be further searched by: - struct key *keyring_search(struct key *keyring, - const struct key_type *type, - const char *description) + key_ref_t keyring_search(key_ref_t keyring_ref, + const struct key_type *type, + const char *description) This searches the keyring tree specified for a matching key. Error ENOKEY - is returned upon failure. If successful, the returned key will need to be - released. + is returned upon failure (use IS_ERR/PTR_ERR to determine). If successful, + the returned key will need to be released. + + The possession attribute from the keyring reference is used to control + access through the permissions mask and is propagated to the returned key + reference pointer if successful. (*) To check the validity of a key, this function can be called: @@ -732,7 +768,7 @@ More complex payload contents must be allocated and a pointer to them set in key->payload.data. One of the following ways must be selected to access the data: - (1) Unmodifyable key type. + (1) Unmodifiable key type. 
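Putting the new helpers and the revised keyring_search() together, a kernel-side caller that possesses a keyring might search it roughly as below. This is a sketch only: example_find_copper() is an invented function, &key_type_user stands in for whatever struct key_type the caller really uses, and "metal:copper" just reuses a description from the /proc/keys sample earlier.

        static long example_find_copper(struct key *keyring) /* a keyring we possess */
        {
                key_ref_t kref;

                kref = keyring_search(make_key_ref(keyring, 1),
                                      &key_type_user, "metal:copper");
                if (IS_ERR(kref))
                        return PTR_ERR(kref);   /* e.g. -ENOKEY if nothing matched */

                /* possession is propagated from the keyring reference, so the
                 * possessor permission byte governs what we may now do with it */
                printk(KERN_DEBUG "found key %d\n", key_ref_to_ptr(kref)->serial);

                key_ref_put(kref);
                return 0;
        }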
If the key type does not have a modify method, then the key's payload can be accessed without any form of locking, provided that it's known to be diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h index cc326174a808..918c34a8347e 100644 --- a/include/linux/key-ui.h +++ b/include/linux/key-ui.h @@ -42,11 +42,14 @@ struct keyring_list { /* * check to see whether permission is granted to use a key in the desired way */ -static inline int key_permission(const struct key *key, key_perm_t perm) +static inline int key_permission(const key_ref_t key_ref, key_perm_t perm) { + struct key *key = key_ref_to_ptr(key_ref); key_perm_t kperm; - if (key->uid == current->fsuid) + if (is_key_possessed(key_ref)) + kperm = key->perm >> 24; + else if (key->uid == current->fsuid) kperm = key->perm >> 16; else if (key->gid != -1 && key->perm & KEY_GRP_ALL && @@ -65,11 +68,14 @@ static inline int key_permission(const struct key *key, key_perm_t perm) * check to see whether permission is granted to use a key in at least one of * the desired ways */ -static inline int key_any_permission(const struct key *key, key_perm_t perm) +static inline int key_any_permission(const key_ref_t key_ref, key_perm_t perm) { + struct key *key = key_ref_to_ptr(key_ref); key_perm_t kperm; - if (key->uid == current->fsuid) + if (is_key_possessed(key_ref)) + kperm = key->perm >> 24; + else if (key->uid == current->fsuid) kperm = key->perm >> 16; else if (key->gid != -1 && key->perm & KEY_GRP_ALL && @@ -94,13 +100,17 @@ static inline int key_task_groups_search(struct task_struct *tsk, gid_t gid) return ret; } -static inline int key_task_permission(const struct key *key, +static inline int key_task_permission(const key_ref_t key_ref, struct task_struct *context, key_perm_t perm) { + struct key *key = key_ref_to_ptr(key_ref); key_perm_t kperm; - if (key->uid == context->fsuid) { + if (is_key_possessed(key_ref)) { + kperm = key->perm >> 24; + } + else if (key->uid == context->fsuid) { kperm = key->perm >> 16; } else if (key->gid != -1 && @@ -121,9 +131,9 @@ static inline int key_task_permission(const struct key *key, } -extern struct key *lookup_user_key(struct task_struct *context, - key_serial_t id, int create, int partial, - key_perm_t perm); +extern key_ref_t lookup_user_key(struct task_struct *context, + key_serial_t id, int create, int partial, + key_perm_t perm); extern long join_session_keyring(const char *name); diff --git a/include/linux/key.h b/include/linux/key.h index 970bbd916cf4..f1efa016dbf3 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -35,11 +35,18 @@ struct key; #undef KEY_DEBUGGING -#define KEY_USR_VIEW 0x00010000 /* user can view a key's attributes */ -#define KEY_USR_READ 0x00020000 /* user can read key payload / view keyring */ -#define KEY_USR_WRITE 0x00040000 /* user can update key payload / add link to keyring */ -#define KEY_USR_SEARCH 0x00080000 /* user can find a key in search / search a keyring */ -#define KEY_USR_LINK 0x00100000 /* user can create a link to a key/keyring */ +#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */ +#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */ +#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */ +#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */ +#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */ +#define KEY_POS_ALL 0x1f000000 + +#define KEY_USR_VIEW 0x00010000 /* user 
permissions... */ +#define KEY_USR_READ 0x00020000 +#define KEY_USR_WRITE 0x00040000 +#define KEY_USR_SEARCH 0x00080000 +#define KEY_USR_LINK 0x00100000 #define KEY_USR_ALL 0x001f0000 #define KEY_GRP_VIEW 0x00000100 /* group permissions... */ @@ -65,6 +72,38 @@ struct key_owner; struct keyring_list; struct keyring_name; +/*****************************************************************************/ +/* + * key reference with possession attribute handling + * + * NOTE! key_ref_t is a typedef'd pointer to a type that is not actually + * defined. This is because we abuse the bottom bit of the reference to carry a + * flag to indicate whether the calling process possesses that key in one of + * its keyrings. + * + * the key_ref_t has been made a separate type so that the compiler can reject + * attempts to dereference it without proper conversion. + * + * the three functions are used to assemble and disassemble references + */ +typedef struct __key_reference_with_attributes *key_ref_t; + +static inline key_ref_t make_key_ref(const struct key *key, + unsigned long possession) +{ + return (key_ref_t) ((unsigned long) key | possession); +} + +static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) +{ + return (struct key *) ((unsigned long) key_ref & ~1UL); +} + +static inline unsigned long is_key_possessed(const key_ref_t key_ref) +{ + return (unsigned long) key_ref & 1UL; +} + /*****************************************************************************/ /* * authentication token / access credential / keyring @@ -215,20 +254,25 @@ static inline struct key *key_get(struct key *key) return key; } +static inline void key_ref_put(key_ref_t key_ref) +{ + key_put(key_ref_to_ptr(key_ref)); +} + extern struct key *request_key(struct key_type *type, const char *description, const char *callout_info); extern int key_validate(struct key *key); -extern struct key *key_create_or_update(struct key *keyring, - const char *type, - const char *description, - const void *payload, - size_t plen, - int not_in_quota); +extern key_ref_t key_create_or_update(key_ref_t keyring, + const char *type, + const char *description, + const void *payload, + size_t plen, + int not_in_quota); -extern int key_update(struct key *key, +extern int key_update(key_ref_t key, const void *payload, size_t plen); @@ -243,9 +287,9 @@ extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, extern int keyring_clear(struct key *keyring); -extern struct key *keyring_search(struct key *keyring, - struct key_type *type, - const char *description); +extern key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description); extern int keyring_add_key(struct key *keyring, struct key *key); @@ -285,6 +329,10 @@ extern void key_init(void); #define key_serial(k) 0 #define key_get(k) ({ NULL; }) #define key_put(k) do { } while(0) +#define key_ref_put(k) do { } while(0) +#define make_key_ref(k) ({ NULL; }) +#define key_ref_to_ptr(k) ({ NULL; }) +#define is_key_possessed(k) 0 #define alloc_uid_keyring(u) 0 #define switch_uid_keyring(u) do { } while(0) #define __install_session_keyring(t, k) ({ NULL; }) diff --git a/security/keys/internal.h b/security/keys/internal.h index 46c8602661c9..db99ed434f3a 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -71,26 +71,26 @@ extern void keyring_publish_name(struct key *keyring); extern int __key_link(struct key *keyring, struct key *key); -extern struct key *__keyring_search_one(struct key *keyring, - const struct key_type *type, 
- const char *description, - key_perm_t perm); +extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, + const struct key_type *type, + const char *description, + key_perm_t perm); extern struct key *keyring_search_instkey(struct key *keyring, key_serial_t target_id); typedef int (*key_match_func_t)(const struct key *, const void *); -extern struct key *keyring_search_aux(struct key *keyring, - struct task_struct *tsk, - struct key_type *type, - const void *description, - key_match_func_t match); +extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, + struct task_struct *tsk, + struct key_type *type, + const void *description, + key_match_func_t match); -extern struct key *search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - struct task_struct *tsk); +extern key_ref_t search_process_keyrings(struct key_type *type, + const void *description, + key_match_func_t match, + struct task_struct *tsk); extern struct key *find_keyring_by_name(const char *name, key_serial_t bound); diff --git a/security/keys/key.c b/security/keys/key.c index fb89f9844465..2182be9e9309 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -693,14 +693,15 @@ void key_type_put(struct key_type *ktype) * - the key has an incremented refcount * - we need to put the key if we get an error */ -static inline struct key *__key_update(struct key *key, const void *payload, - size_t plen) +static inline key_ref_t __key_update(key_ref_t key_ref, + const void *payload, size_t plen) { + struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = -EACCES; - if (!key_permission(key, KEY_WRITE)) + if (!key_permission(key_ref, KEY_WRITE)) goto error; ret = -EEXIST; @@ -719,12 +720,12 @@ static inline struct key *__key_update(struct key *key, const void *payload, if (ret < 0) goto error; - out: - return key; +out: + return key_ref; - error: +error: key_put(key); - key = ERR_PTR(ret); + key_ref = ERR_PTR(ret); goto out; } /* end __key_update() */ @@ -734,52 +735,56 @@ static inline struct key *__key_update(struct key *key, const void *payload, * search the specified keyring for a key of the same description; if one is * found, update it, otherwise add a new one */ -struct key *key_create_or_update(struct key *keyring, - const char *type, - const char *description, - const void *payload, - size_t plen, - int not_in_quota) +key_ref_t key_create_or_update(key_ref_t keyring_ref, + const char *type, + const char *description, + const void *payload, + size_t plen, + int not_in_quota) { struct key_type *ktype; - struct key *key = NULL; + struct key *keyring, *key = NULL; key_perm_t perm; + key_ref_t key_ref; int ret; - key_check(keyring); - /* look up the key type to see if it's one of the registered kernel * types */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { - key = ERR_PTR(-ENODEV); + key_ref = ERR_PTR(-ENODEV); goto error; } - ret = -EINVAL; + key_ref = ERR_PTR(-EINVAL); if (!ktype->match || !ktype->instantiate) goto error_2; + keyring = key_ref_to_ptr(keyring_ref); + + key_check(keyring); + + down_write(&keyring->sem); + + /* if we're going to allocate a new key, we're going to have + * to modify the keyring */ + key_ref = ERR_PTR(-EACCES); + if (!key_permission(keyring_ref, KEY_WRITE)) + goto error_3; + /* search for an existing key of the same type and description in the * destination keyring */ - down_write(&keyring->sem); - - key = __keyring_search_one(keyring, ktype, description, 0); - if (!IS_ERR(key)) + 
key_ref = __keyring_search_one(keyring_ref, ktype, description, 0); + if (!IS_ERR(key_ref)) goto found_matching_key; - /* if we're going to allocate a new key, we're going to have to modify - * the keyring */ - ret = -EACCES; - if (!key_permission(keyring, KEY_WRITE)) - goto error_3; - /* decide on the permissions we want */ - perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK; + perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK; + perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK; if (ktype->read) - perm |= KEY_USR_READ; + perm |= KEY_POS_READ | KEY_USR_READ; if (ktype == &key_type_keyring || ktype->update) perm |= KEY_USR_WRITE; @@ -788,7 +793,7 @@ struct key *key_create_or_update(struct key *keyring, key = key_alloc(ktype, description, current->fsuid, current->fsgid, perm, not_in_quota); if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = ERR_PTR(PTR_ERR(key)); goto error_3; } @@ -796,15 +801,18 @@ struct key *key_create_or_update(struct key *keyring, ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL); if (ret < 0) { key_put(key); - key = ERR_PTR(ret); + key_ref = ERR_PTR(ret); + goto error_3; } + key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); + error_3: up_write(&keyring->sem); error_2: key_type_put(ktype); error: - return key; + return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it @@ -813,7 +821,7 @@ struct key *key_create_or_update(struct key *keyring, up_write(&keyring->sem); key_type_put(ktype); - key = __key_update(key, payload, plen); + key_ref = __key_update(key_ref, payload, plen); goto error; } /* end key_create_or_update() */ @@ -824,15 +832,16 @@ EXPORT_SYMBOL(key_create_or_update); /* * update a key */ -int key_update(struct key *key, const void *payload, size_t plen) +int key_update(key_ref_t key_ref, const void *payload, size_t plen) { + struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = -EACCES; - if (!key_permission(key, KEY_WRITE)) + if (!key_permission(key_ref, KEY_WRITE)) goto error; /* attempt to update it if supported */ diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index a6516a64b297..4c670ee6acf9 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -34,7 +34,7 @@ asmlinkage long sys_add_key(const char __user *_type, size_t plen, key_serial_t ringid) { - struct key *keyring, *key; + key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long dlen, ret; @@ -86,25 +86,25 @@ asmlinkage long sys_add_key(const char __user *_type, } /* find the target keyring (which must be writable) */ - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ - key = key_create_or_update(keyring, type, description, - payload, plen, 0); - if (!IS_ERR(key)) { - ret = key->serial; - key_put(key); + key_ref = key_create_or_update(keyring_ref, type, description, + payload, plen, 0); + if (!IS_ERR(key_ref)) { + ret = key_ref_to_ptr(key_ref)->serial; + key_ref_put(key_ref); } else { - ret = PTR_ERR(key); + ret = PTR_ERR(key_ref); } - key_put(keyring); + key_ref_put(keyring_ref); error3: kfree(payload); error2: @@ -131,7 +131,8 @@ asmlinkage long sys_request_key(const char __user *_type, key_serial_t destringid) { struct key_type *ktype; - struct 
key *key, *dest; + struct key *key; + key_ref_t dest_ref; char type[32], *description, *callout_info; long dlen, ret; @@ -187,11 +188,11 @@ asmlinkage long sys_request_key(const char __user *_type, } /* get the destination keyring if specified */ - dest = NULL; + dest_ref = NULL; if (destringid) { - dest = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); - if (IS_ERR(dest)) { - ret = PTR_ERR(dest); + dest_ref = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); + if (IS_ERR(dest_ref)) { + ret = PTR_ERR(dest_ref); goto error3; } } @@ -204,7 +205,8 @@ asmlinkage long sys_request_key(const char __user *_type, } /* do the search */ - key = request_key_and_link(ktype, description, callout_info, dest); + key = request_key_and_link(ktype, description, callout_info, + key_ref_to_ptr(dest_ref)); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; @@ -216,7 +218,7 @@ asmlinkage long sys_request_key(const char __user *_type, error5: key_type_put(ktype); error4: - key_put(dest); + key_ref_put(dest_ref); error3: kfree(callout_info); error2: @@ -234,17 +236,17 @@ asmlinkage long sys_request_key(const char __user *_type, */ long keyctl_get_keyring_ID(key_serial_t id, int create) { - struct key *key; + key_ref_t key_ref; long ret; - key = lookup_user_key(NULL, id, create, 0, KEY_SEARCH); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, create, 0, KEY_SEARCH); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } - ret = key->serial; - key_put(key); + ret = key_ref_to_ptr(key_ref)->serial; + key_ref_put(key_ref); error: return ret; @@ -302,7 +304,7 @@ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { - struct key *key; + key_ref_t key_ref; void *payload; long ret; @@ -324,16 +326,16 @@ long keyctl_update_key(key_serial_t id, } /* find the target key (which must be writable) */ - key = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error2; } /* update the key */ - ret = key_update(key, payload, plen); + ret = key_update(key_ref, payload, plen); - key_put(key); + key_ref_put(key_ref); error2: kfree(payload); error: @@ -349,19 +351,19 @@ long keyctl_update_key(key_serial_t id, */ long keyctl_revoke_key(key_serial_t id) { - struct key *key; + key_ref_t key_ref; long ret; - key = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } - key_revoke(key); + key_revoke(key_ref_to_ptr(key_ref)); ret = 0; - key_put(key); + key_ref_put(key_ref); error: return ret; @@ -375,18 +377,18 @@ long keyctl_revoke_key(key_serial_t id) */ long keyctl_keyring_clear(key_serial_t ringid) { - struct key *keyring; + key_ref_t keyring_ref; long ret; - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error; } - ret = keyring_clear(keyring); + ret = keyring_clear(key_ref_to_ptr(keyring_ref)); - key_put(keyring); + key_ref_put(keyring_ref); error: return ret; @@ -401,26 +403,26 @@ long keyctl_keyring_clear(key_serial_t ringid) */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { - struct key *keyring, *key; + key_ref_t keyring_ref, key_ref; long ret; - keyring 
= lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error; } - key = lookup_user_key(NULL, id, 1, 0, KEY_LINK); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 1, 0, KEY_LINK); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error2; } - ret = key_link(keyring, key); + ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); - key_put(key); + key_ref_put(key_ref); error2: - key_put(keyring); + key_ref_put(keyring_ref); error: return ret; @@ -435,26 +437,26 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { - struct key *keyring, *key; + key_ref_t keyring_ref, key_ref; long ret; - keyring = lookup_user_key(NULL, ringid, 0, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 0, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error; } - key = lookup_user_key(NULL, id, 0, 0, 0); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 0, 0, 0); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error2; } - ret = key_unlink(keyring, key); + ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); - key_put(key); + key_ref_put(key_ref); error2: - key_put(keyring); + key_ref_put(keyring_ref); error: return ret; @@ -476,24 +478,26 @@ long keyctl_describe_key(key_serial_t keyid, size_t buflen) { struct key *key, *instkey; + key_ref_t key_ref; char *tmpbuf; long ret; - key = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW); - if (IS_ERR(key)) { + key_ref = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW); + if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ - if (PTR_ERR(key) == -EACCES) { + if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); - key = lookup_user_key(NULL, keyid, 0, 1, 0); - if (!IS_ERR(key)) + key_ref = lookup_user_key(NULL, keyid, + 0, 1, 0); + if (!IS_ERR(key_ref)) goto okay; } } - ret = PTR_ERR(key); + ret = PTR_ERR(key_ref); goto error; } @@ -504,13 +508,16 @@ okay: if (!tmpbuf) goto error2; + key = key_ref_to_ptr(key_ref); + ret = snprintf(tmpbuf, PAGE_SIZE - 1, - "%s;%d;%d;%06x;%s", - key->type->name, - key->uid, - key->gid, - key->perm, - key->description ? key->description :"" + "%s;%d;%d;%08x;%s", + key_ref_to_ptr(key_ref)->type->name, + key_ref_to_ptr(key_ref)->uid, + key_ref_to_ptr(key_ref)->gid, + key_ref_to_ptr(key_ref)->perm, + key_ref_to_ptr(key_ref)->description ? 
+ key_ref_to_ptr(key_ref)->description : "" ); /* include a NUL char at the end of the data */ @@ -530,7 +537,7 @@ okay: kfree(tmpbuf); error2: - key_put(key); + key_ref_put(key_ref); error: return ret; @@ -552,7 +559,7 @@ long keyctl_keyring_search(key_serial_t ringid, key_serial_t destringid) { struct key_type *ktype; - struct key *keyring, *key, *dest; + key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long dlen, ret; @@ -581,18 +588,18 @@ long keyctl_keyring_search(key_serial_t ringid, goto error2; /* get the keyring at which to begin the search */ - keyring = lookup_user_key(NULL, ringid, 0, 0, KEY_SEARCH); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 0, 0, KEY_SEARCH); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ - dest = NULL; + dest_ref = NULL; if (destringid) { - dest = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); - if (IS_ERR(dest)) { - ret = PTR_ERR(dest); + dest_ref = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); + if (IS_ERR(dest_ref)) { + ret = PTR_ERR(dest_ref); goto error3; } } @@ -605,9 +612,9 @@ long keyctl_keyring_search(key_serial_t ringid, } /* do the search */ - key = keyring_search(keyring, ktype, description); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = keyring_search(keyring_ref, ktype, description); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) @@ -616,26 +623,26 @@ long keyctl_keyring_search(key_serial_t ringid, } /* link the resulting key to the destination keyring if we can */ - if (dest) { + if (dest_ref) { ret = -EACCES; - if (!key_permission(key, KEY_LINK)) + if (!key_permission(key_ref, KEY_LINK)) goto error6; - ret = key_link(dest, key); + ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } - ret = key->serial; + ret = key_ref_to_ptr(key_ref)->serial; error6: - key_put(key); + key_ref_put(key_ref); error5: key_type_put(ktype); error4: - key_put(dest); + key_ref_put(dest_ref); error3: - key_put(keyring); + key_ref_put(keyring_ref); error2: kfree(description); error: @@ -643,16 +650,6 @@ long keyctl_keyring_search(key_serial_t ringid, } /* end keyctl_keyring_search() */ -/*****************************************************************************/ -/* - * see if the key we're looking at is the target key - */ -static int keyctl_read_key_same(const struct key *key, const void *target) -{ - return key == target; - -} /* end keyctl_read_key_same() */ - /*****************************************************************************/ /* * read a user key's payload @@ -665,38 +662,33 @@ static int keyctl_read_key_same(const struct key *key, const void *target) */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { - struct key *key, *skey; + struct key *key; + key_ref_t key_ref; long ret; /* find the key first */ - key = lookup_user_key(NULL, keyid, 0, 0, 0); - if (!IS_ERR(key)) { - /* see if we can read it directly */ - if (key_permission(key, KEY_READ)) - goto can_read_key; - - /* we can't; see if it's searchable from this process's - * keyrings - * - we automatically take account of the fact that it may be - * dangling off an instantiation key - */ - skey = search_process_keyrings(key->type, key, - keyctl_read_key_same, current); - if (!IS_ERR(skey)) - goto can_read_key2; - - ret = PTR_ERR(skey); - if (ret == -EAGAIN) - ret = -EACCES; - goto 
error2; + key_ref = lookup_user_key(NULL, keyid, 0, 0, 0); + if (IS_ERR(key_ref)) { + ret = -ENOKEY; + goto error; } - ret = -ENOKEY; - goto error; + key = key_ref_to_ptr(key_ref); + + /* see if we can read it directly */ + if (key_permission(key_ref, KEY_READ)) + goto can_read_key; + + /* we can't; see if it's searchable from this process's keyrings + * - we automatically take account of the fact that it may be + * dangling off an instantiation key + */ + if (!is_key_possessed(key_ref)) { + ret = -EACCES; + goto error2; + } /* the key is probably readable - now try to read it */ - can_read_key2: - key_put(skey); can_read_key: ret = key_validate(key); if (ret == 0) { @@ -727,18 +719,21 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) { struct key *key; + key_ref_t key_ref; long ret; ret = 0; if (uid == (uid_t) -1 && gid == (gid_t) -1) goto error; - key = lookup_user_key(NULL, id, 1, 1, 0); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 1, 1, 0); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } + key = key_ref_to_ptr(key_ref); + /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); @@ -784,18 +779,21 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; + key_ref_t key_ref; long ret; ret = -EINVAL; - if (perm & ~(KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) + if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; - key = lookup_user_key(NULL, id, 1, 1, 0); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 1, 1, 0); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } + key = key_ref_to_ptr(key_ref); + /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); @@ -824,7 +822,8 @@ long keyctl_instantiate_key(key_serial_t id, key_serial_t ringid) { struct request_key_auth *rka; - struct key *instkey, *keyring; + struct key *instkey; + key_ref_t keyring_ref; void *payload; long ret; @@ -857,21 +856,21 @@ long keyctl_instantiate_key(key_serial_t id, /* find the destination keyring amongst those belonging to the * requesting task */ - keyring = NULL; + keyring_ref = NULL; if (ringid) { - keyring = lookup_user_key(rka->context, ringid, 1, 0, - KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(rka->context, ringid, 1, 0, + KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error3; } } /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, - keyring, instkey); + key_ref_to_ptr(keyring_ref), instkey); - key_put(keyring); + key_ref_put(keyring_ref); error3: key_put(instkey); error2: @@ -889,7 +888,8 @@ long keyctl_instantiate_key(key_serial_t id, long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { struct request_key_auth *rka; - struct key *instkey, *keyring; + struct key *instkey; + key_ref_t keyring_ref; long ret; /* find the instantiation authorisation key */ @@ -903,19 +903,20 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) /* find the destination keyring if present (which must also be * writable) */ - keyring = NULL; + keyring_ref = NULL; if (ringid) { - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); 
- if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error2; } } /* instantiate the key and link it into a keyring */ - ret = key_negate_and_link(rka->target_key, timeout, keyring, instkey); + ret = key_negate_and_link(rka->target_key, timeout, + key_ref_to_ptr(keyring_ref), instkey); - key_put(keyring); + key_ref_put(keyring_ref); error2: key_put(instkey); error: diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 9c208c756df8..0639396dd441 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -309,7 +309,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, int ret; keyring = key_alloc(&key_type_keyring, description, - uid, gid, KEY_USR_ALL, not_in_quota); + uid, gid, KEY_POS_ALL | KEY_USR_ALL, not_in_quota); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); @@ -333,12 +333,13 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, * - we rely on RCU to prevent the keyring lists from disappearing on us * - we return -EAGAIN if we didn't find any matching key * - we return -ENOKEY if we only found negative matching keys + * - we propagate the possession attribute from the keyring ref to the key ref */ -struct key *keyring_search_aux(struct key *keyring, - struct task_struct *context, - struct key_type *type, - const void *description, - key_match_func_t match) +key_ref_t keyring_search_aux(key_ref_t keyring_ref, + struct task_struct *context, + struct key_type *type, + const void *description, + key_match_func_t match) { struct { struct keyring_list *keylist; @@ -347,29 +348,33 @@ struct key *keyring_search_aux(struct key *keyring, struct keyring_list *keylist; struct timespec now; - struct key *key; + unsigned long possessed; + struct key *keyring, *key; + key_ref_t key_ref; long err; int sp, kix; + keyring = key_ref_to_ptr(keyring_ref); + possessed = is_key_possessed(keyring_ref); key_check(keyring); - rcu_read_lock(); - /* top keyring must have search permission to begin the search */ - key = ERR_PTR(-EACCES); - if (!key_task_permission(keyring, context, KEY_SEARCH)) + key_ref = ERR_PTR(-EACCES); + if (!key_task_permission(keyring_ref, context, KEY_SEARCH)) goto error; - key = ERR_PTR(-ENOTDIR); + key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error; + rcu_read_lock(); + now = current_kernel_time(); err = -EAGAIN; sp = 0; /* start processing a new keyring */ - descend: +descend: if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto not_this_keyring; @@ -397,7 +402,8 @@ struct key *keyring_search_aux(struct key *keyring, continue; /* key must have search permissions */ - if (!key_task_permission(key, context, KEY_SEARCH)) + if (!key_task_permission(make_key_ref(key, possessed), + context, KEY_SEARCH)) continue; /* we set a different error code if we find a negative key */ @@ -411,7 +417,7 @@ struct key *keyring_search_aux(struct key *keyring, /* search through the keyrings nested in this one */ kix = 0; - ascend: +ascend: for (; kix < keylist->nkeys; kix++) { key = keylist->keys[kix]; if (key->type != &key_type_keyring) @@ -423,7 +429,8 @@ struct key *keyring_search_aux(struct key *keyring, if (sp >= KEYRING_SEARCH_MAX_DEPTH) continue; - if (!key_task_permission(key, context, KEY_SEARCH)) + if (!key_task_permission(make_key_ref(key, possessed), + context, KEY_SEARCH)) continue; /* stack the current position */ @@ -438,7 
+445,7 @@ struct key *keyring_search_aux(struct key *keyring, /* the keyring we're looking at was disqualified or didn't contain a * matching key */ - not_this_keyring: +not_this_keyring: if (sp > 0) { /* resume the processing of a keyring higher up in the tree */ sp--; @@ -447,16 +454,18 @@ struct key *keyring_search_aux(struct key *keyring, goto ascend; } - key = ERR_PTR(err); - goto error; + key_ref = ERR_PTR(err); + goto error_2; /* we found a viable match */ - found: +found: atomic_inc(&key->usage); key_check(key); - error: + key_ref = make_key_ref(key, possessed); +error_2: rcu_read_unlock(); - return key; +error: + return key_ref; } /* end keyring_search_aux() */ @@ -469,9 +478,9 @@ struct key *keyring_search_aux(struct key *keyring, * - we return -EAGAIN if we didn't find any matching key * - we return -ENOKEY if we only found negative matching keys */ -struct key *keyring_search(struct key *keyring, - struct key_type *type, - const char *description) +key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description) { if (!type->match) return ERR_PTR(-ENOKEY); @@ -488,15 +497,19 @@ EXPORT_SYMBOL(keyring_search); * search the given keyring only (no recursion) * - keyring must be locked by caller */ -struct key *__keyring_search_one(struct key *keyring, - const struct key_type *ktype, - const char *description, - key_perm_t perm) +key_ref_t __keyring_search_one(key_ref_t keyring_ref, + const struct key_type *ktype, + const char *description, + key_perm_t perm) { struct keyring_list *klist; - struct key *key; + unsigned long possessed; + struct key *keyring, *key; int loop; + keyring = key_ref_to_ptr(keyring_ref); + possessed = is_key_possessed(keyring_ref); + rcu_read_lock(); klist = rcu_dereference(keyring->payload.subscriptions); @@ -507,21 +520,21 @@ struct key *__keyring_search_one(struct key *keyring, if (key->type == ktype && (!key->type->match || key->type->match(key, description)) && - key_permission(key, perm) && + key_permission(make_key_ref(key, possessed), + perm) && !test_bit(KEY_FLAG_REVOKED, &key->flags) ) goto found; } } - key = ERR_PTR(-ENOKEY); - goto error; + rcu_read_unlock(); + return ERR_PTR(-ENOKEY); found: atomic_inc(&key->usage); - error: rcu_read_unlock(); - return key; + return make_key_ref(key, possessed); } /* end __keyring_search_one() */ @@ -603,7 +616,8 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound) if (strcmp(keyring->description, name) != 0) continue; - if (!key_permission(keyring, KEY_SEARCH)) + if (!key_permission(make_key_ref(keyring, 0), + KEY_SEARCH)) continue; /* found a potential candidate, but we still need to diff --git a/security/keys/proc.c b/security/keys/proc.c index c55cf1fd0826..12b750e51fbf 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -167,7 +167,7 @@ static int proc_keys_show(struct seq_file *m, void *v) #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? 
LETTER : '-') - seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ", + seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, showflag(key, 'I', KEY_FLAG_INSTANTIATED), showflag(key, 'R', KEY_FLAG_REVOKED), diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index c089f78fb94e..d42d2158ce13 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -39,7 +39,7 @@ struct key root_user_keyring = { .type = &key_type_keyring, .user = &root_key_user, .sem = __RWSEM_INITIALIZER(root_user_keyring.sem), - .perm = KEY_USR_ALL, + .perm = KEY_POS_ALL | KEY_USR_ALL, .flags = 1 << KEY_FLAG_INSTANTIATED, .description = "_uid.0", #ifdef KEY_DEBUGGING @@ -54,7 +54,7 @@ struct key root_session_keyring = { .type = &key_type_keyring, .user = &root_key_user, .sem = __RWSEM_INITIALIZER(root_session_keyring.sem), - .perm = KEY_USR_ALL, + .perm = KEY_POS_ALL | KEY_USR_ALL, .flags = 1 << KEY_FLAG_INSTANTIATED, .description = "_uid_ses.0", #ifdef KEY_DEBUGGING @@ -98,7 +98,7 @@ int alloc_uid_keyring(struct user_struct *user) user->session_keyring = session_keyring; ret = 0; - error: +error: return ret; } /* end alloc_uid_keyring() */ @@ -156,7 +156,7 @@ int install_thread_keyring(struct task_struct *tsk) ret = 0; key_put(old); - error: +error: return ret; } /* end install_thread_keyring() */ @@ -193,7 +193,7 @@ int install_process_keyring(struct task_struct *tsk) } ret = 0; - error: +error: return ret; } /* end install_process_keyring() */ @@ -236,7 +236,7 @@ static int install_session_keyring(struct task_struct *tsk, /* we're using RCU on the pointer */ synchronize_rcu(); key_put(old); - error: +error: return ret; } /* end install_session_keyring() */ @@ -376,13 +376,13 @@ void key_fsgid_changed(struct task_struct *tsk) * - we return -EAGAIN if we didn't find any matching key * - we return -ENOKEY if we found only negative matching keys */ -struct key *search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - struct task_struct *context) +key_ref_t search_process_keyrings(struct key_type *type, + const void *description, + key_match_func_t match, + struct task_struct *context) { struct request_key_auth *rka; - struct key *key, *ret, *err, *instkey; + key_ref_t key_ref, ret, err, instkey_ref; /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were * searchable, but we failed to find a key or we found a negative key; @@ -391,46 +391,48 @@ struct key *search_process_keyrings(struct key_type *type, * * in terms of priority: success > -ENOKEY > -EAGAIN > other error */ - key = NULL; + key_ref = NULL; ret = NULL; err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ if (context->thread_keyring) { - key = keyring_search_aux(context->thread_keyring, - context, type, description, match); - if (!IS_ERR(key)) + key_ref = keyring_search_aux( + make_key_ref(context->thread_keyring, 1), + context, type, description, match); + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } /* search the process keyring second */ if (context->signal->process_keyring) { - key = keyring_search_aux(context->signal->process_keyring, - context, type, description, match); - if (!IS_ERR(key)) + key_ref = keyring_search_aux( + make_key_ref(context->signal->process_keyring, 1), + context, type, description, match); 
+ if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } @@ -438,23 +440,25 @@ struct key *search_process_keyrings(struct key_type *type, /* search the session keyring */ if (context->signal->session_keyring) { rcu_read_lock(); - key = keyring_search_aux( - rcu_dereference(context->signal->session_keyring), + key_ref = keyring_search_aux( + make_key_ref(rcu_dereference( + context->signal->session_keyring), + 1), context, type, description, match); rcu_read_unlock(); - if (!IS_ERR(key)) + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } @@ -465,51 +469,54 @@ struct key *search_process_keyrings(struct key_type *type, goto no_key; rcu_read_lock(); - instkey = __keyring_search_one( - rcu_dereference(context->signal->session_keyring), + instkey_ref = __keyring_search_one( + make_key_ref(rcu_dereference( + context->signal->session_keyring), + 1), &key_type_request_key_auth, NULL, 0); rcu_read_unlock(); - if (IS_ERR(instkey)) + if (IS_ERR(instkey_ref)) goto no_key; - rka = instkey->payload.data; + rka = key_ref_to_ptr(instkey_ref)->payload.data; - key = search_process_keyrings(type, description, match, - rka->context); - key_put(instkey); + key_ref = search_process_keyrings(type, description, match, + rka->context); + key_ref_put(instkey_ref); - if (!IS_ERR(key)) + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } /* or search the user-session keyring */ else { - key = keyring_search_aux(context->user->session_keyring, - context, type, description, match); - if (!IS_ERR(key)) + key_ref = keyring_search_aux( + make_key_ref(context->user->session_keyring, 1), + context, type, description, match); + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } @@ -517,29 +524,40 @@ struct key *search_process_keyrings(struct key_type *type, no_key: /* no key - decide on the error we're going to go for */ - key = ret ? ret : err; + key_ref = ret ? 
ret : err; found: - return key; + return key_ref; } /* end search_process_keyrings() */ +/*****************************************************************************/ +/* + * see if the key we're looking at is the target key + */ +static int lookup_user_key_possessed(const struct key *key, const void *target) +{ + return key == target; + +} /* end lookup_user_key_possessed() */ + /*****************************************************************************/ /* * lookup a key given a key ID from userspace with a given permissions mask * - don't create special keyrings unless so requested * - partially constructed keys aren't found unless requested */ -struct key *lookup_user_key(struct task_struct *context, key_serial_t id, - int create, int partial, key_perm_t perm) +key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id, + int create, int partial, key_perm_t perm) { + key_ref_t key_ref, skey_ref; struct key *key; int ret; if (!context) context = current; - key = ERR_PTR(-ENOKEY); + key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: @@ -556,6 +574,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, key = context->thread_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: @@ -572,6 +591,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, key = context->signal->process_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: @@ -579,7 +599,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_session_keyring( - context, context->user->session_keyring); + context, context->user->session_keyring); if (ret < 0) goto error; } @@ -588,16 +608,19 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, key = rcu_dereference(context->signal->session_keyring); atomic_inc(&key->usage); rcu_read_unlock(); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: key = context->user->uid_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: key = context->user->session_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: @@ -606,13 +629,28 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, goto error; default: - key = ERR_PTR(-EINVAL); + key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); - if (IS_ERR(key)) + if (IS_ERR(key)) { + key_ref = ERR_PTR(PTR_ERR(key)); goto error; + } + + key_ref = make_key_ref(key, 0); + + /* check to see if we possess the key */ + skey_ref = search_process_keyrings(key->type, key, + lookup_user_key_possessed, + current); + + if (!IS_ERR(skey_ref)) { + key_put(key); + key_ref = skey_ref; + } + break; } @@ -630,15 +668,15 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, /* check the permissions */ ret = -EACCES; - if (!key_task_permission(key, context, perm)) + if (!key_task_permission(key_ref, context, perm)) goto invalid_key; - error: - return key; +error: + return key_ref; - invalid_key: - key_put(key); - key = ERR_PTR(ret); +invalid_key: + key_ref_put(key_ref); + key_ref = ERR_PTR(ret); goto error; } /* end lookup_user_key() */ @@ -694,9 +732,9 @@ long join_session_keyring(const char *name) ret = keyring->serial; key_put(keyring); - error2: +error2: 
up(&key_session_sem); - error: +error: return ret; } /* end join_session_keyring() */ diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 90c1506d007c..e6dd366d43a3 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -129,7 +129,7 @@ static struct key *__request_key_construction(struct key_type *type, /* create a key and add it to the queue */ key = key_alloc(type, description, - current->fsuid, current->fsgid, KEY_USR_ALL, 0); + current->fsuid, current->fsgid, KEY_POS_ALL, 0); if (IS_ERR(key)) goto alloc_failed; @@ -365,14 +365,24 @@ struct key *request_key_and_link(struct key_type *type, { struct key_user *user; struct key *key; + key_ref_t key_ref; kenter("%s,%s,%s,%p", type->name, description, callout_info, dest_keyring); /* search all the process keyrings for a key */ - key = search_process_keyrings(type, description, type->match, current); + key_ref = search_process_keyrings(type, description, type->match, + current); - if (PTR_ERR(key) == -EAGAIN) { + kdebug("search 1: %p", key_ref); + + if (!IS_ERR(key_ref)) { + key = key_ref_to_ptr(key_ref); + } + else if (PTR_ERR(key_ref) != -EAGAIN) { + key = ERR_PTR(PTR_ERR(key_ref)); + } + else { /* the search failed, but the keyrings were searchable, so we * should consult userspace if we can */ key = ERR_PTR(-ENOKEY); @@ -384,7 +394,7 @@ struct key *request_key_and_link(struct key_type *type, if (!user) goto nomem; - do { + for (;;) { if (signal_pending(current)) goto interrupted; @@ -397,10 +407,22 @@ struct key *request_key_and_link(struct key_type *type, /* someone else made the key we want, so we need to * search again as it might now be available to us */ - key = search_process_keyrings(type, description, - type->match, current); + key_ref = search_process_keyrings(type, description, + type->match, + current); + + kdebug("search 2: %p", key_ref); - } while (PTR_ERR(key) == -EAGAIN); + if (!IS_ERR(key_ref)) { + key = key_ref_to_ptr(key_ref); + break; + } + + if (PTR_ERR(key_ref) != -EAGAIN) { + key = ERR_PTR(PTR_ERR(key_ref)); + break; + } + } key_user_put(user); diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index f22264632229..1ecd3d3fa9f8 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -126,7 +126,7 @@ struct key *request_key_auth_new(struct key *target, struct key **_rkakey) rkakey = key_alloc(&key_type_request_key_auth, desc, current->fsuid, current->fsgid, - KEY_USR_VIEW, 1); + KEY_POS_VIEW | KEY_USR_VIEW, 1); if (IS_ERR(rkakey)) { key_put(keyring); kleave("= %ld", PTR_ERR(rkakey)); -- cgit v1.2.3 From aa55a08687059aa169d10a313c41f238c2070488 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 29 Sep 2005 19:58:53 +0400 Subject: [PATCH] fix TASK_STOPPED vs TASK_NONINTERACTIVE interaction do_signal_stop: for_each_thread(t) { if (t->state < TASK_STOPPED) ++sig->group_stop_count; } However, TASK_NONINTERACTIVE > TASK_STOPPED, so this loop will not count TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE threads. See also wait_task_stopped(), which checks ->state > TASK_STOPPED. Signed-off-by: Oleg Nesterov [ We really probably should always use the appropriate bitmasks to test task states, not do it like this. Using something like #define TASK_RUNNABLE (TASK_RUNNING | TASK_INTERRUPTIBLE | \ TASK_UNINTERRUPTIBLE | TASK_NONINTERACTIVE) and then doing "if (task->state & TASK_RUNNABLE)" or similar. But the ordering of the task states is historical, and keeping the ordering does make sense regardless. 
] Signed-off-by: Linus Torvalds --- include/linux/sched.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 49e617fa0f66..afe6c61f13e5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -110,11 +110,11 @@ extern unsigned long nr_iowait(void); #define TASK_RUNNING 0 #define TASK_INTERRUPTIBLE 1 #define TASK_UNINTERRUPTIBLE 2 -#define TASK_STOPPED 4 -#define TASK_TRACED 8 -#define EXIT_ZOMBIE 16 -#define EXIT_DEAD 32 -#define TASK_NONINTERACTIVE 64 +#define TASK_NONINTERACTIVE 4 +#define TASK_STOPPED 8 +#define TASK_TRACED 16 +#define EXIT_ZOMBIE 32 +#define EXIT_DEAD 64 #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) -- cgit v1.2.3 From 4a8342d233a39ee582e9f7260e12d2f5fd194a05 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 29 Sep 2005 15:18:21 -0700 Subject: Revert task flag re-ordering, add comments Roland points out that the flags end up having non-obvious dependencies elsewhere, so revert aa55a08687059aa169d10a313c41f238c2070488 and add some comments about why things are as they are. We'll just have to fix up the broken comparisons. Roland has a patch. Signed-off-by: Linus Torvalds --- include/linux/sched.h | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index afe6c61f13e5..c3ba31f210a9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -107,14 +107,26 @@ extern unsigned long nr_iowait(void); #include +/* + * Task state bitmask. NOTE! These bits are also + * encoded in fs/proc/array.c: get_task_state(). + * + * We have two separate sets of flags: task->state + * is about runnability, while task->exit_state are + * about the task exiting. Confusing, but this way + * modifying one set can't modify the other one by + * mistake. + */ #define TASK_RUNNING 0 #define TASK_INTERRUPTIBLE 1 #define TASK_UNINTERRUPTIBLE 2 -#define TASK_NONINTERACTIVE 4 -#define TASK_STOPPED 8 -#define TASK_TRACED 16 -#define EXIT_ZOMBIE 32 -#define EXIT_DEAD 64 +#define TASK_STOPPED 4 +#define TASK_TRACED 8 +/* in tsk->exit_state */ +#define EXIT_ZOMBIE 16 +#define EXIT_DEAD 32 +/* in tsk->state again */ +#define TASK_NONINTERACTIVE 64 #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) -- cgit v1.2.3 From 897f15fb587fd2772b9e7ff6ec0265057f3c3975 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Fri, 30 Sep 2005 11:58:55 -0700 Subject: [PATCH] aio: remove unlocked task_list test and resulting race Only one of the run or kick path is supposed to put an iocb on the run list. If both of them do it, then one of them can end up referencing a freed iocb. The kick path could delete the task_list item from the wait queue before getting the ctx_lock and putting the iocb on the run list. The run path was testing the task_list item outside the lock so that it could catch ki_retry methods that return -EIOCBRETRY *without* putting the iocb on a wait queue and promising to call kick_iocb. This unlocked check could then race with the kick path to cause both to try and put the iocb on the run list. The patch stops the run path from testing task_list by requiring that any ki_retry that returns -EIOCBRETRY *must* guarantee that kick_iocb() will be called in the future. aio_p{read,write}, the only in-tree -EIOCBRETRY users, are updated.
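[ Illustration, not part of the original patch: the new contract is easier to see as code. Below is a minimal sketch of a ki_retry method written under that rule, loosely modelled on the aio_pread() rework in the diff that follows. The function name is invented, error paths are trimmed, and the pipe/socket short-read handling of the real aio_pread() is omitted; it only shows the shape "loop on partial progress yourself, never return -EIOCBRETRY unless kick_iocb() is guaranteed".

	/* illustrative sketch only, not the in-tree aio_pread() */
	static ssize_t example_retry(struct kiocb *iocb)
	{
		struct file *file = iocb->ki_filp;
		ssize_t ret;

		do {
			/* issue the next chunk from where the last pass stopped */
			ret = file->f_op->aio_read(iocb, iocb->ki_buf,
						   iocb->ki_left, iocb->ki_pos);
			if (ret > 0) {
				/* record partial progress in the kiocb itself */
				iocb->ki_buf += ret;
				iocb->ki_left -= ret;
			}
		} while (ret > 0 && iocb->ki_left > 0);

		/* report the total transferred; no -EIOCBRETRY is returned, so
		 * the core never waits for a kick_iocb() that may not come */
		if (ret == 0 || iocb->ki_left == 0)
			ret = iocb->ki_nbytes - iocb->ki_left;

		return ret;
	} ]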
Signed-off-by: Zach Brown Signed-off-by: Benjamin LaHaise Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 79 ++++++++++++++++++++++------------------------------- include/linux/aio.h | 34 +++++++++++++++++++++++ 2 files changed, 67 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/fs/aio.c b/fs/aio.c index b8f296999c04..9edc0e4a1219 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -741,19 +741,9 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) ret = retry(iocb); current->io_wait = NULL; - if (-EIOCBRETRY != ret) { - if (-EIOCBQUEUED != ret) { - BUG_ON(!list_empty(&iocb->ki_wait.task_list)); - aio_complete(iocb, ret, 0); - /* must not access the iocb after this */ - } - } else { - /* - * Issue an additional retry to avoid waiting forever if - * no waits were queued (e.g. in case of a short read). - */ - if (list_empty(&iocb->ki_wait.task_list)) - kiocbSetKicked(iocb); + if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { + BUG_ON(!list_empty(&iocb->ki_wait.task_list)); + aio_complete(iocb, ret, 0); } out: spin_lock_irq(&ctx->ctx_lock); @@ -1327,8 +1317,11 @@ asmlinkage long sys_io_destroy(aio_context_t ctx) } /* - * Default retry method for aio_read (also used for first time submit) - * Responsible for updating iocb state as retries progress + * aio_p{read,write} are the default ki_retry methods for + * IO_CMD_P{READ,WRITE}. They maintains kiocb retry state around potentially + * multiple calls to f_op->aio_read(). They loop around partial progress + * instead of returning -EIOCBRETRY because they don't have the means to call + * kick_iocb(). */ static ssize_t aio_pread(struct kiocb *iocb) { @@ -1337,25 +1330,25 @@ static ssize_t aio_pread(struct kiocb *iocb) struct inode *inode = mapping->host; ssize_t ret = 0; - ret = file->f_op->aio_read(iocb, iocb->ki_buf, - iocb->ki_left, iocb->ki_pos); + do { + ret = file->f_op->aio_read(iocb, iocb->ki_buf, + iocb->ki_left, iocb->ki_pos); + /* + * Can't just depend on iocb->ki_left to determine + * whether we are done. This may have been a short read. + */ + if (ret > 0) { + iocb->ki_buf += ret; + iocb->ki_left -= ret; + } - /* - * Can't just depend on iocb->ki_left to determine - * whether we are done. This may have been a short read. - */ - if (ret > 0) { - iocb->ki_buf += ret; - iocb->ki_left -= ret; /* - * For pipes and sockets we return once we have - * some data; for regular files we retry till we - * complete the entire read or find that we can't - * read any more data (e.g short reads). + * For pipes and sockets we return once we have some data; for + * regular files we retry till we complete the entire read or + * find that we can't read any more data (e.g short reads). 
*/ - if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)) - ret = -EIOCBRETRY; - } + } while (ret > 0 && + !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)); /* This means we must have transferred all that we could */ /* No need to retry anymore */ @@ -1365,27 +1358,21 @@ static ssize_t aio_pread(struct kiocb *iocb) return ret; } -/* - * Default retry method for aio_write (also used for first time submit) - * Responsible for updating iocb state as retries progress - */ +/* see aio_pread() */ static ssize_t aio_pwrite(struct kiocb *iocb) { struct file *file = iocb->ki_filp; ssize_t ret = 0; - ret = file->f_op->aio_write(iocb, iocb->ki_buf, - iocb->ki_left, iocb->ki_pos); - - if (ret > 0) { - iocb->ki_buf += ret; - iocb->ki_left -= ret; - - ret = -EIOCBRETRY; - } + do { + ret = file->f_op->aio_write(iocb, iocb->ki_buf, + iocb->ki_left, iocb->ki_pos); + if (ret > 0) { + iocb->ki_buf += ret; + iocb->ki_left -= ret; + } + } while (ret > 0); - /* This means we must have transferred all that we could */ - /* No need to retry anymore */ if ((ret == 0) || (iocb->ki_left == 0)) ret = iocb->ki_nbytes - iocb->ki_left; diff --git a/include/linux/aio.h b/include/linux/aio.h index a4d5af907f90..60def658b246 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -43,6 +43,40 @@ struct kioctx; #define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags) #define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags) +/* is there a better place to document function pointer methods? */ +/** + * ki_retry - iocb forward progress callback + * @kiocb: The kiocb struct to advance by performing an operation. + * + * This callback is called when the AIO core wants a given AIO operation + * to make forward progress. The kiocb argument describes the operation + * that is to be performed. As the operation proceeds, perhaps partially, + * ki_retry is expected to update the kiocb with progress made. Typically + * ki_retry is set in the AIO core and it itself calls file_operations + * helpers. + * + * ki_retry's return value determines when the AIO operation is completed + * and an event is generated in the AIO event ring. Except the special + * return values described below, the value that is returned from ki_retry + * is transferred directly into the completion ring as the operation's + * resulting status. Once this has happened ki_retry *MUST NOT* reference + * the kiocb pointer again. + * + * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete() + * will be called on the kiocb pointer in the future. The AIO core will + * not ask the method again -- ki_retry must ensure forward progress. + * aio_complete() must be called once and only once in the future, multiple + * calls may result in undefined behaviour. + * + * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb() + * will be called on the kiocb pointer in the future. This may happen + * through generic helpers that associate kiocb->ki_wait with a wait + * queue head that ki_retry uses via current->io_wait. It can also happen + * with custom tracking and manual calls to kick_iocb(), though that is + * discouraged. In either case, kick_iocb() must be called once and only + * once. ki_retry must ensure forward progress, the AIO core will wait + * indefinitely for kick_iocb() to be called. 
+ */ struct kiocb { struct list_head ki_run_list; long ki_flags; -- cgit v1.2.3 From fd2e54b35bd70d11c160ded4834e2378e915356e Mon Sep 17 00:00:00 2001 From: Diego Calleja Date: Sat, 1 Oct 2005 17:00:48 +0200 Subject: [PATCH] trivial #if -> #ifdef Use '#ifdef' consistently on __KERNEL__. This was reported as bug #5340 (isn't easier to send a fix than report the bug?!) Signed-off-by: Diego Calleja Signed-off-by: Linus Torvalds --- include/linux/mod_devicetable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4ed2107bc020..2f0299a448f6 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -183,7 +183,7 @@ struct of_device_id char name[32]; char type[32]; char compatible[128]; -#if __KERNEL__ +#ifdef __KERNEL__ void *data; #else kernel_ulong_t data; -- cgit v1.2.3 From 325ed8239309cb29f10ea58c5a668058ead11479 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 3 Oct 2005 13:57:23 -0700 Subject: [NET]: Fix packet timestamping. I've found the problem in general. It affects any 64-bit architecture. The problem occurs when you change the system time. Suppose that when you boot your system clock is forward by a day. This gets recorded down in skb_tv_base. You then wind the clock back by a day. From that point onwards the offset will be negative which essentially overflows the 32-bit variables they're stored in. In fact, why don't we just store the real time stamp in those 32-bit variables? After all, we're not going to overflow for quite a while yet. When we do overflow, we'll need a better solution of course. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/skbuff.h | 12 +++--------- net/core/skbuff.c | 5 ----- net/ipv4/netfilter/ip_queue.c | 4 ++-- net/ipv4/netfilter/ipt_ULOG.c | 4 ++-- net/ipv6/netfilter/ip6_queue.c | 4 ++-- net/netfilter/nfnetlink_log.c | 4 ++-- net/netfilter/nfnetlink_queue.c | 4 ++-- net/packet/af_packet.c | 4 ++-- 8 files changed, 15 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2741c0c55e83..466c879f82b8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -155,8 +155,6 @@ struct skb_shared_info { #define SKB_DATAREF_SHIFT 16 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) -extern struct timeval skb_tv_base; - struct skb_timeval { u32 off_sec; u32 off_usec; @@ -175,7 +173,7 @@ enum { * @prev: Previous buffer in list * @list: List we are on * @sk: Socket we are owned by - * @tstamp: Time we arrived stored as offset to skb_tv_base + * @tstamp: Time we arrived * @dev: Device we arrived on/are leaving by * @input_dev: Device we arrived on * @h: Transport layer header @@ -1255,10 +1253,6 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval * { stamp->tv_sec = skb->tstamp.off_sec; stamp->tv_usec = skb->tstamp.off_usec; - if (skb->tstamp.off_sec) { - stamp->tv_sec += skb_tv_base.tv_sec; - stamp->tv_usec += skb_tv_base.tv_usec; - } } /** @@ -1272,8 +1266,8 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval * */ static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp) { - skb->tstamp.off_sec = stamp->tv_sec - skb_tv_base.tv_sec; - skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec; + skb->tstamp.off_sec = stamp->tv_sec; + skb->tstamp.off_usec = stamp->tv_usec; } extern void __net_timestamp(struct sk_buff 
*skb); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f80a28785610..0e9431b59fb2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -71,8 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly; static kmem_cache_t *skbuff_fclone_cache __read_mostly; -struct timeval __read_mostly skb_tv_base; - /* * Keep out-of-line to prevent kernel bloat. * __builtin_return_address is not used because it is not always @@ -1708,8 +1706,6 @@ void __init skb_init(void) NULL, NULL); if (!skbuff_fclone_cache) panic("cannot create skbuff cache"); - - do_gettimeofday(&skb_tv_base); } EXPORT_SYMBOL(___pskb_trim); @@ -1743,4 +1739,3 @@ EXPORT_SYMBOL(skb_prepare_seq_read); EXPORT_SYMBOL(skb_seq_read); EXPORT_SYMBOL(skb_abort_seq_read); EXPORT_SYMBOL(skb_find_text); -EXPORT_SYMBOL(skb_tv_base); diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index d54f14d926f6..36339eb39e17 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -240,8 +240,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) pmsg->packet_id = (unsigned long )entry; pmsg->data_len = data_len; - pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; - pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; + pmsg->timestamp_sec = entry->skb->tstamp.off_sec; + pmsg->timestamp_usec = entry->skb->tstamp.off_usec; pmsg->mark = entry->skb->nfmark; pmsg->hook = entry->info->hook; pmsg->hw_protocol = entry->skb->protocol; diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index e2c14f3cb2fc..2883ccd8a91d 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -225,8 +225,8 @@ static void ipt_ulog_packet(unsigned int hooknum, /* copy hook, prefix, timestamp, payload, etc. 
*/ pm->data_len = copy_len; - pm->timestamp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; - pm->timestamp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; + pm->timestamp_sec = skb->tstamp.off_sec; + pm->timestamp_usec = skb->tstamp.off_usec; pm->mark = skb->nfmark; pm->hook = hooknum; if (prefix != NULL) diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index aa11cf366efa..5027bbe6415e 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -238,8 +238,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) pmsg->packet_id = (unsigned long )entry; pmsg->data_len = data_len; - pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; - pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; + pmsg->timestamp_sec = entry->skb->tstamp.off_sec; + pmsg->timestamp_usec = entry->skb->tstamp.off_usec; pmsg->mark = entry->skb->nfmark; pmsg->hook = entry->info->hook; pmsg->hw_protocol = entry->skb->protocol; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index ff5601ceedcb..efcd10f996ba 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -494,8 +494,8 @@ __build_packet_message(struct nfulnl_instance *inst, if (skb->tstamp.off_sec) { struct nfulnl_msg_packet_timestamp ts; - ts.sec = cpu_to_be64(skb_tv_base.tv_sec + skb->tstamp.off_sec); - ts.usec = cpu_to_be64(skb_tv_base.tv_usec + skb->tstamp.off_usec); + ts.sec = cpu_to_be64(skb->tstamp.off_sec); + ts.usec = cpu_to_be64(skb->tstamp.off_usec); NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); } diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index f81fe8c52e99..eaa44c49567b 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -492,8 +492,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (entry->skb->tstamp.off_sec) { struct nfqnl_msg_packet_timestamp ts; - ts.sec = cpu_to_be64(skb_tv_base.tv_sec + entry->skb->tstamp.off_sec); - ts.usec = cpu_to_be64(skb_tv_base.tv_usec + entry->skb->tstamp.off_usec); + ts.sec = cpu_to_be64(entry->skb->tstamp.off_sec); + ts.usec = cpu_to_be64(entry->skb->tstamp.off_usec); NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6a67a87384cc..499ae3df4a44 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -654,8 +654,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe __net_timestamp(skb); sock_enable_timestamp(sk); } - h->tp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; - h->tp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; + h->tp_sec = skb->tstamp.off_sec; + h->tp_usec = skb->tstamp.off_usec; sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); sll->sll_halen = 0; -- cgit v1.2.3 From 81c3d5470ecc70564eb9209946730fe2be93ad06 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 3 Oct 2005 14:13:38 -0700 Subject: [INET]: speedup inet (tcp/dccp) lookups Arnaldo and I agreed it could be applied now, because I have other pending patches depending on this one (Thank you Arnaldo) (The other important patch moves skc_refcnt in a separate cache line, so that the SMP/NUMA performance doesnt suffer from cache line ping pongs) 1) First some performance data : -------------------------------- tcp_v4_rcv() wastes a *lot* of time in __inet_lookup_established() The most time critical code is : sk_for_each(sk, node, &head->chain) { if (INET_MATCH(sk, acookie, 
saddr, daddr, ports, dif)) goto hit; /* You sunk my battleship! */ } The sk_for_each() does use prefetch() hints but only the beginning of "struct sock" is prefetched. As INET_MATCH's first comparison uses inet_sk(__sk)->daddr, which is far away from the beginning of "struct sock", it has to bring a cold cache line into the CPU cache. Each iteration has to use at least 2 cache lines. This can be problematic if some chains are very long. 2) The goal ----------- The idea I had is to change things so that INET_MATCH() may return FALSE in 99% of cases only using the data already in the CPU cache, using one cache line per iteration. 3) Description of the patch --------------------------- Adds a new 'unsigned int skc_hash' field in 'struct sock_common', filling a 32-bit hole on 64-bit platforms. struct sock_common { unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; int skc_bound_dev_if; struct hlist_node skc_node; struct hlist_node skc_bind_node; atomic_t skc_refcnt; + unsigned int skc_hash; struct proto *skc_prot; }; Store the full hash in this 32-bit field, not masked by (ehash_size - 1). Using this full hash as the first comparison done in INET_MATCH permits us to immediately skip the element without touching a second cache line in case of a miss. Remove the sk_hashent/tw_hashent fields since skc_hash (aliased to sk_hash and tw_hash) already contains the slot number if we mask with (ehash_size - 1) File include/net/inet_hashtables.h 64-bit platforms: #define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ (((__sk)->sk_hash == (__hash)) && \ ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 32-bit platforms: #define TCP_IPV4_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ (((__sk)->sk_hash == (__hash)) && \ (inet_sk(__sk)->daddr == (__saddr)) && \ (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) - Adds a prefetch(head->chain.first) in __inet_lookup_established()/__tcp_v4_check_established() and __inet6_lookup_established()/__tcp_v6_check_established() and __dccp_v4_check_established() to bring into cache the first element of the list, before the {read|write}_lock(&head->lock); Signed-off-by: Eric Dumazet Acked-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/ipv6.h | 5 +-- include/linux/tc_ematch/tc_em_meta.h | 2 +- include/net/inet6_hashtables.h | 21 ++++++------ include/net/inet_hashtables.h | 64 +++++++++++++++++++++--------------- include/net/inet_timewait_sock.h | 2 +- include/net/sock.h | 5 +-- net/atm/common.c | 2 +- net/dccp/ipv4.c | 12 +++---- net/ipv4/inet_timewait_sock.c | 6 ++-- net/ipv4/tcp_ipv4.c | 11 ++++--- net/ipv6/tcp_ipv6.c | 18 +++++----- net/sched/em_meta.c | 6 ++-- 12 files changed, 85 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index bb6f88e14061..e0b922785d98 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -372,8 +372,9 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define inet_v6_ipv6only(__sk) 0 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ -#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ - (((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ +#define INET6_MATCH(__sk, __hash, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && \ + ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ ((__sk)->sk_family == AF_INET6) && \ ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index 081b1ee8516e..e21937cf91d0 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h @@ -71,7 +71,7 @@ enum TCF_META_ID_SK_SNDBUF, TCF_META_ID_SK_ALLOCS, TCF_META_ID_SK_ROUTE_CAPS, - TCF_META_ID_SK_HASHENT, + TCF_META_ID_SK_HASH, TCF_META_ID_SK_LINGERTIME, TCF_META_ID_SK_ACK_BACKLOG, TCF_META_ID_SK_MAX_ACK_BACKLOG, diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 03df3b157960..5a2beed5a770 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -26,19 +26,18 @@ struct inet_hashinfo; /* I have no idea if this is a good hash for v6 or not. -DaveM */ -static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, - const struct in6_addr *faddr, const u16 fport, - const int ehash_size) +static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, + const struct in6_addr *faddr, const u16 fport) { - int hashent = (lport ^ fport); + unsigned int hashent = (lport ^ fport); hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); hashent ^= hashent >> 16; hashent ^= hashent >> 8; - return (hashent & (ehash_size - 1)); + return hashent; } -static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size) +static inline int inet6_sk_ehashfn(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ipv6_pinfo *np = inet6_sk(sk); @@ -46,7 +45,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size) const struct in6_addr *faddr = &np->daddr; const __u16 lport = inet->num; const __u16 fport = inet->dport; - return inet6_ehashfn(laddr, lport, faddr, fport, ehash_size); + return inet6_ehashfn(laddr, lport, faddr, fport); } /* @@ -69,14 +68,14 @@ static inline struct sock * /* Optimize here for direct hit, only listening connections can * have wildcards anyways. 
*/ - const int hash = inet6_ehashfn(daddr, hnum, saddr, sport, - hashinfo->ehash_size); - struct inet_ehash_bucket *head = &hashinfo->ehash[hash]; + unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); + struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); + prefetch(head->chain.first); read_lock(&head->lock); sk_for_each(sk, node, &head->chain) { /* For IPV6 do the cheaper port and family tests first. */ - if (INET6_MATCH(sk, saddr, daddr, ports, dif)) + if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif)) goto hit; /* You sunk my battleship! */ } /* Must check for a TIME_WAIT'er before going to listener hash. */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 646b6ea7fe26..35f49e65e295 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -108,7 +108,7 @@ struct inet_hashinfo { struct inet_bind_hashbucket *bhash; int bhash_size; - int ehash_size; + unsigned int ehash_size; /* All sockets in TCP_LISTEN state will be in here. This is the only * table where wildcard'd TCP sockets can exist. Hash function here @@ -130,17 +130,16 @@ struct inet_hashinfo { int port_rover; }; -static inline int inet_ehashfn(const __u32 laddr, const __u16 lport, - const __u32 faddr, const __u16 fport, - const int ehash_size) +static inline unsigned int inet_ehashfn(const __u32 laddr, const __u16 lport, + const __u32 faddr, const __u16 fport) { - int h = (laddr ^ lport) ^ (faddr ^ fport); + unsigned int h = (laddr ^ lport) ^ (faddr ^ fport); h ^= h >> 16; h ^= h >> 8; - return h & (ehash_size - 1); + return h; } -static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size) +static inline int inet_sk_ehashfn(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const __u32 laddr = inet->rcv_saddr; @@ -148,7 +147,14 @@ static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size) const __u32 faddr = inet->daddr; const __u16 fport = inet->dport; - return inet_ehashfn(laddr, lport, faddr, fport, ehash_size); + return inet_ehashfn(laddr, lport, faddr, fport); +} + +static inline struct inet_ehash_bucket *inet_ehash_bucket( + struct inet_hashinfo *hashinfo, + unsigned int hash) +{ + return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)]; } extern struct inet_bind_bucket * @@ -235,9 +241,11 @@ static inline void __inet_hash(struct inet_hashinfo *hashinfo, lock = &hashinfo->lhash_lock; inet_listen_wlock(hashinfo); } else { - sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size); - list = &hashinfo->ehash[sk->sk_hashent].chain; - lock = &hashinfo->ehash[sk->sk_hashent].lock; + struct inet_ehash_bucket *head; + sk->sk_hash = inet_sk_ehashfn(sk); + head = inet_ehash_bucket(hashinfo, sk->sk_hash); + list = &head->chain; + lock = &head->lock; write_lock(lock); } __sk_add_node(sk, list); @@ -268,9 +276,8 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk) inet_listen_wlock(hashinfo); lock = &hashinfo->lhash_lock; } else { - struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent]; - lock = &head->lock; - write_lock_bh(&head->lock); + lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock; + write_lock_bh(lock); } if (__sk_del_node_init(sk)) @@ -337,23 +344,27 @@ sherry_cache: #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr)); #endif /* __BIG_ENDIAN */ -#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ - (((*((__u64 *)&(inet_sk(__sk)->daddr))) == 
(__cookie)) && \ +#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && \ + ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) -#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ - (((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ +#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && \ + ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) #else /* 32-bit arch */ #define INET_ADDR_COOKIE(__name, __saddr, __daddr) -#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_sk(__sk)->daddr == (__saddr)) && \ +#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && \ + (inet_sk(__sk)->daddr == (__saddr)) && \ (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) -#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_twsk(__sk)->tw_daddr == (__saddr)) && \ +#define INET_TW_MATCH(__sk, __hash,__cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && \ + (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) @@ -378,18 +389,19 @@ static inline struct sock * /* Optimize here for direct hit, only listening connections can * have wildcards anyways. */ - const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size); - struct inet_ehash_bucket *head = &hashinfo->ehash[hash]; + unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport); + struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); + prefetch(head->chain.first); read_lock(&head->lock); sk_for_each(sk, node, &head->chain) { - if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif)) + if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) goto hit; /* You sunk my battleship! */ } /* Must check for a TIME_WAIT'er before going to listener hash. */ sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { - if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) + if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) goto hit; } sk = NULL; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 3b070352e869..4ade56ef3a4d 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -112,6 +112,7 @@ struct inet_timewait_sock { #define tw_node __tw_common.skc_node #define tw_bind_node __tw_common.skc_bind_node #define tw_refcnt __tw_common.skc_refcnt +#define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot volatile unsigned char tw_substate; /* 3 bits hole, try to pack */ @@ -126,7 +127,6 @@ struct inet_timewait_sock { /* And these are ours. 
*/ __u8 tw_ipv6only:1; /* 31 bits hole, try to pack */ - int tw_hashent; int tw_timeout; unsigned long tw_ttd; struct inet_bind_bucket *tw_tb; diff --git a/include/net/sock.h b/include/net/sock.h index 8c48fbecb7cf..b6440805c420 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -99,6 +99,7 @@ struct proto; * @skc_node: main hash linkage for various protocol lookup tables * @skc_bind_node: bind hash linkage for various protocol lookup tables * @skc_refcnt: reference count + * @skc_hash: hash value used with various protocol lookup tables * @skc_prot: protocol handlers inside a network family * * This is the minimal network layer representation of sockets, the header @@ -112,6 +113,7 @@ struct sock_common { struct hlist_node skc_node; struct hlist_node skc_bind_node; atomic_t skc_refcnt; + unsigned int skc_hash; struct proto *skc_prot; }; @@ -139,7 +141,6 @@ struct sock_common { * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) * @sk_lingertime: %SO_LINGER l_linger setting - * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash) * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct * @sk_error_queue: rarely used @@ -186,6 +187,7 @@ struct sock { #define sk_node __sk_common.skc_node #define sk_bind_node __sk_common.skc_bind_node #define sk_refcnt __sk_common.skc_refcnt +#define sk_hash __sk_common.skc_hash #define sk_prot __sk_common.skc_prot unsigned char sk_shutdown : 2, sk_no_check : 2, @@ -208,7 +210,6 @@ struct sock { unsigned int sk_allocation; int sk_sndbuf; int sk_route_caps; - int sk_hashent; unsigned long sk_flags; unsigned long sk_lingertime; /* diff --git a/net/atm/common.c b/net/atm/common.c index 801a5813ec60..63feea49fb13 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -46,7 +46,7 @@ static void __vcc_insert_socket(struct sock *sk) struct atm_vcc *vcc = atm_sk(sk); struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)]; - sk->sk_hashent = vcc->vci & (VCC_HTABLE_SIZE - 1); + sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); sk_add_node(sk, head); } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 40fe6afacde6..ae088d1347af 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -62,27 +62,27 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport, const int dif = sk->sk_bound_dev_if; INET_ADDR_COOKIE(acookie, saddr, daddr) const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); - const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, - dccp_hashinfo.ehash_size); - struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash]; + unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); + struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash); const struct sock *sk2; const struct hlist_node *node; struct inet_timewait_sock *tw; + prefetch(head->chain.first); write_lock(&head->lock); /* Check TIME-WAIT sockets first. */ sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) { tw = inet_twsk(sk2); - if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) + if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } tw = NULL; /* And established part... 
*/ sk_for_each(sk2, node, &head->chain) { - if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) + if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } @@ -90,7 +90,7 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport, * in hash table socket with a funny identity. */ inet->num = lport; inet->sport = htons(lport); - sk->sk_hashent = hash; + sk->sk_hash = hash; BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); sock_prot_inc_use(sk->sk_prot); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 4d1502a49852..f9076ef3a1a8 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -20,7 +20,7 @@ void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashi struct inet_bind_hashbucket *bhead; struct inet_bind_bucket *tb; /* Unlink from established hashes. */ - struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent]; + struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash); write_lock(&ehead->lock); if (hlist_unhashed(&tw->tw_node)) { @@ -60,7 +60,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, { const struct inet_sock *inet = inet_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); - struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent]; + struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); struct inet_bind_hashbucket *bhead; /* Step 1: Put TW into bind hash. Original socket stays there too. Note, that any socket with inet->num != 0 MUST be bound in @@ -106,7 +106,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat tw->tw_dport = inet->dport; tw->tw_family = sk->sk_family; tw->tw_reuse = sk->sk_reuse; - tw->tw_hashent = sk->sk_hashent; + tw->tw_hash = sk->sk_hash; tw->tw_ipv6only = 0; tw->tw_prot = sk->sk_prot_creator; atomic_set(&tw->tw_refcnt, 1); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 13dfb391cdf1..c85819d8474b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -130,19 +130,20 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, int dif = sk->sk_bound_dev_if; INET_ADDR_COOKIE(acookie, saddr, daddr) const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); - const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size); - struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; + unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); + struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash); struct sock *sk2; const struct hlist_node *node; struct inet_timewait_sock *tw; + prefetch(head->chain.first); write_lock(&head->lock); /* Check TIME-WAIT sockets first. */ sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { tw = inet_twsk(sk2); - if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { + if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) { const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2); struct tcp_sock *tp = tcp_sk(sk); @@ -179,7 +180,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, /* And established part... */ sk_for_each(sk2, node, &head->chain) { - if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) + if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } @@ -188,7 +189,7 @@ unique: * in hash table socket with a funny identity. 
*/ inet->num = lport; inet->sport = htons(lport); - sk->sk_hashent = hash; + sk->sk_hash = hash; BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); sock_prot_inc_use(sk->sk_prot); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 80643e6b346b..d693cb988b78 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -209,9 +209,11 @@ static __inline__ void __tcp_v6_hash(struct sock *sk) lock = &tcp_hashinfo.lhash_lock; inet_listen_wlock(&tcp_hashinfo); } else { - sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size); - list = &tcp_hashinfo.ehash[sk->sk_hashent].chain; - lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock; + unsigned int hash; + sk->sk_hash = hash = inet6_sk_ehashfn(sk); + hash &= (tcp_hashinfo.ehash_size - 1); + list = &tcp_hashinfo.ehash[hash].chain; + lock = &tcp_hashinfo.ehash[hash].lock; write_lock(lock); } @@ -322,13 +324,13 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport, const struct in6_addr *saddr = &np->daddr; const int dif = sk->sk_bound_dev_if; const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); - const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport, - tcp_hashinfo.ehash_size); - struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; + unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport); + struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash); struct sock *sk2; const struct hlist_node *node; struct inet_timewait_sock *tw; + prefetch(head->chain.first); write_lock(&head->lock); /* Check TIME-WAIT sockets first. */ @@ -365,14 +367,14 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport, /* And established part... */ sk_for_each(sk2, node, &head->chain) { - if (INET6_MATCH(sk2, saddr, daddr, ports, dif)) + if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) goto not_unique; } unique: BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); - sk->sk_hashent = hash; + sk->sk_hash = hash; sock_prot_inc_use(sk->sk_prot); write_unlock(&head->lock); diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 00eae5f9a01a..cf68a59fdc5a 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -393,10 +393,10 @@ META_COLLECTOR(int_sk_route_caps) dst->value = skb->sk->sk_route_caps; } -META_COLLECTOR(int_sk_hashent) +META_COLLECTOR(int_sk_hash) { SKIP_NONLOCAL(skb); - dst->value = skb->sk->sk_hashent; + dst->value = skb->sk->sk_hash; } META_COLLECTOR(int_sk_lingertime) @@ -515,7 +515,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc), [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc), [META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps), - [META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent), + [META_ID(SK_HASH)] = META_FUNC(int_sk_hash), [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime), [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl), [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl), -- cgit v1.2.3 From e5ed639913eea3e4783a550291775ab78dd84966 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 3 Oct 2005 14:35:55 -0700 Subject: [IPV4]: Replace __in_dev_get with __in_dev_get_rcu/rtnl The following patch renames __in_dev_get() to __in_dev_get_rtnl() and introduces __in_dev_get_rcu() to cover the second case. 1) RCU with refcnt should use in_dev_get(). 2) RCU without refcnt should use __in_dev_get_rcu(). 3) All others must hold RTNL and use __in_dev_get_rtnl(). 
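To illustrate the three rules, a minimal sketch follows. The helper functions are hypothetical and not part of this patch, but the access patterns match the conversions in the diff below.

/*
 * Minimal sketch of the three access patterns above.  The helpers are
 * illustrative examples only, not part of this patch.
 */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>

/* 1) The in_device is needed beyond the read side: take a reference. */
static void example_with_refcnt(struct net_device *dev)
{
	struct in_device *in_dev = in_dev_get(dev);

	if (!in_dev)
		return;
	/* ... safe to use in_dev here, even after the RCU section ends ... */
	in_dev_put(in_dev);
}

/* 2) Short RCU-protected read, no reference taken. */
static u32 example_first_local_addr(struct net_device *dev)
{
	struct in_device *in_dev;
	u32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && in_dev->ifa_list)
		addr = in_dev->ifa_list->ifa_local;
	rcu_read_unlock();

	return addr;
}

/* 3) Caller already holds the RTNL: a plain dereference is enough. */
static void example_under_rtnl(struct net_device *dev, int on)
{
	struct in_device *in_dev;

	ASSERT_RTNL();
	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev)
		in_dev->cnf.proxy_arp = on;
}

The hunks below show the same split in practice: RCU readers such as sdlamain.c and arp_constructor() switch to __in_dev_get_rcu(), while paths that already run under the RTNL (arp_req_set(), devinet.c, ipmr.c) use __in_dev_get_rtnl().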
There is one exception in net/ipv4/route.c which is in fact a pre-existing race condition. I've marked it as such so that we remember to fix it. This patch is based on suggestions and prior work by Suzanne Wood and Paul McKenney. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 2 +- drivers/net/wan/sdlamain.c | 23 ++++++++++++++--------- drivers/net/wan/syncppp.c | 2 +- drivers/net/wireless/strip.c | 4 ++-- drivers/parisc/led.c | 5 ++++- drivers/s390/net/qeth_main.c | 4 ++-- include/linux/inetdevice.h | 12 ++++++++++-- net/atm/clip.c | 2 +- net/core/netpoll.c | 2 +- net/core/pktgen.c | 2 +- net/econet/af_econet.c | 2 +- net/ipv4/arp.c | 10 +++++----- net/ipv4/devinet.c | 22 +++++++++++----------- net/ipv4/fib_frontend.c | 4 ++-- net/ipv4/fib_semantics.c | 4 ++-- net/ipv4/igmp.c | 2 +- net/ipv4/ip_gre.c | 4 ++-- net/ipv4/ipmr.c | 6 +++--- net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 2 +- net/ipv4/netfilter/ipt_REDIRECT.c | 2 +- net/ipv4/route.c | 6 ++++-- net/ipv6/addrconf.c | 2 +- net/irda/irlan/irlan_eth.c | 2 +- net/sctp/protocol.c | 2 +- 24 files changed, 73 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6d00c3de1a83..bf81cd45e4d4 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2776,7 +2776,7 @@ static u32 bond_glean_dev_ip(struct net_device *dev) return 0; rcu_read_lock(); - idev = __in_dev_get(dev); + idev = __in_dev_get_rcu(dev); if (!idev) goto out; diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c index 74e151acef3e..7a8b22a7ea31 100644 --- a/drivers/net/wan/sdlamain.c +++ b/drivers/net/wan/sdlamain.c @@ -57,6 +57,7 @@ #include /* request_region(), release_region() */ #include /* WAN router definitions */ #include /* WANPIPE common user API definitions */ +#include #include #include /* phys_to_virt() */ @@ -1268,37 +1269,41 @@ unsigned long get_ip_address(struct net_device *dev, int option) struct in_ifaddr *ifaddr; struct in_device *in_dev; + unsigned long addr = 0; - if ((in_dev = __in_dev_get(dev)) == NULL){ - return 0; + rcu_read_lock(); + if ((in_dev = __in_dev_get_rcu(dev)) == NULL){ + goto out; } if ((ifaddr = in_dev->ifa_list)== NULL ){ - return 0; + goto out; } switch (option){ case WAN_LOCAL_IP: - return ifaddr->ifa_local; + addr = ifaddr->ifa_local; break; case WAN_POINTOPOINT_IP: - return ifaddr->ifa_address; + addr = ifaddr->ifa_address; break; case WAN_NETMASK_IP: - return ifaddr->ifa_mask; + addr = ifaddr->ifa_mask; break; case WAN_BROADCAST_IP: - return ifaddr->ifa_broadcast; + addr = ifaddr->ifa_broadcast; break; default: - return 0; + break; } - return 0; +out: + rcu_read_unlock(); + return addr; } void add_gateway(sdla_t *card, struct net_device *dev) diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index b56a7b516d24..a6d3b55013a5 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -769,7 +769,7 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb) u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? 
*/ #ifdef CONFIG_INET rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) != NULL) + if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { for (ifa=in_dev->ifa_list; ifa != NULL; ifa=ifa->ifa_next) { diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 4b0acae22b0d..7bc7fc823128 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -1352,7 +1352,7 @@ static unsigned char *strip_make_packet(unsigned char *buffer, struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(strip_info->dev); + in_dev = __in_dev_get_rcu(strip_info->dev); if (in_dev == NULL) { rcu_read_unlock(); return NULL; @@ -1508,7 +1508,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb) brd = addr = 0; rcu_read_lock(); - in_dev = __in_dev_get(strip_info->dev); + in_dev = __in_dev_get_rcu(strip_info->dev); if (in_dev) { if (in_dev->ifa_list) { brd = in_dev->ifa_list->ifa_broadcast; diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index e90fb72a6962..286902298e33 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -358,9 +359,10 @@ static __inline__ int led_get_net_activity(void) /* we are running as tasklet, so locking dev_base * for reading should be OK */ read_lock(&dev_base_lock); + rcu_read_lock(); for (dev = dev_base; dev; dev = dev->next) { struct net_device_stats *stats; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); if (!in_dev || !in_dev->ifa_list) continue; if (LOOPBACK(in_dev->ifa_list->ifa_local)) @@ -371,6 +373,7 @@ static __inline__ int led_get_net_activity(void) rx_total += stats->rx_packets; tx_total += stats->tx_packets; } + rcu_read_unlock(); read_unlock(&dev_base_lock); retval = 0; diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 86582cf1e19e..71de834ece1a 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -5200,7 +5200,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid) if (!card->vlangrp) return; rcu_read_lock(); - in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]); + in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]); if (!in_dev) goto out; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { @@ -7725,7 +7725,7 @@ qeth_arp_constructor(struct neighbour *neigh) goto out; rcu_read_lock(); - in_dev = rcu_dereference(__in_dev_get(dev)); + in_dev = __in_dev_get_rcu(dev); if (in_dev == NULL) { rcu_read_unlock(); return -EINVAL; diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 7e1e15f934f3..fd7af86151b1 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -142,13 +142,21 @@ static __inline__ int bad_mask(u32 mask, u32 addr) #define endfor_ifa(in_dev) } +static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) +{ + struct in_device *in_dev = dev->ip_ptr; + if (in_dev) + in_dev = rcu_dereference(in_dev); + return in_dev; +} + static __inline__ struct in_device * in_dev_get(const struct net_device *dev) { struct in_device *in_dev; rcu_read_lock(); - in_dev = dev->ip_ptr; + in_dev = __in_dev_get_rcu(dev); if (in_dev) atomic_inc(&in_dev->refcnt); rcu_read_unlock(); @@ -156,7 +164,7 @@ in_dev_get(const struct net_device *dev) } static __inline__ struct in_device * -__in_dev_get(const struct net_device *dev) +__in_dev_get_rtnl(const struct net_device *dev) { return (struct in_device*)dev->ip_ptr; } diff --git 
a/net/atm/clip.c b/net/atm/clip.c index 28dab55a4387..4f54c9a5e84a 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -310,7 +310,7 @@ static int clip_constructor(struct neighbour *neigh) if (neigh->type != RTN_UNICAST) return -EINVAL; rcu_read_lock(); - in_dev = rcu_dereference(__in_dev_get(dev)); + in_dev = __in_dev_get_rcu(dev); if (!in_dev) { rcu_read_unlock(); return -EINVAL; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 5265dfd69928..802fe11efad0 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np) if (!np->local_ip) { rcu_read_lock(); - in_dev = __in_dev_get(ndev); + in_dev = __in_dev_get_rcu(ndev); if (!in_dev || !in_dev->ifa_list) { rcu_read_unlock(); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b7f2d65a614f..44de070b6045 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1667,7 +1667,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(pkt_dev->odev); + in_dev = __in_dev_get_rcu(pkt_dev->odev); if (in_dev) { if (in_dev->ifa_list) { pkt_dev->saddr_min = in_dev->ifa_list->ifa_address; diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 4a62093eb343..34fdac51df96 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -406,7 +406,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, unsigned long network = 0; rcu_read_lock(); - idev = __in_dev_get(dev); + idev = __in_dev_get_rcu(dev); if (idev) { if (idev->ifa_list) network = ntohl(idev->ifa_list->ifa_address) & diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index ec0e36893b01..b425748f02d7 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -241,7 +241,7 @@ static int arp_constructor(struct neighbour *neigh) neigh->type = inet_addr_type(addr); rcu_read_lock(); - in_dev = rcu_dereference(__in_dev_get(dev)); + in_dev = __in_dev_get_rcu(dev); if (in_dev == NULL) { rcu_read_unlock(); return -EINVAL; @@ -989,8 +989,8 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev) ipv4_devconf.proxy_arp = 1; return 0; } - if (__in_dev_get(dev)) { - __in_dev_get(dev)->cnf.proxy_arp = 1; + if (__in_dev_get_rtnl(dev)) { + __in_dev_get_rtnl(dev)->cnf.proxy_arp = 1; return 0; } return -ENXIO; @@ -1095,8 +1095,8 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev) ipv4_devconf.proxy_arp = 0; return 0; } - if (__in_dev_get(dev)) { - __in_dev_get(dev)->cnf.proxy_arp = 0; + if (__in_dev_get_rtnl(dev)) { + __in_dev_get_rtnl(dev)->cnf.proxy_arp = 0; return 0; } return -ENXIO; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index ba2895ae8151..74f2207e131a 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -351,7 +351,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa) static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) { - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); @@ -449,7 +449,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg goto out; rc = -ENOBUFS; - if ((in_dev = __in_dev_get(dev)) == NULL) { + if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) { in_dev = inetdev_init(dev); if (!in_dev) goto out; @@ -584,7 +584,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) if (colon) *colon = ':'; - if ((in_dev = __in_dev_get(dev)) != NULL) { + if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { if (tryaddrmatch) { /* Matthias Andree */ /* compare label and address (4.4BSD 
style) */ @@ -748,7 +748,7 @@ rarok: static int inet_gifconf(struct net_device *dev, char __user *buf, int len) { - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); struct in_ifaddr *ifa; struct ifreq ifr; int done = 0; @@ -791,7 +791,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto no_in_dev; @@ -818,7 +818,7 @@ no_in_dev: read_lock(&dev_base_lock); rcu_read_lock(); for (dev = dev_base; dev; dev = dev->next) { - if ((in_dev = __in_dev_get(dev)) == NULL) + if ((in_dev = __in_dev_get_rcu(dev)) == NULL) continue; for_primary_ifa(in_dev) { @@ -887,7 +887,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop if (dev) { rcu_read_lock(); - if ((in_dev = __in_dev_get(dev))) + if ((in_dev = __in_dev_get_rcu(dev))) addr = confirm_addr_indev(in_dev, dst, local, scope); rcu_read_unlock(); @@ -897,7 +897,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop read_lock(&dev_base_lock); rcu_read_lock(); for (dev = dev_base; dev; dev = dev->next) { - if ((in_dev = __in_dev_get(dev))) { + if ((in_dev = __in_dev_get_rcu(dev))) { addr = confirm_addr_indev(in_dev, dst, local, scope); if (addr) break; @@ -957,7 +957,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); @@ -1078,7 +1078,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) if (idx > s_idx) s_ip_idx = 0; rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) == NULL) { + if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { rcu_read_unlock(); continue; } @@ -1149,7 +1149,7 @@ void inet_forward_change(void) for (dev = dev_base; dev; dev = dev->next) { struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (in_dev) in_dev->cnf.forwarding = on; rcu_read_unlock(); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 4e1379f71269..e61bc7177eb1 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -173,7 +173,7 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif, no_addr = rpf = 0; rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (in_dev) { no_addr = in_dev->ifa_list == NULL; rpf = IN_DEV_RPFILTER(in_dev); @@ -607,7 +607,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); if (event == NETDEV_UNREGISTER) { fib_disable_ip(dev, 2); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index d41219e8037c..186f20c4a45e 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1087,7 +1087,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm, rta->rta_oif = &dev->ifindex; if (colon) { struct in_ifaddr *ifa; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); if (!in_dev) return -ENODEV; *colon = ':'; @@ -1268,7 +1268,7 @@ int fib_sync_up(struct net_device *dev) } if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 
continue; - if (nh->nh_dev != dev || __in_dev_get(dev) == NULL) + if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev)) continue; alive++; spin_lock_bh(&fib_multipath_lock); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 70c44e4c3ceb..8b6d3939e1e6 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1323,7 +1323,7 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr) } if (dev) { imr->imr_ifindex = dev->ifindex; - idev = __in_dev_get(dev); + idev = __in_dev_get_rtnl(dev); } return idev; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f0d5740d7e22..896ce3f8f53a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1104,10 +1104,10 @@ static int ipgre_open(struct net_device *dev) return -EADDRNOTAVAIL; dev = rt->u.dst.dev; ip_rt_put(rt); - if (__in_dev_get(dev) == NULL) + if (__in_dev_get_rtnl(dev) == NULL) return -EADDRNOTAVAIL; t->mlink = dev->ifindex; - ip_mc_inc_group(__in_dev_get(dev), t->parms.iph.daddr); + ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); } return 0; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9dbf5909f3a6..302b7eb507c9 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -149,7 +149,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) { dev->flags |= IFF_MULTICAST; - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rtnl(dev); if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL) goto failure; in_dev->cnf.rp_filter = 0; @@ -278,7 +278,7 @@ static int vif_delete(int vifi) dev_set_allmulti(dev, -1); - if ((in_dev = __in_dev_get(dev)) != NULL) { + if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { in_dev->cnf.mc_forwarding--; ip_rt_multicast_event(in_dev); } @@ -421,7 +421,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) return -EINVAL; } - if ((in_dev = __in_dev_get(dev)) == NULL) + if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) return -EADDRNOTAVAIL; in_dev->cnf.mc_forwarding++; dev_set_allmulti(dev, +1); diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c index 577bac22dcc6..186646eb249f 100644 --- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c +++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c @@ -58,7 +58,7 @@ static int help(struct sk_buff **pskb, goto out; rcu_read_lock(); - in_dev = __in_dev_get(rt->u.dst.dev); + in_dev = __in_dev_get_rcu(rt->u.dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { if (ifa->ifa_broadcast == iph->daddr) { diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index 715cb613405c..5245bfd33d52 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c @@ -93,7 +93,7 @@ redirect_target(struct sk_buff **pskb, newdst = 0; rcu_read_lock(); - indev = __in_dev_get((*pskb)->dev); + indev = __in_dev_get_rcu((*pskb)->dev); if (indev && (ifa = indev->ifa_list)) newdst = ifa->ifa_local; rcu_read_unlock(); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8549f26e2495..381dd6a6aebb 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2128,7 +2128,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr, struct in_device *in_dev; rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) != NULL) { + if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { int our = ip_check_mc(in_dev, daddr, saddr, skb->nh.iph->protocol); if (our @@ -2443,7 +2443,9 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) err = -ENODEV; if (dev_out == NULL) goto out; - if 
(__in_dev_get(dev_out) == NULL) { + + /* RACE: Check return value of inet_select_addr instead. */ + if (__in_dev_get_rtnl(dev_out) == NULL) { dev_put(dev_out); goto out; /* Wrong error code */ } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4e509e52fbc1..a970b4727ce8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1806,7 +1806,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) } for (dev = dev_base; dev != NULL; dev = dev->next) { - struct in_device * in_dev = __in_dev_get(dev); + struct in_device * in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { struct in_ifaddr * ifa; diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 071cd2cefd8a..953e255d2bc8 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -310,7 +310,7 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev) #ifdef CONFIG_INET IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (in_dev == NULL) goto out; if (in_dev->ifa_list) diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e7025be77691..f01d1c9002a1 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -147,7 +147,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, struct sctp_sockaddr_entry *addr; rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) == NULL) { + if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { rcu_read_unlock(); return; } -- cgit v1.2.3